content
stringlengths 7
928k
| avg_line_length
float64 3.5
33.8k
| max_line_length
int64 6
139k
| alphanum_fraction
float64 0.08
0.96
| licenses
sequence | repository_name
stringlengths 7
104
| path
stringlengths 4
230
| size
int64 7
928k
| lang
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|
from django.contrib import auth
from django.shortcuts import render
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
    """Create a new user in the system via POST."""

    serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
    """Create a new auth token for a user."""

    serializer_class = AuthTokenSerializer
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Manage the authenticated user"""

    serializer_class = UserSerializer
    # Require a valid token; anonymous requests are rejected with 401.
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        """
        Retrieve the authenticated user.

        GET/PATCH/PUT on this view always operate on the requesting
        user, so no pk/lookup field is needed in the URL.
        """
        return self.request.user
"MIT"
] | xemperforya/recipe-app-api | app/user/views.py | 1,008 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic payload type threaded through the optional ``cls`` response hook.
T = TypeVar('T')
# Alias for JSON-serializable request bodies accepted by the build_* helpers.
JSONType = Any
# Signature of the optional ``cls`` callback each operation accepts: it is
# passed the raw pipeline response, the deserialized body, and response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Client-side validation is disabled: the service performs its own validation.
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing managed cluster snapshots in a subscription."""
    api_version = "2022-03-02-preview"
    accept = "application/json"

    # URL: substitute the serialized subscription id into the path template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedclustersnapshots',
    )
    formatted_url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
    )

    # Query string: only the api-version parameter is required.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: ask for a JSON response.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_list_by_resource_group_request(
    subscription_id: str,
    resource_group_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing managed cluster snapshots in a resource group."""
    api_version = "2022-03-02-preview"
    accept = "application/json"

    # URL: substitute the serialized subscription and resource-group names.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots',
    )
    formatted_url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, 'str', max_length=90, min_length=1
        ),
    )

    # Query string: only the api-version parameter is required.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: ask for a JSON response.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request retrieving a single managed cluster snapshot."""
    api_version = "2022-03-02-preview"
    accept = "application/json"

    # URL: substitute subscription, resource group, and the (pattern-checked)
    # snapshot resource name into the path template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}',
    )
    formatted_url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, 'str', max_length=90, min_length=1
        ),
        resourceName=_SERIALIZER.url(
            "resource_name", resource_name, 'str', max_length=63, min_length=1,
            pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$',
        ),
    )

    # Query string: only the api-version parameter is required.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: ask for a JSON response.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_create_or_update_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or updates a managed cluster snapshot."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2022-03-02-preview"
    accept = "application/json"

    # URL: substitute subscription, resource group, and snapshot name.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}',
    )
    formatted_url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, 'str', max_length=90, min_length=1
        ),
        resourceName=_SERIALIZER.url(
            "resource_name", resource_name, 'str', max_length=63, min_length=1,
            pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$',
        ),
    )

    # Query string: only the api-version parameter is required.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type is only sent when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=formatted_url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_update_tags_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request that updates tags on a managed cluster snapshot."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2022-03-02-preview"
    accept = "application/json"

    # URL: substitute subscription, resource group, and snapshot name.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}',
    )
    formatted_url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, 'str', max_length=90, min_length=1
        ),
        resourceName=_SERIALIZER.url(
            "resource_name", resource_name, 'str', max_length=63, min_length=1,
            pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$',
        ),
    )

    # Query string: only the api-version parameter is required.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type is only sent when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=formatted_url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request removing a managed cluster snapshot."""
    api_version = "2022-03-02-preview"
    accept = "application/json"

    # URL: substitute subscription, resource group, and snapshot name.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}',
    )
    formatted_url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, 'str', max_length=90, min_length=1
        ),
        resourceName=_SERIALIZER.url(
            "resource_name", resource_name, 'str', max_length=63, min_length=1,
            pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$',
        ),
    )

    # Query string: only the api-version parameter is required.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: ask for a JSON response.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
class ManagedClusterSnapshotsOperations(object):
    """ManagedClusterSnapshotsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerservice.v2022_03_02_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client, (de)serializers and configuration are injected by
        # the owning service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> Iterable["_models.ManagedClusterSnapshotListResult"]:
        """Gets a list of managed cluster snapshots in the specified subscription.

        Gets a list of managed cluster snapshots in the specified subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterSnapshotListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshotListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterSnapshotListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            if not next_link:
                # First page: build from the operation's URL template.
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # Subsequent pages: the service supplies the full next_link URL.
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; return (next_link, iterator over the items).
            deserialized = self._deserialize("ManagedClusterSnapshotListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page and surface any HTTP error as an ARM-formatted exception.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedclustersnapshots'}  # type: ignore

    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable["_models.ManagedClusterSnapshotListResult"]:
        """Lists managed cluster snapshots in the specified subscription and resource group.

        Lists managed cluster snapshots in the specified subscription and resource group.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterSnapshotListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshotListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterSnapshotListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            if not next_link:
                # First page: build from the operation's URL template.
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # Subsequent pages: the service supplies the full next_link URL.
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; return (next_link, iterator over the items).
            deserialized = self._deserialize("ManagedClusterSnapshotListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page and surface any HTTP error as an ARM-formatted exception.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots'}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.ManagedClusterSnapshot":
        """Gets a managed cluster snapshot.

        Gets a managed cluster snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedClusterSnapshot, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterSnapshot"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}'}  # type: ignore

    @distributed_trace
    def create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterSnapshot",
        **kwargs: Any
    ) -> "_models.ManagedClusterSnapshot":
        """Creates or updates a managed cluster snapshot.

        Creates or updates a managed cluster snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: The managed cluster snapshot to create or update.
        :type parameters:
         ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedClusterSnapshot, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterSnapshot"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'ManagedClusterSnapshot')

        request = build_create_or_update_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing snapshot, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}'}  # type: ignore

    @distributed_trace
    def update_tags(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.ManagedClusterSnapshot":
        """Updates tags on a managed cluster snapshot.

        Updates tags on a managed cluster snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.
        :type parameters: ~azure.mgmt.containerservice.v2022_03_02_preview.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedClusterSnapshot, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterSnapshot"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'TagsObject')

        request = build_update_tags_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            template_url=self.update_tags.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}'}  # type: ignore

    @distributed_trace
    def delete(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> None:
        """Deletes a managed cluster snapshot.

        Deletes a managed cluster snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = deleted, 204 = resource did not exist; both are success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}'}  # type: ignore
| 42.123438 | 201 | 0.678734 | [
"MIT"
] | Hamster-Huey/azure-cli-extensions | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_03_02_preview/operations/_managed_cluster_snapshots_operations.py | 26,959 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-10 12:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow the ``comment`` field on the ``Image`` model to be blank/NULL."""

    dependencies = [
        ('gram', '0004_auto_20190310_1510'),
    ]

    operations = [
        migrations.AlterField(
            model_name='image',
            name='comment',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| 21.619048 | 58 | 0.614537 | [
"MIT"
] | viisualworks/instanoir | gram/migrations/0005_auto_20190310_1523.py | 454 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Databricks hook.
This hook enable the submitting and running of jobs to the Databricks platform. Internally the
operators talk to the
``api/2.1/jobs/run-now``
`endpoint <https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow>_`
or the ``api/2.1/jobs/runs/submit``
`endpoint <https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit>`_.
"""
from typing import Any, Dict, List, Optional
from requests import exceptions as requests_exceptions
from airflow.exceptions import AirflowException
from airflow.providers.databricks.hooks.databricks_base import BaseDatabricksHook
# (HTTP method, path) pairs for the Databricks REST API endpoints used by this hook.
RESTART_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/restart")
START_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/start")
TERMINATE_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/delete")

# Jobs API 2.1 endpoints.
RUN_NOW_ENDPOINT = ('POST', 'api/2.1/jobs/run-now')
SUBMIT_RUN_ENDPOINT = ('POST', 'api/2.1/jobs/runs/submit')
GET_RUN_ENDPOINT = ('GET', 'api/2.1/jobs/runs/get')
CANCEL_RUN_ENDPOINT = ('POST', 'api/2.1/jobs/runs/cancel')
OUTPUT_RUNS_JOB_ENDPOINT = ('GET', 'api/2.1/jobs/runs/get-output')

INSTALL_LIBS_ENDPOINT = ('POST', 'api/2.0/libraries/install')
UNINSTALL_LIBS_ENDPOINT = ('POST', 'api/2.0/libraries/uninstall')

LIST_JOBS_ENDPOINT = ('GET', 'api/2.1/jobs/list')

WORKSPACE_GET_STATUS_ENDPOINT = ('GET', 'api/2.0/workspace/get-status')

# All life-cycle states the Jobs API can report for a run.
RUN_LIFE_CYCLE_STATES = ['PENDING', 'RUNNING', 'TERMINATING', 'TERMINATED', 'SKIPPED', 'INTERNAL_ERROR']
class RunState:
    """Utility class for the run state concept of Databricks runs."""

    def __init__(
        self, life_cycle_state: str, result_state: str = '', state_message: str = '', *args, **kwargs
    ) -> None:
        # Extra positional/keyword arguments are accepted (and ignored) so that
        # new fields in the API response do not break construction.
        self.life_cycle_state = life_cycle_state
        self.result_state = result_state
        self.state_message = state_message

    @property
    def is_terminal(self) -> bool:
        """True if the current state is a terminal state."""
        if self.life_cycle_state in RUN_LIFE_CYCLE_STATES:
            return self.life_cycle_state in ('TERMINATED', 'SKIPPED', 'INTERNAL_ERROR')
        # Unknown state: most likely a value added by Databricks after this
        # hook was written.
        raise AirflowException(
            (
                'Unexpected life cycle state: {}: If the state has '
                'been introduced recently, please check the Databricks user '
                'guide for troubleshooting information'
            ).format(self.life_cycle_state)
        )

    @property
    def is_successful(self) -> bool:
        """True if the result state is SUCCESS"""
        return self.result_state == 'SUCCESS'

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, RunState):
            return NotImplemented
        mine = (self.life_cycle_state, self.result_state, self.state_message)
        theirs = (other.life_cycle_state, other.result_state, other.state_message)
        return mine == theirs

    def __repr__(self) -> str:
        return str(self.__dict__)
class DatabricksHook(BaseDatabricksHook):
"""
Interact with Databricks.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:param retry_limit: The number of times to retry the connection in case of
service outages.
:param retry_delay: The number of seconds to wait between retries (it
might be a floating point number).
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
"""
hook_name = 'Databricks'
    def __init__(
        self,
        databricks_conn_id: str = BaseDatabricksHook.default_conn_name,
        timeout_seconds: int = 180,
        retry_limit: int = 3,
        retry_delay: float = 1.0,
        retry_args: Optional[Dict[Any, Any]] = None,
    ) -> None:
        """Delegate all connection/retry configuration to ``BaseDatabricksHook``."""
        super().__init__(databricks_conn_id, timeout_seconds, retry_limit, retry_delay, retry_args)
    def run_now(self, json: dict) -> int:
        """
        Utility function to call the ``api/2.1/jobs/run-now`` endpoint.

        :param json: The data used in the body of the request to the ``run-now`` endpoint.
        :return: the run_id as an int
        :rtype: int
        """
        response = self._do_api_call(RUN_NOW_ENDPOINT, json)
        return response['run_id']
    def submit_run(self, json: dict) -> int:
        """
        Utility function to call the ``api/2.1/jobs/runs/submit`` endpoint.

        :param json: The data used in the body of the request to the ``submit`` endpoint.
        :return: the run_id as an int
        :rtype: int
        """
        response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
        return response['run_id']
def list_jobs(self, limit: int = 25, offset: int = 0, expand_tasks: bool = False) -> List[Dict[str, Any]]:
"""
Lists the jobs in the Databricks Job Service.
:param limit: The limit/batch size used to retrieve jobs.
:param offset: The offset of the first job to return, relative to the most recently created job.
:param expand_tasks: Whether to include task and cluster details in the response.
:return: A list of jobs.
"""
has_more = True
jobs = []
while has_more:
json = {
'limit': limit,
'offset': offset,
'expand_tasks': expand_tasks,
}
response = self._do_api_call(LIST_JOBS_ENDPOINT, json)
jobs += response['jobs'] if 'jobs' in response else []
has_more = response.get('has_more', False)
if has_more:
offset += len(response['jobs'])
return jobs
def find_job_id_by_name(self, job_name: str) -> Optional[int]:
"""
Finds job id by its name. If there are multiple jobs with the same name, raises AirflowException.
:param job_name: The name of the job to look up.
:return: The job_id as an int or None if no job was found.
"""
all_jobs = self.list_jobs()
matching_jobs = [j for j in all_jobs if j['settings']['name'] == job_name]
if len(matching_jobs) > 1:
raise AirflowException(
f"There are more than one job with name {job_name}. Please delete duplicated jobs first"
)
if not matching_jobs:
return None
else:
return matching_jobs[0]['job_id']
def get_run_page_url(self, run_id: int) -> str:
"""
Retrieves run_page_url.
:param run_id: id of the run
:return: URL of the run page
"""
json = {'run_id': run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response['run_page_url']
def get_job_id(self, run_id: int) -> int:
"""
Retrieves job_id from run_id.
:param run_id: id of the run
:return: Job id for given Databricks run
"""
json = {'run_id': run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response['job_id']
def get_run_state(self, run_id: int) -> RunState:
"""
Retrieves run state of the run.
Please note that any Airflow tasks that call the ``get_run_state`` method will result in
failure unless you have enabled xcom pickling. This can be done using the following
environment variable: ``AIRFLOW__CORE__ENABLE_XCOM_PICKLING``
If you do not want to enable xcom pickling, use the ``get_run_state_str`` method to get
a string describing state, or ``get_run_state_lifecycle``, ``get_run_state_result``, or
``get_run_state_message`` to get individual components of the run state.
:param run_id: id of the run
:return: state of the run
"""
json = {'run_id': run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
state = response['state']
return RunState(**state)
def get_run_state_str(self, run_id: int) -> str:
"""
Return the string representation of RunState.
:param run_id: id of the run
:return: string describing run state
"""
state = self.get_run_state(run_id)
run_state_str = (
f"State: {state.life_cycle_state}. Result: {state.result_state}. {state.state_message}"
)
return run_state_str
def get_run_state_lifecycle(self, run_id: int) -> str:
"""
Returns the lifecycle state of the run
:param run_id: id of the run
:return: string with lifecycle state
"""
return self.get_run_state(run_id).life_cycle_state
def get_run_state_result(self, run_id: int) -> str:
"""
Returns the resulting state of the run
:param run_id: id of the run
:return: string with resulting state
"""
return self.get_run_state(run_id).result_state
def get_run_state_message(self, run_id: int) -> str:
"""
Returns the state message for the run
:param run_id: id of the run
:return: string with state message
"""
return self.get_run_state(run_id).state_message
def get_run_output(self, run_id: int) -> dict:
"""
Retrieves run output of the run.
:param run_id: id of the run
:return: output of the run
"""
json = {'run_id': run_id}
run_output = self._do_api_call(OUTPUT_RUNS_JOB_ENDPOINT, json)
return run_output
def cancel_run(self, run_id: int) -> None:
"""
Cancels the run.
:param run_id: id of the run
"""
json = {'run_id': run_id}
self._do_api_call(CANCEL_RUN_ENDPOINT, json)
    def restart_cluster(self, json: dict) -> None:
        """
        Restarts the cluster.

        Thin wrapper around the cluster-restart API endpoint; the payload is
        forwarded unchanged.

        :param json: json dictionary containing cluster specification.
        """
        self._do_api_call(RESTART_CLUSTER_ENDPOINT, json)
    def start_cluster(self, json: dict) -> None:
        """
        Starts the cluster.

        Thin wrapper around the cluster-start API endpoint; the payload is
        forwarded unchanged.

        :param json: json dictionary containing cluster specification.
        """
        self._do_api_call(START_CLUSTER_ENDPOINT, json)
    def terminate_cluster(self, json: dict) -> None:
        """
        Terminates the cluster.

        Thin wrapper around the cluster-terminate API endpoint; the payload is
        forwarded unchanged.

        :param json: json dictionary containing cluster specification.
        """
        self._do_api_call(TERMINATE_CLUSTER_ENDPOINT, json)
    def install(self, json: dict) -> None:
        """
        Install libraries on the cluster.

        Utility function to call the ``2.0/libraries/install`` endpoint.

        :param json: json dictionary containing cluster_id and an array of library
        """
        self._do_api_call(INSTALL_LIBS_ENDPOINT, json)
    def uninstall(self, json: dict) -> None:
        """
        Uninstall libraries on the cluster.

        Utility function to call the ``2.0/libraries/uninstall`` endpoint.

        :param json: json dictionary containing cluster_id and an array of library
        """
        self._do_api_call(UNINSTALL_LIBS_ENDPOINT, json)
def update_repo(self, repo_id: str, json: Dict[str, Any]) -> dict:
"""
Updates given Databricks Repos
:param repo_id: ID of Databricks Repos
:param json: payload
:return: metadata from update
"""
repos_endpoint = ('PATCH', f'api/2.0/repos/{repo_id}')
return self._do_api_call(repos_endpoint, json)
def delete_repo(self, repo_id: str):
"""
Deletes given Databricks Repos
:param repo_id: ID of Databricks Repos
:return:
"""
repos_endpoint = ('DELETE', f'api/2.0/repos/{repo_id}')
self._do_api_call(repos_endpoint)
def create_repo(self, json: Dict[str, Any]) -> dict:
"""
Creates a Databricks Repos
:param json: payload
:return:
"""
repos_endpoint = ('POST', 'api/2.0/repos')
return self._do_api_call(repos_endpoint, json)
def get_repo_by_path(self, path: str) -> Optional[str]:
"""
Obtains Repos ID by path
:param path: path to a repository
:return: Repos ID if it exists, None if doesn't.
"""
try:
result = self._do_api_call(WORKSPACE_GET_STATUS_ENDPOINT, {'path': path}, wrap_http_errors=False)
if result.get('object_type', '') == 'REPO':
return str(result['object_id'])
except requests_exceptions.HTTPError as e:
if e.response.status_code != 404:
raise e
return None
| 35.209424 | 110 | 0.635167 | [
"Apache-2.0"
] | AMS-Kepler/airflow | airflow/providers/databricks/hooks/databricks.py | 13,450 | Python |
import pytorch_lightning as pl
from pytorch_lightning.utilities.parsing import lightning_getattr, lightning_setattr
class MultipleShootingCallback(pl.Callback):
    """This callback increases the length of the training sequences each epoch.

    This technique is well known in the SciML community and documented in their tutorials
    [1] as a way to avoid falling into local minima when training ODE based models. We can
    also see this as an instance of multiple shooting [2, 3] in the data space, where the
    penalty function enforcing the equality constraints at the splitting points is equal
    to the loss function.

    Note that the number of target steps will never increase over the initial number of
    target steps configured in the data module.

    [1] https://diffeqflux.sciml.ai/dev/examples/local_minima/
    [2] https://diffeqflux.sciml.ai/dev/examples/multiple_shooting/
    [3] Evren Mert Turan, Johannes Jäschke, "Multiple shooting for training neural
        differential equations on time series", https://arxiv.org/abs/2109.06786

    Attributes
    ----------
    initial_steps
        Number of target steps in the first epoch
    increase
        The target steps increase by this much in each following epoch
    target_steps_attr
        Name of the data module attribute that should be modified
    """

    def __init__(
        self,
        *,
        initial_steps: int = 3,
        increase: int = 1,
        target_steps_attr: str = "train_target_steps",
    ):
        super().__init__()
        self.initial_steps = initial_steps
        self.increase = increase
        self.target_steps_attr = target_steps_attr
        # Filled in on_train_start with the data module's configured value; acts as a cap.
        self.initial_target_steps = None

    def on_train_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        # Remember the configured maximum, then shrink to the starting value.
        self.initial_target_steps = lightning_getattr(pl_module, self.target_steps_attr)
        # The trainer selects the train dataloader before train_epoch_start fires,
        # so the initial value has to be installed here.
        lightning_setattr(pl_module, self.target_steps_attr, self.initial_steps)
        trainer.reset_train_dataloader(pl_module)

    def on_train_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        # Log the current sequence length once per epoch.
        current = lightning_getattr(pl_module, self.target_steps_attr)
        pl_module.log(
            self.target_steps_attr,
            float(current),
            on_step=False,
            on_epoch=True,
        )

    def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        # The dataloader for the next epoch is built before its train_epoch_start,
        # so the new length is installed at the end of the current epoch.
        current = lightning_getattr(pl_module, self.target_steps_attr)
        proposed = current + self.increase
        if self.initial_target_steps is not None:
            proposed = min(proposed, self.initial_target_steps)
        if proposed != current:
            lightning_setattr(pl_module, self.target_steps_attr, proposed)
            trainer.reset_train_dataloader(pl_module)
| 43.5 | 90 | 0.715837 | [
"MIT"
] | martenlienen/finite-element-networks | finite_element_networks/lightning/callbacks.py | 3,133 | Python |
import os
import sys
sys.path.append('/trinity/home/m.gasanov/monica/sensitivity/sobol/SALib')
from SALib.analyze import sobol
from SALib.sample import saltelli
from SALib.test_functions import Ishigami
from SALib.util import read_param_file
from numpy import genfromtxt
# Sobol sensitivity analysis of soil-pH parameters (pH at nine depths) against
# pre-computed MONICA model outputs stored in per-crop CSV files.
#problem = read_param_file('../../src/SALib/test_functions/params/Ishigami.txt')
problem = {
    'num_vars': 9,
    'names': [f'pH{d}0' for d in range(1, 10)],  # 'pH10' .. 'pH90'
    'bounds': [[3.0, 9.0] for _ in range(9)],
}

param_values = saltelli.sample(problem, 100000)

# Model output files, one per crop/season.
csv_files = [
    'soybean-000-2015.csv',
    'sugar-beet-2011.csv',
    'sugar-beet-2017.csv',
    'spring-barley-2012.csv',
    'sugar-beet-2014.csv',
]

# The model runs happened offline; here we only analyze the stored outputs.
# Column 2 (zero-indexed) of each CSV holds the model output of interest.
for csv_name in csv_files:
    data = genfromtxt(str(csv_name), delimiter=',')
    model_output = data[:, 2]
    print(csv_name)
    Si = sobol.analyze(problem, model_output, calc_second_order=True, conf_level=0.95, print_to_console=True)

# Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf'
# e.g. Si['S1'] contains the first-order index for each parameter,
# in the same order as the parameter file
# The optional second-order indices are returned in keys 'S2', 'S2_conf' as
# upper triangular DxD matrices with nan's in the duplicate entries.
# Optional keyword arguments parallel=True and n_processors=(int) enable
# parallel execution using multiprocessing.
| 36.472727 | 103 | 0.674477 | [
"Apache-2.0"
] | mishagrol/SA_agro_model | Sobol_SA_horizont/CN/sobol.py | 2,006 | Python |
from .Detector import Detector
import time
import numpy as np
import PyTango
class LC400Buffer(Detector):
    """
    Class representing the LC400 piezo machine under the
    control of the LC400ScanControl Tango device, used for
    reading the flyscan positions.
    """

    def __init__(self, name=None, device=None, xaxis=2, yaxis=3, zaxis=1):
        """
        :param name: detector name passed to the Detector base class
        :param device: Tango device name of the LC400ScanControl server
        :param xaxis: LC400 axis number mapped to logical x
        :param yaxis: LC400 axis number mapped to logical y
        :param zaxis: LC400 axis number mapped to logical z
        """
        self.proxy = PyTango.DeviceProxy(device)
        Detector.__init__(self, name=name)
        self.xaxis = xaxis
        self.yaxis = yaxis
        self.zaxis = zaxis

    def initialize(self):
        """Re-initialize the underlying Tango device."""
        self.proxy.init()

    def stop(self):
        """Stop the scan control device."""
        self.proxy.Stop()

    def busy(self):
        """Return True unless the device is in STANDBY or ON."""
        ok_states = (PyTango.DevState.STANDBY, PyTango.DevState.ON)
        return not (self.proxy.State() in ok_states)

    def __emergency_backup(self):
        # Grab these values in case we have to restart and reset the device servers.
        grab_keys = ("FlyScanMotorStartPosition", "FlyScanMotorEndPosition",
                     "NumberOfIntervals", "GateWidth", "GateLatency",
                     "FlyScanMotorAxis")
        self.sc_params = {
            k: self.proxy.read_attribute(k).value for k in grab_keys}

    def __emergency_recover(self):
        # Hard-restart both the motor and scan-control device servers, then
        # restore the scan parameters saved by __emergency_backup.
        ec0 = PyTango.DeviceProxy('tango/admin/b-v-nanomax-ec-0')
        ioc = PyTango.DeviceProxy('tango/admin/b-nanomax-ec-6')
        ec0.HardKillServer('LC400ScanControl/B303A')
        ioc.HardKillServer('NpointLC400/B303A')
        print('Killing the npoint devices and waiting...')
        for i in range(10):
            print('*')
            time.sleep(1)
        ioc.DevStart('NpointLC400/B303A')
        print('Starting the npoint motor device and waiting...')
        for i in range(10):
            print('*')
            time.sleep(1)
        ec0.DevStart('LC400ScanControl/B303A')
        print('Starting the npoint scancontrol device and waiting...')
        for i in range(10):
            print('*')
            time.sleep(1)
        self.initialize()
        for k, v in self.sc_params.items():
            self.proxy.write_attribute(k, v)
        self.proxy.ConfigureLC400Motion()
        self.proxy.ConfigureLC400Recorder()
        self.proxy.ConfigureStanford()

    def read(self):
        """
        Read the recorded flyscan positions, mapping LC400 axis numbers to
        logical x/y/z. On a Tango failure, recover the device servers and
        return arrays of -1 of the last known length instead.
        """
        self.__emergency_backup()
        try:
            self.proxy.ReadLC400Buffer()
            data = {1: self.proxy.Axis1Positions,
                    2: self.proxy.Axis2Positions,
                    3: self.proxy.Axis3Positions}
            self.length = len(data[1])
        except PyTango.DevFailed:
            self.__emergency_recover()
            # np.float was a deprecated alias for the builtin float and was
            # removed in NumPy 1.24; use float directly.
            fake = np.ones(self.length, dtype=float) * -1
            data = {i: fake for i in (1, 2, 3)}
        return {'x': data[self.xaxis],
                'y': data[self.yaxis],
                'z': data[self.zaxis]}

    def start(self):
        """
        Placeholder, this detector just reads out whatever buffer is on the
        scancontrol device. That device is managed manually from macros.
        """
        pass
| 33.852273 | 76 | 0.595166 | [
"MIT"
] | alexbjorling/acquisition-framework | contrast/detectors/LC400Buffer.py | 2,979 | Python |
# For each test case, count pairs (i, j) with a[j] - a[i] == j - i,
# i.e. pairs sharing the same value of a[k] - k.
for _ in range(int(input())):
    n = int(input())
    values = list(map(int, input().split()))
    counts = {}
    for idx in range(n):
        key = values[idx] - idx
        counts[key] = counts.get(key, 0) + 1
    # Each group of size c contributes c choose 2 pairs.
    print(sum(c * (c - 1) // 2 for c in counts.values()))
"MIT"
] | mishrakeshav/Competitive-Programming | Code Forces/Round 719/D.py | 340 | Python |
# -*- coding: utf-8 -*-
"""OpenCTI valhalla connector core module."""
import os
import yaml
import time
from typing import Any, Dict, Mapping, Optional
from datetime import datetime
from .knowledge import KnowledgeImporter
from .models import Status
from pycti import OpenCTIConnectorHelper, get_config_variable
from stix2 import TLP_WHITE, TLP_AMBER
from valhallaAPI.valhalla import ValhallaAPI
class Valhalla:
    """OpenCTI valhalla main class.

    Polls the Valhalla API on a fixed interval and runs the knowledge importer
    whenever both the interval has elapsed and a newer Valhalla rule-set
    version is available. Progress is persisted via the OpenCTI helper state.
    """

    # NOTE(review): appears unused within this class — presumably a placeholder
    # demo key; confirm before removing.
    _DEMO_API_KEY = "1111111111111111111111111111111111111111111111111111111111111111"
    # Keys used in the persisted connector state dictionary.
    _STATE_LAST_RUN = "last_run"
    _VALHALLA_LAST_VERSION = "valhalla_last_version"

    def __init__(self):
        """Load config.yml (if present) and set up helper, client and importer."""
        # Instantiate the connector helper from config
        config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/../config.yml"
        config = (
            yaml.load(open(config_file_path), Loader=yaml.SafeLoader)
            if os.path.isfile(config_file_path)
            else {}
        )
        # Extra config
        self.confidence_level = get_config_variable(
            "CONNECTOR_CONFIDENCE_LEVEL",
            ["connector", "confidence_level"],
            config,
            isNumber=True,
        )
        self.update_existing_data = get_config_variable(
            "CONNECTOR_UPDATE_EXISTING_DATA",
            ["connector", "update_existing_data"],
            config,
        )
        self.API_KEY = get_config_variable(
            "VALHALLA_API_KEY", ["valhalla", "api_key"], config
        )
        self.INTERVAL_SEC = get_config_variable(
            "VALHALLA_INTERVAL_SEC", ["valhalla", "interval_sec"], config, isNumber=True
        )
        self.helper = OpenCTIConnectorHelper(config)
        self.helper.log_info(f"loaded valhalla config: {config}")

        # If we run without API key we can assume all data is TLP:WHITE else we
        # default to TLP:AMBER to be safe.
        if self.API_KEY == "" or self.API_KEY is None:
            self.default_marking = self.helper.api.marking_definition.read(
                id=TLP_WHITE["id"]
            )
            self.valhalla_client = ValhallaAPI()
        else:
            self.default_marking = self.helper.api.marking_definition.read(
                id=TLP_AMBER["id"]
            )
            self.valhalla_client = ValhallaAPI(api_key=self.API_KEY)

        self.knowledge_importer = KnowledgeImporter(
            self.helper,
            self.confidence_level,
            self.update_existing_data,
            self.default_marking,
            self.valhalla_client,
        )

    def run(self):
        """Main loop: poll Valhalla, import when due, persist state, sleep."""
        self.helper.log_info("starting valhalla connector...")
        while True:
            try:
                status_data = self.valhalla_client.get_status()
                api_status = Status.parse_obj(status_data)
                self.helper.log_info(f"current valhalla status: {api_status}")

                current_time = int(datetime.utcnow().timestamp())
                current_state = self._load_state()
                self.helper.log_info(f"loaded state: {current_state}")

                last_run = self._get_state_value(current_state, self._STATE_LAST_RUN)
                last_valhalla_version = self._get_state_value(
                    current_state, self._VALHALLA_LAST_VERSION
                )

                # Import only when the interval elapsed AND the API reports a
                # newer rule-set version than the one we last processed.
                if self._is_scheduled(last_run, current_time) and self._check_version(
                    last_valhalla_version, api_status.version
                ):
                    self.helper.log_info("running importers")
                    knowledge_importer_state = self._run_knowledge_importer(
                        current_state
                    )
                    self.helper.log_info("done with running importers")

                    new_state = current_state.copy()
                    new_state.update(knowledge_importer_state)
                    new_state[self._STATE_LAST_RUN] = int(datetime.utcnow().timestamp())
                    new_state[self._VALHALLA_LAST_VERSION] = api_status.version

                    self.helper.log_info(f"storing new state: {new_state}")
                    self.helper.set_state(new_state)
                    self.helper.log_info(
                        f"state stored, next run in: {self._get_interval()} seconds"
                    )
                else:
                    new_interval = self._get_interval() - (current_time - last_run)
                    self.helper.log_info(
                        f"connector will not run, next run in: {new_interval} seconds"
                    )

                # After a successful run pause at least 60sec
                time.sleep(60)

            except (KeyboardInterrupt, SystemExit):
                self.helper.log_info("connector stop")
                exit(0)
            except Exception as e:
                # NOTE(review): any unexpected error terminates the process
                # with exit code 0 — confirm this is the intended restart
                # behavior for the deployment.
                self.helper.log_error(str(e))
                exit(0)

    def _run_knowledge_importer(
        self, current_state: Mapping[str, Any]
    ) -> Mapping[str, Any]:
        """Delegate one import pass to the knowledge importer."""
        return self.knowledge_importer.run(current_state)

    def _get_interval(self) -> int:
        """Polling interval in seconds, as configured."""
        return int(self.INTERVAL_SEC)

    def _load_state(self) -> Dict[str, Any]:
        """Return the persisted connector state, or an empty dict."""
        current_state = self.helper.get_state()
        if not current_state:
            return {}
        return current_state

    @staticmethod
    def _get_state_value(
        state: Optional[Mapping[str, Any]], key: str, default: Optional[Any] = None
    ) -> Any:
        """Safe lookup in a possibly-None state mapping."""
        if state is not None:
            return state.get(key, default)
        return default

    def _is_scheduled(self, last_run: Optional[int], current_time: int) -> bool:
        """True when no previous run is recorded or the interval has elapsed."""
        if last_run is None:
            return True
        time_diff = current_time - last_run
        return time_diff >= self._get_interval()

    def _check_version(self, last_version: Optional[int], current_version: int) -> bool:
        """True when the API rule-set version is newer than the stored one."""
        if last_version is None:
            return True
        return current_version > last_version
| 36.157576 | 88 | 0.599397 | [
"Apache-2.0"
] | DEMON1A/connectors | valhalla/src/valhalla/core.py | 5,966 | Python |
from interp_marcs_alpha_v6 import interp_marcs
import numpy as np
import time
# Paths to the MARCS model-atmosphere grid and where to write interpolated models.
input_model_path='/project2/alexji/MARCS'
output_model_path='test-MARCS'

# Full parameter grids (teff/logg/[Fe/H]/[alpha/Fe]) — note these are
# immediately overridden by the smaller test sets assigned just below.
teff_arr = [3200,3300,3400,3500,3600,3700,3800,3900,4000,4250,4500,4750,
            5000]
logg_arr = np.arange(0., 5.5, 0.5)
feh_arr = np.arange(-4., 1.5, 0.5)
alphafe_arr = [-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.,
               0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]

# Small off-grid test values used for this run (override the grids above).
teff_arr = [3400,4000,4281,5000]
logg_arr = [0.0, 0.47, 0.88,2.0]
feh_arr = [-3.77, -2.85, -1.23,-1.5]
alphafe_arr = [-0.77, -0.12, 0.23, 0.66,0.4]

if __name__=="__main__":
    start = time.time()
    # Interpolate a spherical-geometry model for every parameter combination,
    # skipping files that already exist and allowing extrapolation.
    for teff in teff_arr:
        for logg in logg_arr:
            for feh in feh_arr:
                for alphafe in alphafe_arr:
                    print(teff, logg, feh, alphafe)
                    interp_marcs(teff, logg, feh, alphafe,
                                 output_model_path=output_model_path,
                                 input_model_path=input_model_path,
                                 check_file_exists=True, extrapol=True,
                                 geometry='sph')
    print("took",time.time()-start,"s")
| 36.472222 | 78 | 0.493526 | [
"MIT"
] | alexji/turbopy | turbopy/interp_marcs/testmies.py | 1,313 | Python |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Provide stub objects that can act as stand-in "dummy" datasets for simple use
cases, like getting all classes in a dataset. This exists so that demos can be
run without requiring users to download/install datasets first.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from utils.collections import AttrDict
def get_coco_dataset():
    """A dummy COCO dataset that includes only the 'classes' field."""
    ds = AttrDict()
    # The 80 COCO categories plus the background class at index 0; list order
    # defines the class-index mapping, so it must not change.
    classes = [
        '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
        'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
        'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
        'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
        'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
        'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
        'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
        'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]
    ds.classes = dict(enumerate(classes))
    return ds
| 48.5625 | 81 | 0.628486 | [
"MIT"
] | Bigwode/FPN-Pytorch | lib/datasets/dummy_datasets.py | 2,331 | Python |
import logging
from django.utils.decorators import method_decorator
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg.utils import swagger_auto_schema
from rest_condition import Or
from rest_framework import filters, viewsets
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import AllowAny, IsAuthenticated
from base.documentation import jwt_header
from users.models import Device
from users.serializers import DeviceSerializer, DeviceListSerializer
logger = logging.getLogger('user_app')
@method_decorator(name='list', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
@method_decorator(name='create', decorator=swagger_auto_schema(manual_parameters=[jwt_header]))
class DeviceViewSet(viewsets.ModelViewSet):
    """
    ViewSet for Device records.

    Only GET (list/retrieve) and POST (create) are exposed. Admin users see
    every device; other authenticated users see only their own.
    """
    permission_classes = [IsAuthenticated]
    authentication_classes = [JWTAuthentication]
    queryset = Device.objects.all()
    serializer_class = DeviceSerializer
    # Allow both free-text search and exact filtering by user id / username.
    filter_backends = [filters.SearchFilter, DjangoFilterBackend]
    search_fields = ['user_id', 'user__username']
    filter_fields = ['user_id', 'user__username']
    http_method_names = ['get', 'post']

    def get_queryset(self):
        # NOTE(review): this calls IsAdminUser.has_permission unbound, passing
        # the viewset as the permission instance and a view-name string as
        # `view` — it only works as long as IsAdminUser inspects nothing but
        # request.user; confirm before upgrading DRF.
        if IsAdminUser.has_permission(self, request=self.request, view=self.get_view_name()):
            return super().get_queryset()
        return super().get_queryset().filter(user=self.request.user)

    # def get_permissions(self):
    #     return [permission() for permission in permission_classes]

    def get_serializer_class(self):
        # The list action uses the lighter list serializer.
        if self.action in ['list']:
            return DeviceListSerializer
        return DeviceSerializer
class DeviceList(APIView):
    """
    An API endpoint for Getting device list
    """
    permission_classes = [AllowAny]

    def get(self, request, user_id):
        # select_related avoids one query per device when serializing the user.
        devices = Device.objects.filter(user_id=user_id).select_related('user')
        serializer = DeviceListSerializer(
            devices, context={'request': request}, many=True
        )
        payload = serializer.data
        if payload:
            return Response(payload)
        return Response({'message': f"No Device data found", 'data': []})
"MIT"
] | System-Design-2/user-service | src/users/views/user.py | 2,515 | Python |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from utils.ie_tools import load_ie_model
class Detector:
    """Wrapper class for detector"""

    def __init__(self, model_path, conf=.6, device='CPU', ext_path='', max_num_frames=1):
        self.net = load_ie_model(model_path, device, None, ext_path, num_reqs=max_num_frames)
        self.confidence = conf
        self.expand_ratio = (1., 1.)
        self.max_num_frames = max_num_frames

    def get_detections(self, frames):
        """Returns all detections on frames"""
        assert len(frames) <= self.max_num_frames
        # Submit all frames asynchronously, then collect every result.
        for frame in frames:
            self.net.forward_async(frame)
        raw_outputs = self.net.grab_all_async()
        all_detections = []
        for idx, out in enumerate(raw_outputs):
            all_detections.append(self.__decode_detections(out, frames[idx].shape))
        return all_detections

    def __decode_detections(self, out, frame_shape):
        """Decodes raw SSD output"""
        frame_h, frame_w = frame_shape[0], frame_shape[1]
        detections = []
        for det in out[0, 0]:
            score = det[2]
            if score <= self.confidence:
                continue
            # Detection coords are normalized; scale to pixels and clamp at 0.
            left = int(max(det[3], 0) * frame_w)
            top = int(max(det[4], 0) * frame_h)
            right = int(max(det[5], 0) * frame_w)
            bottom = int(max(det[6], 0) * frame_h)
            if self.expand_ratio != (1., 1.):
                # Grow the box symmetrically by the configured ratio.
                box_w = right - left
                box_h = bottom - top
                dw = box_w * (self.expand_ratio[0] - 1.) / 2
                dh = box_h * (self.expand_ratio[1] - 1.) / 2
                left = max(int(left - dw), 0)
                right = int(right + dw)
                top = max(int(top - dh), 0)
                bottom = int(bottom + dh)
            detections.append(((left, top, right, bottom), score))

        if len(detections) > 1:
            detections.sort(key=lambda d: d[1], reverse=True)

        return detections
class VectorCNN:
    """Wrapper class for a network returning a vector"""

    def __init__(self, model_path, device='CPU', max_reqs=100):
        self.max_reqs = max_reqs
        self.net = load_ie_model(model_path, device, None, num_reqs=self.max_reqs)

    def forward(self, batch):
        """Performs forward of the underlying network on a given batch"""
        assert len(batch) <= self.max_reqs
        # Queue every frame asynchronously, then gather all results at once.
        for frame in batch:
            self.net.forward_async(frame)
        return self.net.grab_all_async()
| 38.333333 | 94 | 0.590373 | [
"MIT"
] | 565353780/open-vino | multi_camera_multi_person_tracking/utils/network_wrappers.py | 3,220 | Python |
import carla
from carla import ColorConverter as cc
from ROAR_Sim.carla_client.util.sensors import IMUSensor
from Bridges.bridge import Bridge
from typing import Union
from ROAR.utilities_module.vehicle_models import (
VehicleControl,
Vehicle,
)
from ROAR.utilities_module.data_structures_models import (
Location,
Rotation,
RGBData,
DepthData,
SensorsData,
IMUData,
Vector3D,
Transform,
)
from ROAR.utilities_module.utilities import png_to_depth
import numpy as np
import cv2
class CarlaBridge(Bridge):
    """Translates between CARLA's data types/conventions and the agent's.

    The source→agent and agent→source methods are deliberately symmetric:
    every axis or angle swap applied in one direction is mirrored in the
    inverse method.
    """

    def convert_location_from_source_to_agent(self, source: carla.Location) -> Location:
        """
        Convert Location data from Carla.location to Agent's lcoation data type
        invert the Z axis to make it into right hand coordinate system
        Args:
            source: carla.location

        Returns:

        """
        # NOTE(review): this swaps y and z (agent y = CARLA z, agent z = CARLA y)
        # rather than negating an axis; the docstring's "invert" wording is
        # loose. The inverse mapping in convert_location_from_agent_to_source
        # mirrors the same swap.
        return Location(x=source.x, y=source.z, z=source.y)

    def convert_rotation_from_source_to_agent(self, source: carla.Rotation) -> Rotation:
        """Convert a CARLA raw rotation to Rotation(pitch=float,yaw=float,roll=float)."""
        # pitch and yaw are intentionally exchanged; mirrored in
        # convert_rotation_from_agent_to_source.
        return Rotation(pitch=source.yaw, yaw=source.pitch, roll=source.roll)

    def convert_transform_from_source_to_agent(
        self, source: carla.Transform
    ) -> Transform:
        """Convert CARLA raw location and rotation to Transform(location,rotation)."""
        return Transform(
            location=self.convert_location_from_source_to_agent(source=source.location),
            rotation=self.convert_rotation_from_source_to_agent(source=source.rotation),
        )

    def convert_control_from_source_to_agent(
        self, source: carla.VehicleControl
    ) -> VehicleControl:
        """Convert CARLA raw vehicle control to VehicleControl(throttle,steering)."""
        # Reverse gear is encoded as negative throttle on the agent side.
        return VehicleControl(
            throttle=-1 * source.throttle if source.reverse else source.throttle,
            steering=source.steer,
        )

    def convert_rgb_from_source_to_agent(
        self, source: carla.Image
    ) -> Union[RGBData, None]:
        """Convert CARLA raw Image to a Union with RGB numpy array"""
        # Returns None when the conversion fails (e.g. sensor not ready).
        try:
            source.convert(cc.Raw)
            return RGBData(data=self._to_rgb_array(source))
        except:
            return None

    def convert_depth_from_source_to_agent(
        self, source: carla.Image
    ) -> Union[DepthData, None]:
        """Convert CARLA raw depth info to """
        try:
            array = np.frombuffer(source.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (source.height, source.width, 4))  # BGRA
            array = array[:, :, :3]  # BGR
            array = array[:, :, ::-1]  # RGB
            # array = array.swapaxes(0, 1)
            # Decode CARLA's 24-bit encoded depth image into a float array.
            array = png_to_depth(array)
            # print(array[350][160], array[350][688])
            return DepthData(data=array)
        except:
            return None

    def convert_vector3d_from_source_to_agent(self, source: carla.Vector3D) -> Vector3D:
        # No axis swap here — a straight component copy.
        return Vector3D(x=source.x, y=source.y, z=source.z)

    def convert_imu_from_source_to_agent(self, source: IMUSensor) -> IMUData:
        # IMUSensor exposes accelerometer/gyroscope as 3-element sequences.
        return IMUData(
            accelerometer=Vector3D(
                x=source.accelerometer[0],
                y=source.accelerometer[1],
                z=source.accelerometer[2],
            ),
            gyroscope=Vector3D(
                x=source.gyroscope[0], y=source.gyroscope[1], z=source.gyroscope[2]
            ),
        )

    def convert_sensor_data_from_source_to_agent(self, source: dict) -> SensorsData:
        # Each converter tolerates a missing (None) entry and returns None data.
        return SensorsData(
            front_rgb=self.convert_rgb_from_source_to_agent(
                source=source.get("front_rgb", None)
            ),
            rear_rgb=self.convert_rgb_from_source_to_agent(
                source=source.get("rear_rgb", None)
            ),
            front_depth=self.convert_depth_from_source_to_agent(
                source=source.get("front_depth", None)
            ),
            imu_data=self.convert_imu_from_source_to_agent(
                source=source.get("imu", None)
            ),
        )

    def convert_vehicle_from_source_to_agent(self, source: carla.Vehicle) -> Vehicle:
        control: VehicleControl = self.convert_control_from_source_to_agent(
            source.get_control()
        )
        # this is cheating here, vehicle does not know its own location
        transform: Transform = self.convert_transform_from_source_to_agent(
            source.get_transform()
        )
        velocity: Vector3D = self.convert_vector3d_from_source_to_agent(
            source.get_velocity()
        )
        return Vehicle(velocity=velocity, transform=transform, control=control)

    def convert_control_from_agent_to_source(
        self, control: VehicleControl
    ) -> carla.VehicleControl:
        # Inverse of convert_control_from_source_to_agent: negative throttle
        # maps back to reverse gear with positive throttle magnitude.
        return carla.VehicleControl(
            throttle=abs(control.throttle),
            steer=control.steering,
            brake=0,
            hand_brake=False,
            reverse=True if control.throttle < 0 else False,
            manual_gear_shift=False,
            gear=1,
        )

    def convert_vector3d_from_agent_to_source(
        self, vector3d: Vector3D
    ) -> carla.Vector3D:
        return carla.Vector3D(x=vector3d.x, y=vector3d.y, z=vector3d.z)

    def convert_location_from_agent_to_source(self, source: Location) -> carla.Location:
        # Inverse of the source→agent y/z swap above.
        return carla.Location(x=source.x, y=source.z, z=source.y)

    def convert_rotation_from_agent_to_source(self, source: Rotation) -> carla.Rotation:
        # Inverse of the source→agent pitch/yaw swap above.
        return carla.Rotation(pitch=source.yaw, yaw=source.pitch, roll=source.roll)

    def convert_transform_from_agent_to_source(
        self, source: Transform
    ) -> carla.Transform:
        return carla.Transform(
            location=self.convert_location_from_agent_to_source(source=source.location),
            rotation=self.convert_rotation_from_agent_to_source(source=source.rotation),
        )

    def _to_bgra_array(self, image):
        """Convert a CARLA raw image to a BGRA numpy array."""
        if not isinstance(image, carla.Image):
            raise ValueError("Argument must be a carla.sensor.Image")
        array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
        array = np.reshape(array, (image.height, image.width, 4))
        return array

    def _to_rgb_array(self, image):
        """Convert a CARLA raw image to a RGB numpy array."""
        array = self._to_bgra_array(image)
        # Convert BGRA to RGB.
        array = array[:, :, :3]
        # array = array[:, :, ::-1]
        return array
| 36.338798 | 89 | 0.64 | [
"Apache-2.0"
] | Amanda-Chiang/ROAR | Bridges/carla_bridge.py | 6,650 | Python |
import importlib
import inspect
import logging
import os
from app.plugin import Hook, Command
# Directory (and package name) that plugin modules are discovered in.
PLUGINS_DIR = 'plugins'
def find_plugins():
    """Yield paths of all .py files under the plugins directory."""
    for root, _dirs, files in os.walk(PLUGINS_DIR):
        yield from (
            os.path.join(root, name) for name in files if name.endswith('.py')
        )
def load_plugins(hook_plugins, command_plugins):
    """Populates the plugin lists."""
    for path in find_plugins():
        try:
            module_name = os.path.splitext(os.path.basename(path))[0]
            module = importlib.import_module(PLUGINS_DIR + '.' + module_name)
            for attr_name in dir(module):
                attr = getattr(module, attr_name)
                # Only classes defined in the module itself count as plugins.
                if not inspect.isclass(attr) or inspect.getmodule(attr) != module:
                    continue
                if issubclass(attr, Hook):
                    hook_plugins.append(attr())
                elif issubclass(attr, Command):
                    command_plugins.append(attr())
        except (ImportError, NotImplementedError):
            # A module that fails to import or instantiate is skipped entirely.
            continue
def process_commands(input_obj, commands):
    """Load plugins and run each named command over input_obj, firing hooks."""
    logging.debug('Processing commands')
    hook_plugins = []
    command_plugins = []
    load_plugins(hook_plugins, command_plugins)
    for command_str in commands:
        matching = [p for p in command_plugins if command_str in p.names]
        for plugin in matching:
            for hook in hook_plugins:
                hook.before_handle(input_obj, plugin)
            # The handler's result feeds the next command in the chain.
            input_obj = plugin.handle(input_obj)
            for hook in hook_plugins:
                hook.after_handle(input_obj, plugin)
| 31.903846 | 84 | 0.607595 | [
"Apache-2.0"
] | glombard/python-plugin-experiment | app/processor.py | 1,659 | Python |
# -*- coding: utf-8 -*-
# TODO: find unused functions and kill them
import logging
import copy
import operator
import utool as ut
import vtool as vt
import numpy as np
import itertools as it
from functools import partial, reduce
from wbia.expt import cfghelpers
from wbia.expt import experiment_helpers
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
def build_cmsinfo(cm_list, qreq_):
    r"""
    Helper function to report results over multiple queries (chip matches).
    Basically given a group of queries of the same name, we only care if one of
    them is correct. This emulates encounters.

    Runs queries of a specific configuration returns the best rank of each
    query.

    Args:
        cm_list (list): list of chip matches
        qreq_ (QueryRequest): request that computed the chip matches.

    Returns:
        dict: cmsinfo - info about multiple chip matches cm_list

    CommandLine:
        python -m wbia get_query_result_info
        python -m wbia get_query_result_info:0 --db lynx \
            -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1
        python -m wbia get_query_result_info:0 --db lynx \
            -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1 --cmd

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.expt.test_result import *  # NOQA
        >>> import wbia
        >>> qreq_ = wbia.main_helpers.testdata_qreq_(a=[':qindex=0:3,dindex=0:5'])
        >>> cm_list = qreq_.execute()
        >>> cmsinfo = build_cmsinfo(cm_list, qreq_)
        >>> print(ut.repr2(cmsinfo))

    Ignore:
        wbia -e rank_cmc --db humpbacks -a :has_any=hasnotch,mingt=2 \
            -t :proot=BC_DTW --show --nocache-big
        wbia -e rank_cmc --db humpbacks -a :is_known=True,mingt=2 \
            -t :pipeline_root=BC_DTW
        wbia -e rank_cmc --db humpbacks -a :is_known=True \
            -t :pipeline_root=BC_DTW \
            --qaid=1,9,15,16,18 --daid-override=1,9,15,16,18,21,22 \
            --show --debug-depc
        --clear-all-depcache
    """
    ibs = qreq_.ibs
    qaids = qreq_.qaids
    daids = qreq_.daids

    # Get the groundtruth ranks and accuracy measures
    # (one summary dict per chip match; extended results when supported)
    qx2_cminfo = []
    for cm in cm_list:
        if hasattr(cm, 'extend_results'):
            cminfo = cm.extend_results(qreq_).summarize(qreq_)
        else:
            cminfo = cm.summarize(qreq_)
        qx2_cminfo.append(cminfo)
    # Stack per-query summary dicts into a dict of lists keyed 'qx2_*'
    cmsinfo = ut.dict_stack(qx2_cminfo, 'qx2_')
    # Queries with a missing (None) groundtruth rank are marked with -1
    cmsinfo['qx2_gt_rank'] = ut.replace_nones(cmsinfo['qx2_gt_rank'], -1)

    if False:
        # Dead branch: average-precision / mAP computation (disabled)
        qx2_gtaids = ibs.get_annot_groundtruth(qaids, daid_list=daids)
        qx2_avepercision = np.array(
            [
                cm.get_average_percision(ibs=ibs, gt_aids=gt_aids)
                for (cm, gt_aids) in zip(cm_list, qx2_gtaids)
            ]
        )
        cmsinfo['qx2_avepercision'] = qx2_avepercision
        # Compute mAP score # TODO: use mAP score
        # (Actually map score doesn't make much sense if using name scoring
        # mAP = qx2_avepercision[~np.isnan(qx2_avepercision)].mean() # NOQA

    qaids = qreq_.qaids
    # qaids2 = [cm.qaid for cm in cm_list]
    # qnids = qreq_.get_qreq_annot_nids(qaids) # TODO: use new nid getter
    qnids = ibs.get_annot_nids(qaids)

    unique_dnids = np.unique(ibs.get_annot_nids(qreq_.daids))

    unique_qnids, groupxs = ut.group_indices(qnids)
    # Group the chip matches by query name: each group emulates an encounter
    cm_group_list = ut.apply_grouping(cm_list, groupxs)
    qnid2_aggnamescores = {}

    qnx2_nameres_info = []

    # Ranked list aggregation over groups of query annots
    nameres_info_list = []
    for qnid, cm_group in zip(unique_qnids, cm_group_list):
        nid2_name_score_group = [
            dict([(nid, cm.name_score_list[nidx]) for nid, nidx in cm.nid2_nidx.items()])
            for cm in cm_group
        ]
        # Align each cm's name scores to the common database-name order;
        # -inf marks names the cm did not score at all
        aligned_name_scores = np.array(
            [
                ut.dict_take(nid_to_name_score, unique_dnids.tolist(), -np.inf)
                for nid_to_name_score in nid2_name_score_group
            ]
        ).T
        # Best score per database name over all queries in the group
        name_score_list = np.nanmax(aligned_name_scores, axis=1)
        qnid2_aggnamescores[qnid] = name_score_list
        # sort
        sortx = name_score_list.argsort()[::-1]
        sorted_namescores = name_score_list[sortx]
        sorted_dnids = unique_dnids[sortx]

        # infer agg name results
        success = sorted_dnids == qnid
        # presumably dnids <= 0 encode unknown names — TODO confirm
        failure = np.logical_and(~success, sorted_dnids > 0)
        # NOTE(review): when there is no success/failure these stay None, and
        # the numpy indexing below would then add an axis rather than select
        # an element — confirm that case cannot occur here.
        gt_name_rank = None if not np.any(success) else np.where(success)[0][0]
        gf_name_rank = None if not np.any(failure) else np.nonzero(failure)[0][0]

        gt_nid = sorted_dnids[gt_name_rank]
        gf_nid = sorted_dnids[gf_name_rank]
        gt_name_score = sorted_namescores[gt_name_rank]
        gf_name_score = sorted_namescores[gf_name_rank]

        if gt_name_score <= 0:
            # ensure failure cases are loud give them the worst possible rank
            # instead of a random one.
            if hasattr(qreq_, 'dnids'):
                gt_name_rank = len(qreq_.dnids) + 1
            else:
                dnids = list(set(ibs.get_annot_nids(qreq_.daids)))
                gt_name_rank = len(dnids) + 1
        qnx2_nameres_info = {}
        qnx2_nameres_info['qnid'] = qnid
        qnx2_nameres_info['gt_nid'] = gt_nid
        qnx2_nameres_info['gf_nid'] = gf_nid
        qnx2_nameres_info['gt_name_rank'] = gt_name_rank
        qnx2_nameres_info['gf_name_rank'] = gf_name_rank
        qnx2_nameres_info['gt_name_score'] = gt_name_score
        qnx2_nameres_info['gf_name_score'] = gf_name_score

        nameres_info_list.append(qnx2_nameres_info)
        # Re-stacked on every iteration; only the final value is used below
        nameres_info = ut.dict_stack(nameres_info_list, 'qnx2_')
    cmsinfo.update(nameres_info)

    return cmsinfo
def combine_testres_list(ibs, testres_list):
    """
    combine test results over multiple annot configs

    The combination of pipeline and annotation config is indexed by cfgx.
    A cfgx corresponds to a unique query request

    Args:
        ibs: controller (NOTE(review): unused in this body — kept for
            interface compatibility? confirm against callers)
        testres_list (list): one TestResult per annotation config

    Returns:
        TestResult: a single aggregated TestResult

    CommandLine:
        python -m wbia --tf combine_testres_list
        python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show
        python -m wbia --tf -draw_rank_cmc --db PZ_Master1 --show
        python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default
        python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default

    >>> # DISABLE_DOCTEST
    >>> from wbia.expt.test_result import *  # NOQA
    >>> from wbia.expt import harness
    >>> ibs, testres = harness.testdata_expts('PZ_MTEST', ['varysize'])
    """
    import copy
    from wbia.expt import annotation_configs

    acfg_list = [tr.acfg for tr in testres_list]
    acfg_lbl_list = annotation_configs.get_varied_acfg_labels(acfg_list)

    flat_acfg_list = annotation_configs.flatten_acfg_list(acfg_list)
    # Split annot configs into the part shared by all and the varying parts
    nonvaried_acfg, varied_acfg_list = ut.partition_varied_cfg_list(flat_acfg_list)

    def combine_lbls(lbl, acfg_lbl):
        # Join a pipeline label with an annot-config label, skipping empties
        if len(lbl) == 0:
            return acfg_lbl
        if len(acfg_lbl) == 0:
            return lbl
        return lbl + '+' + acfg_lbl

    # TODO: depcirate cfg_dict list for pcfg_list (I think)

    # Concatenate the per-testres config lists into flat cfgx-indexed lists
    agg_cfg_list = ut.flatten([tr.cfg_list for tr in testres_list])
    agg_cfgx2_qreq_ = ut.flatten([tr.cfgx2_qreq_ for tr in testres_list])
    agg_cfgdict_list = ut.flatten([tr.cfgdict_list for tr in testres_list])
    agg_cfgx2_cmsinfo = ut.flatten([tr.cfgx2_cmsinfo for tr in testres_list])
    # Each testres' acfg applies to every one of its pipeline configs
    agg_varied_acfg_list = ut.flatten(
        [[acfg] * len(tr.cfg_list) for tr, acfg in zip(testres_list, varied_acfg_list)]
    )
    agg_cfgx2_lbls = ut.flatten(
        [
            [combine_lbls(lbl, acfg_lbl) for lbl in tr.cfgx2_lbl]
            for tr, acfg_lbl in zip(testres_list, acfg_lbl_list)
        ]
    )

    agg_cfgx2_acfg = ut.flatten(
        [
            [copy.deepcopy(acfg)] * len(tr.cfg_list)
            for tr, acfg in zip(testres_list, acfg_list)
        ]
    )

    big_testres = TestResult(
        agg_cfg_list, agg_cfgx2_lbls, agg_cfgx2_cmsinfo, agg_cfgx2_qreq_
    )

    # Give the big test result an acfg that is common between everything
    big_testres.acfg = annotation_configs.unflatten_acfgdict(nonvaried_acfg)
    # TODO: cfgdict_list -> pcfg_list
    big_testres.cfgdict_list = agg_cfgdict_list  # TODO: depricate
    big_testres.common_acfg = annotation_configs.compress_aidcfg(big_testres.acfg)
    # Pipeline-config keys shared by every config
    big_testres.common_cfgdict = reduce(ut.dict_intersection, big_testres.cfgdict_list)
    big_testres.varied_acfg_list = agg_varied_acfg_list
    big_testres.nonvaried_acfg = nonvaried_acfg
    # Per-config pipeline dicts with the common keys removed
    big_testres.varied_cfg_list = [
        ut.delete_dict_keys(cfgdict.copy(), list(big_testres.common_cfgdict.keys()))
        for cfgdict in big_testres.cfgdict_list
    ]
    big_testres.acfg_list = acfg_list
    big_testres.cfgx2_acfg = agg_cfgx2_acfg
    big_testres.cfgx2_pcfg = agg_cfgdict_list

    assert len(agg_cfgdict_list) == len(agg_cfgx2_acfg)

    # big_testres.acfg
    testres = big_testres
    # big_testres = testres
    return testres
@ut.reloadable_class
class TestResult(ut.NiceRepr):
"""
CommandLine:
export SMK_PIPE="smk:nwords=[64000],sv=[False]"
wbia TestResult --db PZ_MTEST -a ctrl -p $SMK_PIPE
wbia TestResult --db Oxford -a oxford -p $SMK_PIPE
Example:
>>> # Script
>>> from wbia.init import main_helpers
>>> import utool as ut
>>> ibs, testres = main_helpers.testdata_expts()
>>> testres.help()
>>> actions = testres.get_actions()
>>> testres.map_score()
>>> ut.qtensure()
>>> prompt = ut.InteractivePrompt(actions)
>>> prompt.loop()
"""
def __init__(testres, cfg_list, cfgx2_lbl, cfgx2_cmsinfo, cfgx2_qreq_):
assert len(cfg_list) == len(cfgx2_lbl), 'bad lengths1: %r != %r' % (
len(cfg_list),
len(cfgx2_lbl),
)
assert len(cfgx2_qreq_) == len(cfgx2_lbl), 'bad lengths2: %r != %r' % (
len(cfgx2_qreq_),
len(cfgx2_lbl),
)
assert len(cfgx2_cmsinfo) == len(cfgx2_lbl), 'bad lengths3: %r != %r' % (
len(cfgx2_cmsinfo),
len(cfgx2_lbl),
)
# TODO rename cfg_list to pcfg_list
testres.cfg_list = cfg_list
testres.cfgx2_lbl = cfgx2_lbl
testres.cfgx2_cmsinfo = cfgx2_cmsinfo
testres.cfgx2_qreq_ = cfgx2_qreq_
# TODO: uncomment
# testres.cfgx2_acfg
# testres.cfgx2_qcfg
# testres.acfg_list = None #
testres.lbl = None
testres.testnameid = None
@classmethod
def from_cms(cls, cm_list, qreq_):
cfg_list = [qreq_.qparams] # should actually be the specified dict
cfgx2_lbl = ['unspecified']
cmsinfo = build_cmsinfo(cm_list, qreq_)
cfgx2_cmsinfo = [cmsinfo]
cfgx2_qreq_ = [qreq_]
testres = cls(cfg_list, cfgx2_lbl, cfgx2_cmsinfo, cfgx2_qreq_)
return testres
def __str__(testres):
return testres.reconstruct_test_flags()
# def __repr__(testres):
# return testres._custom_str()
def __nice__(testres):
dbname = None if testres.ibs is None else testres.ibs.get_dbname()
# hashkw = dict(_new=True, pathsafe=False)
infostr_ = 'nCfg=%s' % testres.nConfig
if testres.nConfig == 1:
qreq_ = testres.cfgx2_qreq_[0]
infostr_ += ' nQ=%s, nD=%s %s' % (
len(qreq_.qaids),
len(qreq_.daids),
qreq_.get_pipe_hashid(),
)
# nD=%s %s' % (, len(testres.daids), testres.get_pipe_hashid())
nice = '%s %s' % (dbname, infostr_)
return nice
@property
def ibs(testres):
ibs_list = []
for qreq_ in testres.cfgx2_qreq_:
try:
ibs_list.append(qreq_.ibs)
except AttributeError:
ibs_list.append(qreq_.depc.controller)
ibs = ibs_list[0]
for ibs_ in ibs_list:
assert (
ibs.get_dbdir() == ibs_.get_dbdir()
), 'all requests must use the same database'
return ibs
@property
def qaids(testres):
assert (
testres.has_constant_qaids()
), 'must have constant qaids to use this property'
return testres.cfgx2_qaids[0]
# return testres._qaids
@property
def nConfig(testres):
# FIXME: this is the number of requests not the number of
# pipeline configurations
return len(testres.cfg_list)
@property
def unique_pcfgs(testres):
unique_idxs = ut.unique_indices(map(id, testres.cfgx2_pcfg))
return ut.take(testres.cfgx2_pcfg, unique_idxs)
@property
def nQuery(testres):
return len(testres.qaids)
@property
def rank_mat(testres):
return testres.get_rank_mat()
@property
def cfgx2_daids(testres):
daids_list = [qreq_.daids for qreq_ in testres.cfgx2_qreq_]
return daids_list
@property
def cfgx2_qaids(testres):
qaids_list = [qreq_.qaids for qreq_ in testres.cfgx2_qreq_]
return qaids_list
def has_constant_daids(testres):
return ut.allsame(testres.cfgx2_daids)
def has_constant_qaids(testres):
return ut.allsame(testres.cfgx2_qaids)
def has_constant_length_daids(testres):
return ut.allsame(list(map(len, testres.cfgx2_daids)))
def has_constant_length_qaids(testres):
return ut.allsame(list(map(len, testres.cfgx2_qaids)))
    def get_infoprop_list(testres, key, qaids=None):
        """
        Get a per-config list of per-query property arrays for ``key``.

        Args:
            key (str): a 'qx2_*' key of the stacked cmsinfo dicts, or the
                special key 'participant'
            qaids (list): if given, align each config's values to these aids
                (np.nan fills aids a config did not query)

        Returns:
            list: one array/list of values per config

        Ignore:
            key = 'qx2_gt_rank'
            qaids = testres.get_test_qaids()
        """
        if key == 'participant':
            # Get if qaids are part of the config
            cfgx2_infoprop = [np.in1d(qaids, aids_) for aids_ in testres.cfgx2_qaids]
        else:
            _tmp1_cfgx2_infoprop = ut.get_list_column(testres.cfgx2_cmsinfo, key)
            # Missing (None) entries become nan so numpy ops work uniformly
            _tmp2_cfgx2_infoprop = list(
                map(np.array, ut.util_list.replace_nones(_tmp1_cfgx2_infoprop, np.nan))
            )
            if qaids is None:
                cfgx2_infoprop = _tmp2_cfgx2_infoprop
            else:
                # Use nan if the aid doesnt exist
                cfgx2_qaid2_qx = [
                    dict(zip(aids_, range(len(aids_)))) for aids_ in testres.cfgx2_qaids
                ]
                qxs_list = [
                    ut.dict_take(qaid2_qx, qaids, None) for qaid2_qx in cfgx2_qaid2_qx
                ]
                cfgx2_infoprop = [
                    [np.nan if x is None else props[x] for x in qxs]
                    for props, qxs in zip(_tmp2_cfgx2_infoprop, qxs_list)
                ]
            if key == 'qx2_gt_rank' or key.endswith('_rank'):
                # hack: the -1 "no groundtruth" sentinel is remapped to the
                # worst possible rank so rank statistics stay meaningful
                wpr = testres.get_worst_possible_rank()
                cfgx2_infoprop = [
                    np.array([wpr if rank == -1 else rank for rank in infoprop])
                    for infoprop in cfgx2_infoprop
                ]
        return cfgx2_infoprop
def get_infoprop_mat(testres, key, qaids=None):
"""
key = 'qx2_gf_raw_score'
key = 'qx2_gt_raw_score'
"""
cfgx2_infoprop = testres.get_infoprop_list(key, qaids)
# concatenate each query rank across configs
infoprop_mat = np.vstack(cfgx2_infoprop).T
return infoprop_mat
@ut.memoize
def get_rank_mat(testres, qaids=None):
# Ranks of Best Results
rank_mat = testres.get_infoprop_mat(key='qx2_gt_rank', qaids=qaids)
return rank_mat
def get_worst_possible_rank(testres):
# worst_possible_rank = max(9001, len(testres.daids) + 1)
worst_possible_rank = max([len(qreq_.daids) for qreq_ in testres.cfgx2_qreq_]) + 1
# worst_possible_rank = len(testres.daids) + 1
return worst_possible_rank
    def get_rank_histograms(testres, bins=None, key=None, join_acfgs=False):
        """
        Get histograms of groundtruth ranks, one per config.

        Args:
            bins: explicit bin edges, 'dense' for one bin per rank, or None
                for the coarse default bins
            key (str): infoprop key (defaults to 'qx2_gt_rank')
            join_acfgs (bool): if True, sum histograms over joined config
                groups (cross-validation folds)

        Returns:
            tuple: (cfgx2_hist, bins) where cfgx2_hist has one row per
                (possibly grouped) config

        Ignore:
            testres.get_infoprop_mat('qnx2_gt_name_rank')
            testres.get_infoprop_mat('qnx2_gf_name_rank')
            testres.get_infoprop_mat('qnx2_qnid')

        Example:
            >>> # DISABLE_DOCTEST
            >>> from wbia.expt.test_result import *  # NOQA
            >>> from wbia.init import main_helpers
            >>> ibs, testres = main_helpers.testdata_expts('testdb1', a=['default'])
            >>> bins = 'dense'
            >>> key = 'qnx2_gt_name_rank'
            >>> config_hists = testres.get_rank_histograms(bins, key=key)
        """
        if key is None:
            key = 'qx2_gt_rank'
            # key = 'qnx2_gt_name_rank'
        if bins is None:
            bins = testres.get_rank_histogram_bins()
        elif bins == 'dense':
            # one bin per possible rank value
            bins = np.arange(testres.get_worst_possible_rank() + 1)
        cfgx2_ranks = testres.get_infoprop_list(key=key)
        # Use numpy histogram repr
        cfgx2_hist = np.zeros((len(cfgx2_ranks), len(bins) - 1), dtype=np.int32)
        for cfgx, ranks in enumerate(cfgx2_ranks):
            freq = np.histogram(ranks, bins=bins)[0]
            cfgx2_hist[cfgx] = freq
        if join_acfgs:
            # Hack for turtles / general way of doing cross validation
            # however, we need to change the name
            groupxs = testres.get_cfgx_groupxs()
            cfgx2_hist = np.array(
                [
                    np.sum(group, axis=0)
                    for group in ut.apply_grouping(cfgx2_hist, groupxs)
                ]
            )
        return cfgx2_hist, bins
def get_rank_percentage_cumhist(testres, bins='dense', key=None, join_acfgs=False):
r"""
Args:
bins (unicode): (default = u'dense')
key (None): (default = None)
join_acfgs (bool): (default = False)
Returns:
tuple: (config_cdfs, edges)
CommandLine:
python -m wbia --tf TestResult.get_rank_percentage_cumhist
python -m wbia --tf TestResult.get_rank_percentage_cumhist \
-t baseline -a unctrl ctrl
python -m wbia --tf TestResult.get_rank_percentage_cumhist \
--db lynx \
-a default:qsame_imageset=True,been_adjusted=True,excluderef=True \
-t default:K=1 --show --cmd
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts(
>>> 'testdb1', a=['default:num_names=1,name_offset=[0,1]'])
>>> bins = u'dense'
>>> key = None
>>> (config_cdfs, edges) = testres.get_rank_percentage_cumhist(bins)
>>> result = ('(config_cdfs, edges) = %s' % (str((config_cdfs, edges)),))
>>> print(result)
"""
cfgx2_hist, edges = testres.get_rank_histograms(
bins, key=key, join_acfgs=join_acfgs
)
cfgx2_cumhist = np.cumsum(cfgx2_hist, axis=1)
cfgx2_cumhist_percent = 100 * cfgx2_cumhist / cfgx2_cumhist.T[-1].T[:, None]
return cfgx2_cumhist_percent, edges
    def get_cfgx_groupxs(testres):
        r"""
        Returns the group indices of configurations specified to be joined.

        Configs are grouped by their annot-config ``joinme`` label (used for
        cross-validation folds); configs without a label are left standalone.

        Ignore:
            a = [
                'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',
                'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',
                'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',
                'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',
            ]

            >>> a = [
            >>>     'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',
            >>>     'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',
            >>>     'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',
            >>>     'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',
            >>> ]
            >>> from wbia.init import main_helpers
            >>> #a = 'default:minqual=good,require_timestamp=True,crossval_enc=True,view=[right,left]'
            >>> t = 'default:K=[1]'
            >>> ibs, testres = main_helpers.testdata_expts('WWF_Lynx_Copy', a=a, t=t)
            >>> testres.get_cfgx_groupxs()

            ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids).nids)) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))
            ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids))) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))

        Example:
            >>> # xdoctest: +REQUIRES(--slow)
            >>> # ENABLE_DOCTEST
            >>> from wbia.expt.test_result import *  # NOQA
            >>> from wbia.init import main_helpers
            >>> ibs, testres = main_helpers.testdata_expts(
            >>>     'PZ_MTEST',
            >>>     a=['default:qnum_names=1,qname_offset=[0,1],joinme=1,dpername=1',
            >>>        'default:qsize=1,dpername=[1,2]'],
            >>>     t=['default:K=[1,2]'])
            >>> groupxs = testres.get_cfgx_groupxs()
            >>> result = groupxs
            >>> print(result)
            [[6], [4], [0, 2], [7], [5], [1, 3]]
        """
        # Group-ids for annotations are determined by joinme labels
        # (used primarilly in cross-validation)
        acfg_joinid = [acfg['qcfg']['joinme'] for acfg in testres.cfgx2_acfg]
        # Anything that does not have a joinme groupid is standalone and must
        # be given a unique groupid (negative ids never collide with joinme)
        gen_groupid = it.count(-1, step=-1)
        acfg_groupids = [
            next(gen_groupid) if grpid is None else grpid for grpid in acfg_joinid
        ]
        # Ensure that different pipeline configs are in different groups
        pcfg_groupids = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg)
        cfg_groupids = list(zip(pcfg_groupids, acfg_groupids))
        groupxs = ut.group_indices(cfg_groupids)[1]
        return groupxs
def get_rank_histogram_bins(testres):
"""easy to see histogram bins"""
worst_possible_rank = testres.get_worst_possible_rank()
if worst_possible_rank > 50:
bins = [0, 1, 5, 50, worst_possible_rank, worst_possible_rank + 1]
elif worst_possible_rank > 5:
bins = [0, 1, 5, worst_possible_rank, worst_possible_rank + 1]
else:
bins = [0, 1, 5]
return bins
def get_X_LIST(testres):
"""DEPRICATE or refactor"""
# X_LIST = ut.get_argval('--rank-lt-list', type_=list, default=[1])
X_LIST = ut.get_argval('--rank-lt-list', type_=list, default=[1, 5])
return X_LIST
def get_nLessX_dict(testres):
"""
Build a (histogram) dictionary mapping X (as in #ranks < X) to a list
of cfg scores
"""
X_LIST = testres.get_X_LIST()
nLessX_dict = {int(X): np.zeros(testres.nConfig) for X in X_LIST}
cfgx2_qx2_gt_rank = testres.get_infoprop_list('qx2_gt_rank')
for X in X_LIST:
cfgx2_lessX_mask = [
np.logical_and(0 <= qx2_gt_ranks, qx2_gt_ranks < X)
for qx2_gt_ranks in cfgx2_qx2_gt_rank
]
cfgx2_nLessX = np.array([lessX_.sum(axis=0) for lessX_ in cfgx2_lessX_mask])
nLessX_dict[int(X)] = cfgx2_nLessX
return nLessX_dict
    def get_all_varied_params(testres):
        r"""
        Returns the parameters that were varied between different
        configurations in this test

        Returns:
            list: varied_params

        CommandLine:
            python -m wbia TestResult.get_all_varied_params

        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.expt.test_result import *  # NOQA
            >>> import wbia
            >>> testres = wbia.testdata_expts(
            >>>     'PZ_MTEST', t='default:K=[1,2]')[1]
            >>> varied_params = sorted(testres.get_all_varied_params())
            >>> result = ('varied_params = %s' % (ut.repr2(varied_params),))
            >>> print(result)
            varied_params = ['K', '_cfgindex']
        """
        # only for big results
        # union of keys over the varied pipeline configs ...
        varied_cfg_params = list(
            set(ut.flatten([cfgdict.keys() for cfgdict in testres.varied_cfg_list]))
        )
        # ... and over the varied annotation configs
        varied_acfg_params = list(
            set(ut.flatten([acfg.keys() for acfg in testres.varied_acfg_list]))
        )
        varied_params = varied_acfg_params + varied_cfg_params
        return varied_params
def get_total_num_varied_params(testres):
return len(testres.get_all_varied_params())
def get_param_basis(testres, key):
"""
Returns what a param was varied between over all tests
key = 'K'
key = 'dcfg_sample_size'
"""
if key == 'len(daids)':
basis = sorted(list(set([len(daids) for daids in testres.cfgx2_daids])))
elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):
basis = sorted(
list(set([cfgdict[key] for cfgdict in testres.varied_cfg_list]))
)
elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):
basis = sorted(list(set([acfg[key] for acfg in testres.varied_acfg_list])))
else:
# assert False, 'param is not varied'
if key in testres.common_cfgdict:
basis = [testres.common_cfgdict[key]]
elif key in testres.nonvaried_acfg:
basis = [testres.nonvaried_acfg[key]]
else:
assert False, 'param=%r doesnt exist' % (key,)
return basis
def get_param_val_from_cfgx(testres, cfgx, key):
if key == 'len(daids)':
return len(testres.cfgx2_daids[cfgx])
# --- HACK - the keys are different in varied dict for some reason ---
elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):
return testres.varied_cfg_list[cfgx][key]
elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):
return testres.varied_acfg_list[cfgx][key]
# --- / Hack
elif any([key in cfgdict for cfgdict in testres.cfgx2_pcfg]):
return testres.cfgx2_pcfg[cfgx][key]
elif any([key in cfgdict for cfgdict in testres.cfgx2_acfg]):
return testres.cfgx2_acfg[cfgx][key]
else:
assert False, 'param=%r doesnt exist' % (key,)
def get_cfgx_with_param(testres, key, val):
"""
Gets configs where the given parameter is held constant
"""
if key == 'len(daids)':
cfgx_list = [
cfgx
for cfgx, daids in enumerate(testres.cfgx2_daids)
if len(daids) == val
]
elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):
cfgx_list = [
cfgx
for cfgx, cfgdict in enumerate(testres.varied_cfg_list)
if cfgdict[key] == val
]
elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):
cfgx_list = [
cfgx
for cfgx, acfg in enumerate(testres.varied_acfg_list)
if acfg[key] == val
]
else:
if key in testres.common_cfgdict:
cfgx_list = list(range(testres.nConfig))
elif key in testres.nonvaried_acfg:
cfgx_list = list(range(testres.nConfig))
else:
assert False, 'param=%r doesnt exist' % (key,)
# assert False, 'param is not varied'
return cfgx_list
def get_pipecfg_args(testres):
if '_cfgstr' in testres.common_cfgdict:
pipecfg_args = [testres.common_cfgdict['_cfgstr']]
else:
pipecfg_args = ut.unique_ordered(
[cfg['_cfgstr'] for cfg in testres.varied_cfg_list]
)
return ' '.join(pipecfg_args)
    def get_annotcfg_args(testres):
        """
        Reconstruct the ``-a`` annotation-config argument string.

        CommandLine:
            # TODO: More robust fix
            # To reproduce the error
            wbia -e rank_cmc --db humpbacks_fb -a default:mingt=2,qsize=10,dsize=100 default:qmingt=2,qsize=10,dsize=100 -t default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_score_weight=0.5 --show
        """
        if '_cfgstr' in testres.common_acfg['common']:
            annotcfg_args = [testres.common_acfg['common']['_cfgstr']]
        else:
            try:
                annotcfg_args = ut.unique_ordered(
                    [acfg['common']['_cfgstr'] for acfg in testres.varied_acfg_list]
                )
            except KeyError:
                # HACK FIX: some acfgs are stored flat (no 'common' subdict)
                try:
                    annotcfg_args = ut.unique_ordered(
                        [acfg['_cfgstr'] for acfg in testres.varied_acfg_list]
                    )
                except KeyError:
                    # final fallback: query-config-prefixed key
                    annotcfg_args = ut.unique_ordered(
                        [acfg['qcfg__cfgstr'] for acfg in testres.varied_acfg_list]
                    )
        return ' '.join(annotcfg_args)
def reconstruct_test_flags(testres):
flagstr = ' '.join(
[
'-a ' + testres.get_annotcfg_args(),
'-t ' + testres.get_pipecfg_args(),
'--db ' + testres.ibs.get_dbname(),
]
)
return flagstr
def get_full_cfgstr(testres, cfgx):
"""both qannots and dannots included"""
full_cfgstr = testres.cfgx2_qreq_[cfgx].get_full_cfgstr()
return full_cfgstr
@ut.memoize
def get_cfgstr(testres, cfgx):
"""just dannots and config_str"""
cfgstr = testres.cfgx2_qreq_[cfgx].get_cfgstr()
return cfgstr
    def _shorten_lbls(testres, lbl):
        """
        hacky function

        Applies an ordered table of regex substitutions that abbreviate a
        config label for plots/tables (order matters: later patterns run on
        the output of earlier ones).
        """
        import re

        repl_list = [
            ('candidacy_', ''),
            ('viewpoint_compare', 'viewpoint'),
            # ('custom', 'default'),
            # ('fg_on', 'FG'),
            # ('fg_on=True', 'FG'),
            # ('fg_on=False,?', ''),
            ('fg_on=True', 'FG=True'),
            ('fg_on=False,?', 'FG=False'),
            ('lnbnn_on=True', 'LNBNN'),
            ('lnbnn_on=False,?', ''),
            ('normonly_on=True', 'normonly'),
            ('normonly_on=False,?', ''),
            ('bar_l2_on=True', 'dist'),
            ('bar_l2_on=False,?', ''),
            (r'joinme=\d+,?', ''),
            ('dcrossval_enc', 'denc_per_name'),
            ('sv_on', 'SV'),
            ('rotation_invariance', 'RI'),
            ('affine_invariance', 'AI'),
            ('query_rotation_heuristic', 'QRH'),
            ('nNameShortlistSVER', 'nRR'),
            #
            # ('sample_per_ref_name', 'per_ref_name'),
            ('sample_per_ref_name', 'per_gt_name'),
            ('require_timestamp=True', 'require_timestamp'),
            ('require_timestamp=False,?', ''),
            ('require_timestamp=None,?', ''),
            ('[_A-Za-z]*=None,?', ''),
            ('dpername=None,?', ''),
            # ???
            # ('sample_per_ref_name', 'per_gt_name'),
            # ('per_name', 'per_gf_name'), # Try to make labels clearer for paper
            # ----
            # ('prescore_method=\'?csum\'?,score_method=\'?csum\'?,?', 'amech'),
            # ('prescore_method=\'?nsum\'?,score_method=\'?nsum\'?,?', 'fmech'),
            ("prescore_method='?csum'?,score_method='?csum'?,?", 'mech=annot'),
            ("prescore_method='?nsum'?,score_method='?nsum'?,?", 'mech=name'),
            ('force_const_size=[^,]+,?', ''),
            (r'[dq]?_true_size=\d+,?', ''),
            (r'[dq]?_orig_size=[^,]+,?', ''),
            # Hack
            (
                '[qd]?exclude_reference='
                + ut.regex_or(['True', 'False', 'None'])
                + r'\,?',
                '',
            ),
            # ('=True', '=On'),
            # ('=False', '=Off'),
            ('=True', '=T'),
            ('=False', '=F'),
            (',$', ''),
        ]
        for ser, rep in repl_list:
            lbl = re.sub(ser, rep, lbl)
        return lbl
    def get_short_cfglbls(testres, join_acfgs=False):
        """
        Labels for published tables

        cfg_lbls = ['baseline:nRR=200+default:', 'baseline:+default:']

        CommandLine:
            python -m wbia --tf TestResult.get_short_cfglbls

        Example:
            >>> # SLOW_DOCTEST
            >>> from wbia.expt.test_result import *  # NOQA
            >>> import wbia
            >>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl:size=10'],
            >>>                                    t=['default:dim_size=[450,550]'])
            >>> cfg_lbls = testres.get_short_cfglbls()
            >>> result = ('cfg_lbls = %s' % (ut.repr2(cfg_lbls),))
            >>> print(result)
            cfg_lbls = [
                'default:dim_size=450+ctrl',
                'default:dim_size=550+ctrl',
            ]
        """
        from wbia.expt import annotation_configs

        if False:
            # Dead branch: group-by-hash label computation (disabled)
            acfg_names = [acfg['qcfg']['_cfgstr'] for acfg in testres.cfgx2_acfg]
            pcfg_names = [pcfg['_cfgstr'] for pcfg in testres.cfgx2_pcfg]

            # Only vary the label settings within the cfgname
            acfg_hashes = np.array(list(map(hash, acfg_names)))
            unique_hashes, a_groupxs = vt.group_indices(acfg_hashes)
            a_label_groups = []
            for groupx in a_groupxs:
                acfg_list = ut.take(testres.cfgx2_acfg, groupx)
                varied_lbls = annotation_configs.get_varied_acfg_labels(
                    acfg_list, mainkey='_cfgstr'
                )
                a_label_groups.append(varied_lbls)
            acfg_lbls = vt.invert_apply_grouping(a_label_groups, a_groupxs)

            pcfg_hashes = np.array(list(map(hash, pcfg_names)))
            unique_hashes, p_groupxs = vt.group_indices(pcfg_hashes)
            p_label_groups = []
            for groupx in p_groupxs:
                pcfg_list = ut.take(testres.cfgx2_pcfg, groupx)
                varied_lbls = ut.get_varied_cfg_lbls(pcfg_list, mainkey='_cfgstr')
                p_label_groups.append(varied_lbls)
            pcfg_lbls = vt.invert_apply_grouping(p_label_groups, p_groupxs)

            cfg_lbls = [albl + '+' + plbl for albl, plbl in zip(acfg_lbls, pcfg_lbls)]
        else:
            cfg_lbls_ = testres.cfgx2_lbl[:]
            cfg_lbls_ = [testres._shorten_lbls(lbl) for lbl in cfg_lbls_]
            # split configs up by param and annots
            pa_tups = [lbl.split('+') for lbl in cfg_lbls_]
            cfg_lbls = []
            for pa in pa_tups:
                new_parts = []
                for part in pa:
                    # NOTE(review): assumes at most one NAMEVARSEP per part —
                    # a second separator would make this unpack raise; confirm
                    _tup = part.split(ut.NAMEVARSEP)
                    name, settings = _tup if len(_tup) > 1 else (_tup[0], '')
                    new_parts.append(part if settings else name)
                if len(new_parts) == 2 and new_parts[1] == 'default':
                    newlbl = new_parts[0]
                else:
                    newlbl = '+'.join(new_parts)
                cfg_lbls.append(newlbl)
        if join_acfgs:
            # Collapse each joined group (e.g. crossval folds) to the part of
            # the label the group members agree on
            groupxs = testres.get_cfgx_groupxs()
            group_lbls = []
            for group in ut.apply_grouping(cfg_lbls, groupxs):
                num_parts = 0
                part_dicts = []
                for lbl in group:
                    parts = []
                    for count, pa in enumerate(lbl.split('+')):
                        num_parts = max(num_parts, count + 1)
                        cfgdict = cfghelpers.parse_cfgstr_list2([pa], strict=False)[0][0]
                        parts.append(cfgdict)
                    part_dicts.append(parts)
                group_lbl_parts = []
                for px in range(num_parts):
                    cfgs = ut.take_column(part_dicts, px)
                    nonvaried_cfg = ut.partition_varied_cfg_list(cfgs)[0]
                    group_lbl_parts.append(ut.get_cfg_lbl(nonvaried_cfg))
                    # logger.info('nonvaried_lbl = %r' % (nonvaried_lbl,))
                group_lbl = '+'.join(group_lbl_parts)
                group_lbls.append(group_lbl)
            cfg_lbls = group_lbls
        return cfg_lbls
    def get_varied_labels(testres, shorten=False, join_acfgs=False, sep=''):
        """
        Returns labels indicating only the parameters that have been varied between
        different annot/pipeline configurations.

        Helper for consistent figure titles

        CommandLine:
            python -m wbia --tf TestResult.make_figtitle  --prefix "Seperability " --db GIRM_Master1   -a timectrl -t Ell:K=2     --hargv=scores
            python -m wbia --tf TestResult.make_figtitle
            python -m wbia TestResult.get_varied_labels

        Example:
            >>> # SLOW_DOCTEST
            >>> from wbia.expt.test_result import *  # NOQA
            >>> import wbia
            >>> ibs, testres = wbia.testdata_expts(
            >>>     'PZ_MTEST', t='default:K=[1,2]',
            >>>     #a=['timectrl:qsize=[1,2],dsize=[3,4]']
            >>>     a=[
            >>>         'default:qsize=[1,2],dsize=2,joinme=1,view=left',
            >>>         'default:qsize=2,dsize=3,joinme=1,view=primary',
            >>>         'default:qsize=[3,2],dsize=4,joinme=2,view=left',
            >>>         'default:qsize=4,dsize=5,joinme=2,view=primary',
            >>>     ]
            >>> )
            >>> # >>> ibs, testres = wbia.testdata_expts(
            >>> # >>>     'WWF_Lynx_Copy', t='default:K=1',
            >>> # >>>     a=[
            >>> # >>>         'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=1,joinme=1',
            >>> # >>>         'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=2,joinme=2',
            >>> # >>>         #'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=3,joinme=3',
            >>> # >>>         'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=1,joinme=1',
            >>> # >>>         'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=2,joinme=2',
            >>> # >>>         #'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=3,joinme=3',
            >>> # >>>     ]
            >>> # >>> )
            >>> varied_lbls = testres.get_varied_labels(shorten=False, join_acfgs=True)
            >>> result = ('varied_lbls = %s' % (ut.repr2(varied_lbls, strvals=True, nl=2),))
            >>> print(result)

            varied_lbls = [u'K=1+qsize=1', u'K=2+qsize=1', u'K=1+qsize=2', u'K=2+qsize=2']
        """
        from wbia.expt import annotation_configs

        varied_acfgs = annotation_configs.get_varied_acfg_labels(
            testres.cfgx2_acfg, checkname=True
        )
        # logger.info('varied_acfgs = %s' % (ut.repr2(varied_acfgs, nl=2),))
        # logger.info('testres.cfgx2_acfg = %s' % (ut.repr3(testres.cfgx2_acfg),))
        varied_pcfgs = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg, checkname=True)
        # varied_acfgs = ut.get_varied_cfg_lbls(testres.cfgx2_acfg, checkname=True)
        name_sep = ':'
        cfg_sep = '+'

        if join_acfgs:
            # Hack for the grouped config problem
            new_varied_acfgs = []
            groupxs = testres.get_cfgx_groupxs()
            grouped_acfgs = ut.apply_grouping(varied_acfgs, groupxs)
            grouped_pcfgs = ut.apply_grouping(varied_pcfgs, groupxs)
            for group in grouped_acfgs:
                # Ensure every label has a name separator before re-parsing
                group = [p if name_sep in p else name_sep + p for p in group]
                # Re-parse given back into dictionary form
                cfgdicts_ = cfghelpers.parse_cfgstr_list2(group, strict=False)
                # I forget why these are stored in a 2d-list
                cfgdicts = ut.take_column(cfgdicts_, 0)
                new_acfgs = ut.partition_varied_cfg_list(cfgdicts)
                # Hack, just taking the first one that has agreement between
                # joinme / crossvalidation runs
                new_acfg = new_acfgs[0]
                if True:
                    # look at internal variance within xval runs
                    internal_cfgs = new_acfgs[1]
                    import pandas as pd

                    intern_variations = pd.DataFrame.from_dict(internal_cfgs).to_dict(
                        orient='list'
                    )

                    # How to summarize each internally-varying key:
                    # (reduction op, label prefix, label suffix)
                    op_prefixes = {
                        'sum': (np.sum, 'Σ-', ''),
                        'mean': (np.mean, 'µ-', ''),
                        'set': (lambda x: '&'.join(set(map(str, x))), '', 's'),
                    }
                    known_modes = {
                        'dsize': 'mean',
                        'qsize': 'sum',
                        'view': 'set',
                    }
                    for key in intern_variations.keys():
                        if key.startswith('_'):
                            continue
                        mode = known_modes.get(key, None)
                        vals = intern_variations[key]
                        if mode is None:
                            mode = 'set'
                        if key == 'crossval_idx':
                            new_acfg['folds'] = len(intern_variations['crossval_idx'])
                        else:
                            op, pref, suff = op_prefixes[mode]
                            c = op(vals)
                            if isinstance(c, str):
                                new_acfg[pref + key + suff] = c
                            else:
                                new_acfg[pref + key + suff] = ut.repr2(c, precision=2)
                    # if 'dsize' in intern_variations:
                    #     new_acfg['µ-dsize'] = np.sum(intern_variations['dsize'])
                    # if 'qsize' in intern_variations:
                    #     new_acfg['Σ-qsize'] = np.sum(intern_variations['qsize'])
                    # if 'view' in intern_variations:
                    #     new_acfg['views'] = '&'.join(set(intern_variations['view']))
                    # if 'crossval_idx' in intern_variations:
                    #     new_acfg['folds'] = len(intern_variations['crossval_idx'])
                new_varied_acfgs.append(new_acfg)

            # Do one more dup check to remove the duplicate summaries
            common_new_acfg = ut.partition_varied_cfg_list(new_varied_acfgs)[0]
            for key in common_new_acfg.keys():
                if not key.startswith('_'):
                    for new_acfg in new_varied_acfgs:
                        del new_acfg[key]

            varied_pcfgs = ut.take_column(grouped_pcfgs, 0)
            varied_acfgs = [
                ut.get_cfg_lbl(new_acfg_, with_name=False, sep=sep)
                for new_acfg_ in new_varied_acfgs
            ]

        def combo_lbls(lbla, lblp):
            # Join two label fragments, skipping empty / bare-separator ones
            parts = []
            if lbla != name_sep and lbla:
                parts.append(lbla)
            if lblp != name_sep and lblp:
                parts.append(lblp)
            return (sep + cfg_sep).join(parts)

        # NOTE(review): the zip targets are crossed here (lblp iterates
        # varied_acfgs, lbla iterates varied_pcfgs) so the pipeline label
        # ends up first in the combined string — confirm this is intended.
        varied_lbls = [
            combo_lbls(lbla, lblp) for lblp, lbla in zip(varied_acfgs, varied_pcfgs)
        ]
        if shorten:
            varied_lbls = [testres._shorten_lbls(lbl) for lbl in varied_lbls]

        return varied_lbls
def get_sorted_config_labels(testres):
"""
helper
"""
key = 'qx2_gt_rank'
cfgx2_cumhist_percent, edges = testres.get_rank_percentage_cumhist(
bins='dense', key=key
)
label_list = testres.get_short_cfglbls()
label_list = [
('%6.2f%%' % (percent,))
# ut.scalar_str(percent, precision=2)
+ ' - ' + label
for percent, label in zip(cfgx2_cumhist_percent.T[0], label_list)
]
sortx = cfgx2_cumhist_percent.T[0].argsort()[::-1]
label_list = ut.take(label_list, sortx)
return label_list
def make_figtitle(testres, plotname='', filt_cfg=None):
"""
Helper for consistent figure titles
CommandLine:
python -m wbia --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores
python -m wbia --tf TestResult.make_figtitle
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_MTEST')
>>> plotname = ''
>>> figtitle = testres.make_figtitle(plotname)
>>> result = ('figtitle = %r' % (figtitle,))
>>> print(result)
"""
figtitle_prefix = ut.get_argval('--prefix', type_=str, default='')
if figtitle_prefix != '':
figtitle_prefix = figtitle_prefix.rstrip() + ' '
figtitle = figtitle_prefix + plotname
hasprefix = figtitle_prefix == ''
if hasprefix:
figtitle += '\n'
title_aug = testres.get_title_aug(friendly=True, with_cfg=hasprefix)
figtitle += ' ' + title_aug
if filt_cfg is not None:
filt_cfgstr = ut.get_cfg_lbl(filt_cfg)
if filt_cfgstr.strip() != ':':
figtitle += ' ' + filt_cfgstr
return figtitle
    def get_title_aug(
        testres, with_size=True, with_db=True, with_cfg=True, friendly=False
    ):
        r"""
        Build the title-augmentation string (db name, config labels, sizes)
        appended to figure titles.

        Args:
            with_size (bool): include #qaids/#daids stats (default = True)
            with_db (bool): include the database name (default = True)
            with_cfg (bool): include annot/pipeline config labels (default = True)
            friendly (bool): replace db names with their friendly aliases

        Returns:
            str: title_aug

        CommandLine:
            python -m wbia --tf TestResult.get_title_aug --db PZ_Master1 -a timequalctrl::timectrl

        Example:
            >>> # DISABLE_DOCTEST
            >>> from wbia.expt.test_result import *  # NOQA
            >>> import wbia
            >>> ibs, testres = wbia.testdata_expts('PZ_MTEST')
            >>> with_size = True
            >>> title_aug = testres.get_title_aug(with_size)
            >>> res = u'title_aug = %s' % (title_aug,)
            >>> print(res)
        """
        ibs = testres.ibs
        title_aug = ''
        if with_db:
            title_aug += 'db=' + (ibs.get_dbname())
        if with_cfg:
            try:
                # Annot-config label: prefer the common _cfgstr, fall back to
                # the common _cfgname, else join the varied per-config names.
                if '_cfgname' in testres.common_acfg['common']:
                    try:
                        annot_cfgname = testres.common_acfg['common']['_cfgstr']
                    except KeyError:
                        annot_cfgname = testres.common_acfg['common']['_cfgname']
                else:
                    cfgname_list = [
                        cfg['dcfg__cfgname'] for cfg in testres.varied_acfg_list
                    ]
                    cfgname_list = ut.unique_ordered(cfgname_list)
                    annot_cfgname = '[' + ','.join(cfgname_list) + ']'
                # Pipeline-config label: common _cfgstr, else join the varied ones.
                try:
                    pipeline_cfgname = testres.common_cfgdict['_cfgstr']
                except KeyError:
                    # pipeline_cfgname = testres.common_cfgdict['_cfgname']
                    cfgstr_list = [cfg['_cfgstr'] for cfg in testres.varied_cfg_list]
                    uniuqe_cfgstrs = ut.unique_ordered(cfgstr_list)
                    pipeline_cfgname = '[' + ','.join(uniuqe_cfgstrs) + ']'
                annot_cfgname = testres._shorten_lbls(annot_cfgname)
                pipeline_cfgname = testres._shorten_lbls(pipeline_cfgname)
                # hack turn these off if too long
                if len(annot_cfgname) < 64:
                    title_aug += ' a=' + annot_cfgname
                if len(pipeline_cfgname) < 64:
                    title_aug += ' t=' + pipeline_cfgname
            except Exception as ex:
                # Dump the configs that caused the failure before re-raising
                logger.info(ut.repr2(testres.common_acfg))
                logger.info(ut.repr2(testres.common_cfgdict))
                ut.printex(ex)
                raise
        if with_size:
            if ut.get_argflag('--hack_size_nl'):
                title_aug += '\n'
            # '*' marks counts that are constant in length but not identity
            if testres.has_constant_qaids():
                title_aug += ' #qaids=%r' % (len(testres.qaids),)
            elif testres.has_constant_length_qaids():
                title_aug += ' #qaids=%r*' % (len(testres.cfgx2_qaids[0]),)
            if testres.has_constant_daids():
                daids = testres.cfgx2_daids[0]
                title_aug += ' #daids=%r' % (len(testres.cfgx2_daids[0]),)
                if testres.has_constant_qaids():
                    # Summarize database annots-per-name as mean (±std if varied)
                    all_daid_per_name_stats = ut.get_stats(
                        ibs.get_num_annots_per_name(daids)[0], use_nan=True
                    )
                    if all_daid_per_name_stats['std'] == 0:
                        title_aug += ' dper_name=%s' % (
                            ut.scalar_str(
                                all_daid_per_name_stats['mean'], max_precision=2
                            ),
                        )
                    else:
                        title_aug += ' dper_name=%s±%s' % (
                            ut.scalar_str(all_daid_per_name_stats['mean'], precision=2),
                            ut.scalar_str(all_daid_per_name_stats['std'], precision=2),
                        )
            elif testres.has_constant_length_daids():
                daids = testres.cfgx2_daids[0]
                title_aug += ' #daids=%r*' % (len(testres.cfgx2_daids[0]),)
        if friendly:
            # Hackiness for friendliness
            # title_aug = title_aug.replace('db=PZ_Master1', 'Plains Zebras')
            # title_aug = title_aug.replace('db=NNP_MasterGIRM_core', 'Masai Giraffes')
            # title_aug = title_aug.replace('db=GZ_ALL', 'Grevy\'s Zebras')
            title_aug = ut.multi_replace(
                title_aug,
                list(ibs.const.DBNAME_ALIAS.keys()),
                list(ibs.const.DBNAME_ALIAS.values()),
            )
            # title_aug = title_aug.replace('db=PZ_Master1', 'db=PZ')
            # title_aug = title_aug.replace('db=NNP_MasterGIRM_core', 'Masai Giraffes')
            # title_aug = title_aug.replace('db=GZ_ALL', 'Grevy\'s Zebras')
        return title_aug
def get_fname_aug(testres, **kwargs):
import re
title_aug = testres.get_title_aug(**kwargs)
valid_regex = '-a-zA-Z0-9_.() '
valid_extra = '=,'
valid_regex += valid_extra
title_aug = title_aug.replace(' ', '_') # spaces suck
fname_aug = re.sub('[^' + valid_regex + ']+', '', title_aug)
fname_aug = fname_aug.strip('_')
return fname_aug
def print_pcfg_info(testres):
"""
Prints verbose information about each pipeline configuration
>>> from wbia.expt.test_result import * # NOQA
"""
# TODO: Rectify with other printers
# for pcfgx, (pipecfg, lbl) in enumerate(zip(pipecfg_list, pipecfg_lbls)):
# logger.info('+--- %d / %d ===' % (pcfgx, (len(pipecfg_list))))
# ut.colorprint(lbl, 'white')
# logger.info(pipecfg.get_cfgstr())
# logger.info('L___')
# for qreq_ in testres.cfgx2_qreq_:
# logger.info(qreq_.get_full_cfgstr())
# cfgdict_list = [qreq_.qparams for qreq_ in testres.cfgx2_qreq_]
experiment_helpers.print_pipe_configs(testres.cfgx2_pcfg, testres.cfgx2_qreq_)
def print_acfg_info(testres, **kwargs):
"""
Prints verbose information about the annotations used in each test
configuration
CommandLine:
python -m wbia --tf TestResult.print_acfg_info
Kwargs:
see ibs.get_annot_stats_dict
hashid, per_name, per_qual, per_vp, per_name_vpedge, per_image,
min_name_hourdist
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_MTEST',
>>> a=['ctrl::unctrl_comp'],
>>> t=['candk:K=[1,2]'])
>>> ibs = None
>>> result = testres.print_acfg_info()
>>> print(result)
"""
from wbia.expt import annotation_configs
ibs = testres.ibs
# Get unique annotation configs
cfgx2_acfg_label = annotation_configs.get_varied_acfg_labels(testres.cfgx2_acfg)
flags = ut.flag_unique_items(cfgx2_acfg_label)
qreq_list = ut.compress(testres.cfgx2_qreq_, flags)
acfg_list = ut.compress(testres.cfgx2_acfg, flags)
expanded_aids_list = [(qreq_.qaids, qreq_.daids) for qreq_ in qreq_list]
annotation_configs.print_acfg_list(acfg_list, expanded_aids_list, ibs, **kwargs)
def print_unique_annot_config_stats(testres, ibs=None):
r"""
Args:
ibs (IBEISController): wbia controller object(default = None)
CommandLine:
python -m wbia TestResult.print_unique_annot_config_stats
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl::unctrl_comp'])
>>> ibs = None
>>> result = testres.print_unique_annot_config_stats(ibs)
>>> print(result)
"""
if ibs is None:
ibs = testres.ibs
cfx2_dannot_hashid = [
ibs.get_annot_hashid_visual_uuid(daids) for daids in testres.cfgx2_daids
]
unique_daids = ut.compress(
testres.cfgx2_daids, ut.flag_unique_items(cfx2_dannot_hashid)
)
with ut.Indenter('[acfgstats]'):
logger.info('+====')
logger.info('Printing %d unique annotconfig stats' % (len(unique_daids)))
common_acfg = testres.common_acfg
common_acfg['common'] = ut.dict_filter_nones(common_acfg['common'])
logger.info('testres.common_acfg = ' + ut.repr2(common_acfg))
logger.info(
'param_basis(len(daids)) = %r' % (testres.get_param_basis('len(daids)'),)
)
for count, daids in enumerate(unique_daids):
logger.info('+---')
logger.info('acfgx = %r/%r' % (count, len(unique_daids)))
if testres.has_constant_qaids():
ibs.print_annotconfig_stats(testres.qaids, daids)
else:
ibs.print_annot_stats(daids, prefix='d')
logger.info('L___')
    def report(testres):
        # Thin convenience alias: prints results with default options.
        testres.print_results()
def print_results(testres, **kwargs):
r"""
CommandLine:
python -m wbia --tf TestResult.print_results
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.expt import harness
>>> ibs, testres = harness.testdata_expts('PZ_MTEST')
>>> result = testres.print_results()
>>> print(result)
"""
from wbia.expt import experiment_printres
ibs = testres.ibs
experiment_printres.print_results(ibs, testres, **kwargs)
def get_common_qaids(testres):
if not testres.has_constant_qaids():
# Get only cases the tests share for now
common_qaids = reduce(np.intersect1d, testres.cfgx2_qaids)
return common_qaids
else:
return testres.qaids
def get_all_qaids(testres):
all_qaids = np.array(ut.unique(ut.flatten(testres.cfgx2_qaids)))
return all_qaids
    def get_test_qaids(testres):
        # Transition function: currently returns ALL qaids rather than only
        # those common to every config (older alternatives kept for reference).
        return testres.get_all_qaids()
        # return testres.get_common_qaids()
        # all_qaids = ut.unique(ut.flatten(testres.cfgx2_qaids))
        # return all_qaids
def get_all_tags(testres):
r"""
CommandLine:
python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :
python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h
python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h,max_gt_rank=5
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])
>>> filt_cfg = main_helpers.testdata_filtcfg()
>>> case_pos_list = testres.case_sample2(filt_cfg)
>>> all_tags = testres.get_all_tags()
>>> selected_tags = ut.take(all_tags, case_pos_list.T[0])
>>> flat_tags = list(map(str, ut.flatten(ut.flatten(selected_tags))))
>>> print(ut.repr2(ut.dict_hist(flat_tags), key_order_metric='val'))
>>> ut.quit_if_noshow()
>>> import wbia.plottool as pt
>>> pt.word_histogram2(flat_tags, fnum=1, pnum=(1, 2, 1))
>>> pt.wordcloud(' '.join(flat_tags), fnum=1, pnum=(1, 2, 2))
>>> pt.set_figtitle(ut.get_cfg_lbl(filt_cfg))
>>> ut.show_if_requested()
"""
gt_tags = testres.get_gt_tags()
gf_tags = testres.get_gf_tags()
all_tags = [ut.list_zipflatten(*item) for item in zip(gf_tags, gt_tags)]
return all_tags
def get_gf_tags(testres):
r"""
Returns:
list: case_pos_list
CommandLine:
python -m wbia --tf TestResult.get_gf_tags --db PZ_Master1 --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])
>>> filt_cfg = main_helpers.testdata_filtcfg()
>>> case_pos_list = testres.case_sample2(filt_cfg)
>>> gf_tags = testres.get_gf_tags()
"""
ibs = testres.ibs
truth2_prop, prop2_mat = testres.get_truth2_prop()
gf_annotmatch_rowids = truth2_prop['gf']['annotmatch_rowid']
gf_tags = ibs.unflat_map(ibs.get_annotmatch_case_tags, gf_annotmatch_rowids)
return gf_tags
def get_gt_tags(testres):
ibs = testres.ibs
truth2_prop, prop2_mat = testres.get_truth2_prop()
gt_annotmatch_rowids = truth2_prop['gt']['annotmatch_rowid']
gt_tags = ibs.unflat_map(ibs.get_annotmatch_case_tags, gt_annotmatch_rowids)
return gt_tags
def get_gt_annot_tags(testres):
ibs = testres.ibs
truth2_prop, prop2_mat = testres.get_truth2_prop()
gt_annot_tags = ibs.unflat_map(ibs.get_annot_case_tags, truth2_prop['gt']['aid'])
return gt_annot_tags
def get_query_annot_tags(testres):
# FIXME: will break with new config structure
ibs = testres.ibs
truth2_prop, prop2_mat = testres.get_truth2_prop()
unflat_qids = np.tile(testres.qaids[:, None], (len(testres.cfgx2_qaids)))
query_annot_tags = ibs.unflat_map(ibs.get_annot_case_tags, unflat_qids)
return query_annot_tags
def get_gtquery_annot_tags(testres):
gt_annot_tags = testres.get_gt_annot_tags()
query_annot_tags = testres.get_query_annot_tags()
both_tags = [
[ut.flatten(t) for t in zip(*item)]
for item in zip(query_annot_tags, gt_annot_tags)
]
return both_tags
def case_sample2(testres, filt_cfg, qaids=None, return_mask=False, verbose=None):
r"""
Filters individual test result cases based on how they performed, what
tags they had, and various other things.
Args:
filt_cfg (dict):
Returns:
list: case_pos_list (list of (qx, cfgx)) or isvalid mask
CommandLine:
python -m wbia TestResult.case_sample2
python -m wbia TestResult.case_sample2:0
python -m wbia TestResult.case_sample2:1 --db GZ_ALL --filt :min_tags=1
python -m wbia TestResult.case_sample2:1 --db PZ_Master1 --filt :min_gf_tags=1
python -m wbia TestResult.case_sample2:2 --db PZ_Master1
Example:
>>> # DISABLE_DOCTEST
>>> # The same results is achievable with different filter config settings
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> verbose = True
>>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])
>>> filt_cfg1 = {'fail': True}
>>> case_pos_list1 = testres.case_sample2(filt_cfg1)
>>> filt_cfg2 = {'min_gtrank': 1}
>>> case_pos_list2 = testres.case_sample2(filt_cfg2)
>>> filt_cfg3 = {'min_gtrank': 0}
>>> case_pos_list3 = testres.case_sample2(filt_cfg3)
>>> filt_cfg4 = {}
>>> case_pos_list4 = testres.case_sample2(filt_cfg4)
>>> assert np.all(case_pos_list1 == case_pos_list2), 'should be equiv configs'
>>> assert np.any(case_pos_list2 != case_pos_list3), 'should be diff configs'
>>> assert np.all(case_pos_list3 == case_pos_list4), 'should be equiv configs'
>>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:sv_on=[True,False]'])
>>> filt_cfg5 = filt_cfg1.copy()
>>> mask5 = testres.case_sample2(filt_cfg5, return_mask=True)
>>> case_pos_list5 = testres.case_sample2(filt_cfg5, return_mask=False)
>>> assert len(mask5.shape) == 2
>>> assert np.all(mask5.T[0] == mask5.T[1])
>>> filt_cfg6 = {'fail': True, 'allcfg': True}
>>> mask6 = testres.case_sample2(filt_cfg6, return_mask=True)
>>> assert np.all(mask6.T[0] == mask6.T[1])
>>> print(mask5)
>>> print(case_pos_list5)
>>> filt_cfg = filt_cfg7 = {'disagree': True}
>>> case_pos_list7 = testres.case_sample2(filt_cfg7, verbose=verbose)
>>> print(case_pos_list7)
Example:
>>> # SCRIPT
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])
>>> filt_cfg = main_helpers.testdata_filtcfg()
>>> case_pos_list = testres.case_sample2(filt_cfg)
>>> result = ('case_pos_list = %s' % (str(case_pos_list),))
>>> print(result)
>>> # Extra stuff
>>> all_tags = testres.get_all_tags()
>>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])
>>> print('selcted_tags = %r' % (selcted_tags,))
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:K=[1,2,3]'])
>>> ut.exec_funckw(testres.case_sample2, globals())
>>> filt_cfg = {'fail': True, 'min_gtrank': 1, 'max_gtrank': None, 'min_gf_timedelta': '24h'}
>>> ibs, testres = main_helpers.testdata_expts('humpbacks_fb', a=['default:has_any=hasnotch,mingt=2,qindex=0:300,dindex=0:300'], t=['default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_net=annot_simple', 'default:proot=vsmany'], qaid_override=[12])
>>> filt_cfg = ':disagree=True,index=0:8,min_gtscore=.00001,require_all_cfg=True'
>>> #filt_cfg = cfghelpers.parse_argv_cfg('--filt')[0]
>>> case_pos_list = testres.case_sample2(filt_cfg, verbose=True)
>>> result = ('case_pos_list = %s' % (str(case_pos_list),))
>>> print(result)
>>> # Extra stuff
>>> all_tags = testres.get_all_tags()
>>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])
>>> print('selcted_tags = %r' % (selcted_tags,))
logger.info('qaid = %r' % (qaid,))
logger.info('qx = %r' % (qx,))
logger.info('cfgxs = %r' % (cfgxs,))
# print testres info about this item
take_cfgs = ut.partial(ut.take, index_list=cfgxs)
take_qx = ut.partial(ut.take, index_list=qx)
truth_cfgs = ut.hmap_vals(take_qx, truth2_prop)
truth_item = ut.hmap_vals(take_cfgs, truth_cfgs, max_depth=1)
prop_cfgs = ut.hmap_vals(take_qx, prop2_mat)
prop_item = ut.hmap_vals(take_cfgs, prop_cfgs, max_depth=0)
logger.info('truth2_prop[item] = ' + ut.repr3(truth_item, nl=2))
logger.info('prop2_mat[item] = ' + ut.repr3(prop_item, nl=1))
"""
from wbia.expt import cfghelpers
if verbose is None:
verbose = ut.NOT_QUIET
if verbose:
logger.info('[testres] case_sample2')
if isinstance(filt_cfg, str):
filt_cfg = [filt_cfg]
if isinstance(filt_cfg, list):
_combos = cfghelpers.parse_cfgstr_list2(filt_cfg, strict=False)
filt_cfg = ut.flatten(_combos)[0]
if isinstance(filt_cfg, str):
_combos = cfghelpers.parse_cfgstr_list2([filt_cfg], strict=False)
filt_cfg = ut.flatten(_combos)[0]
if filt_cfg is None:
filt_cfg = {}
qaids = testres.get_test_qaids() if qaids is None else qaids
truth2_prop, prop2_mat = testres.get_truth2_prop(qaids)
ibs = testres.ibs
# Initialize isvalid flags to all true
# np.ones(prop2_mat['is_success'].shape, dtype=np.bool)
participates = prop2_mat['participates']
is_valid = participates.copy()
def unflat_tag_filterflags(tags_list, **kwargs):
from wbia import tag_funcs
flat_tags, cumsum = ut.invertible_flatten2(tags_list)
flat_flags = tag_funcs.filterflags_general_tags(flat_tags, **kwargs)
flags = np.array(ut.unflatten2(flat_flags, cumsum))
return flags
UTFF = unflat_tag_filterflags
def cols_disagree(mat, val):
"""
is_success = prop2_mat['is_success']
"""
nCols = mat.shape[1]
sums = mat.sum(axis=1)
# Find out which rows have different values
disagree_flags1d = np.logical_and(sums > 0, sums < nCols)
disagree_flags2d = np.tile(disagree_flags1d[:, None], (1, nCols))
if not val:
# User asked for rows that agree
flags = np.logical_not(disagree_flags2d)
else:
flags = disagree_flags2d
return flags
def cfg_scoresep(mat, val, op):
"""
Compares scores between different configs
op = operator.ge
is_success = prop2_mat['is_success']
"""
# import scipy.spatial.distance as spdist
nCols = mat.shape[1]
pdistx = vt.pdist_indicies(nCols)
pdist_list = np.array([vt.safe_pdist(row) for row in mat])
flags_list = op(pdist_list, val)
colx_list = [
np.unique(ut.flatten(ut.compress(pdistx, flags))) for flags in flags_list
]
offsets = np.arange(0, nCols * len(mat), step=nCols)
idx_list = ut.flatten(
[colx + offset for colx, offset in zip(colx_list, offsets)]
)
mask = vt.index_to_boolmask(idx_list, maxval=offsets[-1] + nCols)
flags = mask.reshape(mat.shape)
return flags
# List of rules that can filter results
rule_list = [
('disagree', lambda val: cols_disagree(prop2_mat['is_failure'], val)),
(
'min_gt_cfg_scoresep',
lambda val: cfg_scoresep(truth2_prop['gt']['score'], val, operator.ge),
),
('fail', prop2_mat['is_failure']),
('success', prop2_mat['is_success']),
('min_gtrank', partial(operator.ge, truth2_prop['gt']['rank'])),
('max_gtrank', partial(operator.le, truth2_prop['gt']['rank'])),
('max_gtscore', partial(operator.le, truth2_prop['gt']['score'])),
('min_gtscore', partial(operator.ge, truth2_prop['gt']['score'])),
('min_gf_timedelta', partial(operator.ge, truth2_prop['gf']['timedelta'])),
('max_gf_timedelta', partial(operator.le, truth2_prop['gf']['timedelta'])),
# Tag filtering
# FIXME: will break with new config structure
('min_tags', lambda val: UTFF(testres.get_all_tags(), min_num=val)),
('max_tags', lambda val: UTFF(testres.get_all_tags(), max_num=val)),
('min_gf_tags', lambda val: UTFF(testres.get_gf_tags(), min_num=val)),
('max_gf_tags', lambda val: UTFF(testres.get_gf_tags(), max_num=val)),
('min_gt_tags', lambda val: UTFF(testres.get_gt_tags(), min_num=val)),
('max_gt_tags', lambda val: UTFF(testres.get_gt_tags(), max_num=val)),
(
'min_query_annot_tags',
lambda val: UTFF(testres.get_query_annot_tags(), min_num=val),
),
(
'min_gt_annot_tags',
lambda val: UTFF(testres.get_gt_annot_tags(), min_num=val),
),
(
'min_gtq_tags',
lambda val: UTFF(testres.get_gtquery_annot_tags(), min_num=val),
),
(
'max_gtq_tags',
lambda val: UTFF(testres.get_gtquery_annot_tags(), max_num=val),
),
('without_gf_tag', lambda val: UTFF(testres.get_gf_tags(), has_none=val)),
('without_gt_tag', lambda val: UTFF(testres.get_gt_tags(), has_none=val)),
('with_gf_tag', lambda val: UTFF(testres.get_gf_tags(), has_any=val)),
('with_gt_tag', lambda val: UTFF(testres.get_gt_tags(), has_any=val)),
('with_tag', lambda val: UTFF(testres.get_all_tags(), has_any=val)),
('without_tag', lambda val: UTFF(testres.get_all_tags(), has_none=val)),
]
rule_dict = ut.odict(rule_list)
rule_list.append(('max_gf_td', rule_dict['max_gf_timedelta']))
rule_list.append(('min_gf_td', rule_dict['min_gf_timedelta']))
filt_cfg_ = copy.deepcopy(filt_cfg)
# hack to convert to seconds
for tdkey in filt_cfg_.keys():
# timedelta_keys = ['min_gf_timedelta', 'max_gf_timedelta']
# for tdkey in timedelta_keys:
if tdkey.endswith('_timedelta'):
filt_cfg_[tdkey] = ut.ensure_timedelta(filt_cfg_[tdkey])
class VerbFilterInfo(object):
def __init__(self):
self.prev_num_valid = None
def print_pre(self, is_valid, filt_cfg_):
num_valid = is_valid.sum()
logger.info(
'[testres] Sampling from is_valid.size=%r with filt=%r'
% (is_valid.size, ut.get_cfg_lbl(filt_cfg_))
)
logger.info(' * is_valid.shape = %r' % (is_valid.shape,))
logger.info(' * num_valid = %r' % (num_valid,))
self.prev_num_valid = num_valid
def print_post(self, is_valid, flags, msg):
if flags is not None:
num_passed = flags.sum()
num_valid = is_valid.sum()
num_invalidated = self.prev_num_valid - num_valid
logger.info(msg)
if num_invalidated == 0:
if flags is not None:
logger.info(' * num_passed = %r' % (num_passed,))
logger.info(' * num_invalided = %r' % (num_invalidated,))
else:
logger.info(' * prev_num_valid = %r' % (self.prev_num_valid,))
logger.info(' * num_valid = %r' % (num_valid,))
# logger.info(' * is_valid.shape = %r' % (is_valid.shape,))
self.prev_num_valid = num_valid
verbinfo = VerbFilterInfo()
if verbose:
verbinfo.print_pre(is_valid, filt_cfg_)
# Pop irrelevant info
ut.delete_keys(filt_cfg_, ['_cfgstr', '_cfgindex', '_cfgname', '_cfgtype'])
# Pop other non-rule config options
valid_rules = []
def poprule(rulename, default):
# register other rule names for debuging
valid_rules.append(rulename)
return filt_cfg_.pop(rulename, default)
allcfg = poprule('allcfg', None)
orderby = poprule('orderby', None)
reverse = poprule('reverse', None)
sortasc = poprule('sortasc', None)
sortdsc = poprule('sortdsc', poprule('sortdesc', None))
max_pername = poprule('max_pername', None)
require_all_cfg = poprule('require_all_cfg', None)
index = poprule('index', None)
# Pop all chosen rules
rule_value_list = [poprule(key, None) for key, rule in rule_list]
# Assert that only valid configurations were given
if len(filt_cfg_) > 0:
logger.info('ERROR')
logger.info('filtcfg valid rules are = %s' % (ut.repr2(valid_rules, nl=1),))
for key in filt_cfg_.keys():
logger.info(
'did you mean %r instead of %r?'
% (ut.closet_words(key, valid_rules)[0], key)
)
raise NotImplementedError(
'Unhandled filt_cfg.keys() = %r' % (filt_cfg_.keys())
)
# Remove test cases that do not satisfy chosen rules
chosen_rule_idxs = ut.where([val is not None for val in rule_value_list])
chosen_rules = ut.take(rule_list, chosen_rule_idxs)
chosen_vals = ut.take(rule_value_list, chosen_rule_idxs)
for (key, rule), val in zip(chosen_rules, chosen_vals):
if isinstance(rule, np.ndarray):
# When a rule is an ndarray it must have boolean values
flags = rule == val
else:
flags = rule(val)
# HACK: flags are forced to be false for non-participating cases
flags = np.logical_and(flags, participates)
# conjunctive normal form of satisfiability
is_valid = np.logical_and(is_valid, flags)
if verbose:
verbinfo.print_post(is_valid, flags, 'SampleRule: %s = %r' % (key, val))
# HACK:
# If one config for a row passes the filter then all configs should pass
if allcfg:
is_valid = np.logical_or(np.logical_or.reduce(is_valid.T)[:, None], is_valid)
is_valid = np.logical_and(is_valid, participates)
qx_list, cfgx_list = np.nonzero(is_valid)
# Determine a good ordering of the test cases
if sortdsc is not None:
assert orderby is None, 'use orderby or sortasc'
assert reverse is None, 'reverse does not work with sortdsc'
orderby = sortdsc
reverse = True
elif sortasc is not None:
assert reverse is None, 'reverse does not work with sortasc'
assert orderby is None, 'use orderby or sortasc'
orderby = sortasc
reverse = False
else:
reverse = False
if orderby is not None:
# if orderby == 'gtscore':
# order_values = truth2_prop['gt']['score']
# elif orderby == 'gfscore':
# order_values = truth2_prop['gf']['score']
# else:
import re
order_values = None
for prefix_pattern in ['^gt_?', '^gf_?']:
prefix_match = re.match(prefix_pattern, orderby)
if prefix_match is not None:
truth = prefix_pattern[1:3]
propname = orderby[prefix_match.end() :]
if verbose:
logger.info(
'Ordering by truth=%s propname=%s' % (truth, propname)
)
order_values = truth2_prop[truth][propname]
break
if order_values is None:
raise NotImplementedError('Unknown orerby=%r' % (orderby,))
else:
order_values = np.arange(is_valid.size).reshape(is_valid.shape)
# Convert mask into indicies
flat_order = order_values[is_valid]
# Flat sorting indeices in a matrix
if verbose:
if verbose:
logger.info('Reversing ordering (descending)')
else:
logger.info('Normal ordering (ascending)')
if reverse:
sortx = flat_order.argsort()[::-1]
else:
sortx = flat_order.argsort()
qx_list = qx_list.take(sortx, axis=0)
cfgx_list = cfgx_list.take(sortx, axis=0)
# Return at most ``max_pername`` annotation examples per name
if max_pername is not None:
if verbose:
logger.info('Returning at most %d cases per name ' % (max_pername,))
# FIXME: multiple configs
_qaid_list = np.take(qaids, qx_list)
_qnid_list = ibs.get_annot_nids(_qaid_list)
_valid_idxs = []
seen_ = ut.ddict(lambda: 0)
for idx, _qnid in enumerate(_qnid_list):
if seen_[_qnid] < max_pername:
seen_[_qnid] += 1
_valid_idxs.append(idx)
_qx_list = qx_list[_valid_idxs]
_cfgx_list = cfgx_list[_valid_idxs]
_valid_index = np.vstack((_qx_list, _cfgx_list)).T
is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
qx_list = _qx_list
cfgx_list = _cfgx_list
if require_all_cfg:
if verbose:
prev_num_valid = is_valid.sum()
logger.info('Enforcing that all configs must pass filters')
logger.info(' * prev_num_valid = %r' % (prev_num_valid,))
qx2_valid_cfgs = ut.group_items(cfgx_list, qx_list)
hasall_cfg = [len(qx2_valid_cfgs[qx]) == testres.nConfig for qx in qx_list]
_qx_list = qx_list.compress(hasall_cfg)
_cfgx_list = cfgx_list.compress(hasall_cfg)
_valid_index = np.vstack((_qx_list, _cfgx_list)).T
is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
qx_list = _qx_list
cfgx_list = _cfgx_list
if verbose:
verbinfo.print_post(
is_valid, None, 'Enforcing that all configs must pass filters'
)
if index is not None:
if isinstance(index, str):
index = ut.smart_cast(index, slice)
_qx_list = ut.take(qx_list, index)
_cfgx_list = ut.take(cfgx_list, index)
_valid_index = np.vstack((_qx_list, _cfgx_list)).T
is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
qx_list = _qx_list
cfgx_list = _cfgx_list
if verbose:
verbinfo.print_post(
is_valid,
None,
'Taking index=%r sample from len(qx_list) = %r'
% (index, len(qx_list)),
)
if not return_mask:
case_pos_list = np.vstack((qx_list, cfgx_list)).T
case_identifier = case_pos_list
else:
if verbose:
logger.info('Converting cases indicies to a 2d-mask')
case_identifier = is_valid
if verbose:
logger.info('Finished case filtering')
logger.info('Final case stats:')
qx_hist = ut.dict_hist(qx_list)
logger.info(
'config per query stats: %r' % (ut.get_stats_str(qx_hist.values()),)
)
logger.info(
'query per config stats: %r'
% (ut.get_stats_str(ut.dict_hist(cfgx_list).values()),)
)
return case_identifier
    def get_truth2_prop(testres, qaids=None, join_acfg=False):
        r"""
        Gather per-case groundtrue/groundfalse properties (aid, rank, score,
        timedelta, annotmatch rowid) as (num_qaids x num_cfgs) matrices.

        Args:
            qaids (list): query aids to use (default: all test qaids)
            join_acfg (bool): if True, collapse columns that share an
                annot config group into a single column

        Returns:
            tuple: (truth2_prop, prop2_mat)

        CommandLine:
            python -m wbia.expt.test_result --exec-get_truth2_prop --show

        Example:
            >>> # xdoctest: +REQUIRES(--slow)
            >>> # ENABLE_DOCTEST
            >>> from wbia.expt.test_result import *  # NOQA
            >>> import wbia
            >>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl'])
            >>> (truth2_prop, prop2_mat) = testres.get_truth2_prop()
            >>> result = '(truth2_prop, prop2_mat) = %s' % str((truth2_prop, prop2_mat))
            >>> print(result)
            >>> ut.quit_if_noshow()
            >>> import wbia.plottool as pt
            >>> ut.show_if_requested()
        """
        ibs = testres.ibs
        test_qaids = testres.get_test_qaids() if qaids is None else qaids
        # test_qaids = ut.random_sample(test_qaids, 20)
        truth2_prop = ut.ddict(ut.odict)
        # TODO: have this function take in a case_pos_list as input instead
        participates = testres.get_infoprop_mat('participant', test_qaids)
        truth2_prop['gt']['aid'] = testres.get_infoprop_mat('qx2_gt_aid', test_qaids)
        truth2_prop['gf']['aid'] = testres.get_infoprop_mat('qx2_gf_aid', test_qaids)
        truth2_prop['gt']['rank'] = testres.get_infoprop_mat('qx2_gt_rank', test_qaids)
        truth2_prop['gf']['rank'] = testres.get_infoprop_mat('qx2_gf_rank', test_qaids)
        truth2_prop['gt']['score'] = testres.get_infoprop_mat(
            'qx2_gt_raw_score', test_qaids
        )
        truth2_prop['gf']['score'] = testres.get_infoprop_mat(
            'qx2_gf_raw_score', test_qaids
        )
        # nan scores are treated as zero
        truth2_prop['gt']['score'] = np.nan_to_num(truth2_prop['gt']['score'])
        truth2_prop['gf']['score'] = np.nan_to_num(truth2_prop['gf']['score'])
        # Cast nans to ints (that are participants)
        # if False:
        # Participating cases with a nan rank get the worst possible rank
        for truth in ['gt', 'gf']:
            rank_mat = truth2_prop[truth]['rank']
            flags = np.logical_and(np.isnan(rank_mat), participates)
            rank_mat[flags] = testres.get_worst_possible_rank()
            # truth2_prop[truth]['rank'] = rank_mat.astype(np.int)
        # A case succeeds when the groundtrue match is ranked first
        is_success = truth2_prop['gt']['rank'] == 0
        is_failure = np.logical_not(is_success)
        # THIS IS NOT THE CASE IF THERE ARE UNKNOWN INDIVIDUALS IN THE DATABASE
        assert np.all(is_success == (truth2_prop['gt']['rank'] == 0))
        # WEIRD THINGS HAPPEN WHEN UNKNOWNS ARE HERE
        # hardness_degree_rank[is_success]
        # These probably just completely failure spatial verification
        # is_weird = hardness_degree_rank == 0
        # Get timedelta and annotmatch rowid
        for truth in ['gt', 'gf']:
            aid_mat = truth2_prop[truth]['aid']
            timedelta_mat = np.vstack(
                [ibs.get_annot_pair_timedelta(test_qaids, aids) for aids in aid_mat.T]
            ).T
            annotmatch_rowid_mat = np.vstack(
                [
                    ibs.get_annotmatch_rowid_from_undirected_superkey(test_qaids, aids)
                    for aids in aid_mat.T
                ]
            ).T
            truth2_prop[truth]['annotmatch_rowid'] = annotmatch_rowid_mat
            truth2_prop[truth]['timedelta'] = timedelta_mat
        prop2_mat = {}
        prop2_mat['is_success'] = is_success
        prop2_mat['is_failure'] = is_failure
        prop2_mat['participates'] = participates
        groupxs = testres.get_cfgx_groupxs()

        def group_prop(val, grouped_flags, groupxs):
            # Collapse config columns within a group into one column,
            # keeping the value of whichever config participates per row.
            nRows = len(val)
            # Allocate space for new val
            new_shape = (nRows, len(groupxs))
            if val.dtype == object or val.dtype.type == object:
                new_val = np.full(new_shape, None, dtype=val.dtype)
            elif ut.is_float(val):
                new_val = np.full(new_shape, np.nan, dtype=val.dtype)
            else:
                new_val = np.zeros(new_shape, dtype=val.dtype)
            # Populate new val
            grouped_vals = vt.apply_grouping(val.T, groupxs)
            _iter = enumerate(zip(grouped_flags, grouped_vals))
            for new_col, (flags, group) in _iter:
                rows, cols = np.where(flags.T)
                new_val[rows, new_col] = group.T[(rows, cols)]
            return new_val

        if join_acfg:
            # Each row must participate in the same number of configs
            assert ut.allsame(participates.sum(axis=1))
            grouped_flags = vt.apply_grouping(participates.T, groupxs)
            # new_prop2_mat = {key: group_prop(val)
            #                 for key, val in prop2_mat.items()}
            # new_truth2_prop = {
            #    truth: {key: group_prop(val)
            #            for key, val in props.items()}
            #    for truth, props in truth2_prop.items()}
            new_prop2_mat = {}
            for key, val in prop2_mat.items():
                new_prop2_mat[key] = group_prop(val, grouped_flags, groupxs)
            new_truth2_prop = {}
            for truth, props in truth2_prop.items():
                new_props = {}
                for key, val in props.items():
                    new_props[key] = group_prop(val, grouped_flags, groupxs)
                new_truth2_prop[truth] = new_props
            prop2_mat_ = new_prop2_mat
            truth2_prop_ = new_truth2_prop
        else:
            prop2_mat_ = prop2_mat
            truth2_prop_ = truth2_prop
        return truth2_prop_, prop2_mat_
def interact_individual_result(testres, qaid, cfgx=0):
ibs = testres.ibs
cfgx_list = ut.ensure_iterable(cfgx)
qreq_list = ut.take(testres.cfgx2_qreq_, cfgx_list)
# Preload any requested configs
cm_list = [qreq_.execute(qaids=[qaid]) for qreq_ in qreq_list]
cfgx2_shortlbl = testres.get_short_cfglbls()
show_kwargs = {
'N': 3,
'ori': True,
'ell_alpha': 0.9,
}
# SHOW ANALYSIS
show_kwargs['show_query'] = False
show_kwargs['viz_name_score'] = True
show_kwargs['show_timedelta'] = True
show_kwargs['show_gf'] = True
show_kwargs['with_figtitle'] = False
for cfgx, cm, qreq_ in zip(cfgx_list, cm_list, qreq_list):
query_lbl = cfgx2_shortlbl[cfgx]
fnum = cfgx
cm.ishow_analysis(
ibs,
figtitle=query_lbl,
fnum=fnum,
annot_mode=1,
qreq_=qreq_,
**show_kwargs
)
def draw_score_diff_disti(testres):
r"""
CommandLine:
python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db PZ_Master1
python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db GZ_Master1
python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td1h -t best --db GIRM_Master1
python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td:qmin_pername=3,dpername=2 -t best --db PZ_Master1
python -m wbia --tf get_annotcfg_list -a varynannots_td -t best --db PZ_Master1
13502
python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td:dsample_size=.01 -t best --show --qaid 13502
python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td -t best --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_Master1', a=['varynannots_td'], t=['best'])
>>> result = testres.draw_score_diff_disti()
>>> print(result)
>>> ut.show_if_requested()
"""
import wbia.plottool as pt
import vtool as vt
# dont look at filtered cases
ibs = testres.ibs
qaids = testres.get_test_qaids()
qaids = ibs.get_annot_tag_filterflags(qaids, {'has_none': 'timedeltaerror'})
gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)
gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)
gt_valid_flags_list = np.isfinite(gt_rawscore).T
gf_valid_flags_list = np.isfinite(gf_rawscore).T
cfgx2_gt_scores = vt.zipcompress(gt_rawscore.T, gt_valid_flags_list)
cfgx2_gf_scores = vt.zipcompress(gf_rawscore.T, gf_valid_flags_list)
# partition by rank
gt_rank = testres.get_infoprop_mat('qx2_gt_rank', qaids=qaids)
gf_ranks = testres.get_infoprop_mat('qx2_gf_rank', qaids=qaids)
cfgx2_gt_ranks = vt.zipcompress(gt_rank.T, gt_valid_flags_list)
cfgx2_rank0_gt_scores = vt.zipcompress(
cfgx2_gt_scores, [ranks == 0 for ranks in cfgx2_gt_ranks]
)
cfgx2_rankX_gt_scores = vt.zipcompress(
cfgx2_gt_scores, [ranks > 0 for ranks in cfgx2_gt_ranks]
)
cfgx2_gf_ranks = vt.zipcompress(gf_ranks.T, gf_valid_flags_list)
cfgx2_rank0_gf_scores = vt.zipcompress(
cfgx2_gf_scores, [ranks == 0 for ranks in cfgx2_gf_ranks]
)
# valid_gtranks = gt_rank[isvalid]
# valid_qaids = qaids[isvalid]
# Hack remove timedelta error
# valid_qaids = valid_qaids[flags]
# valid_gt_rawscore = valid_gt_rawscore[flags]
# valid_gtranks = valid_gtranks[flags]
xdata = list(map(len, testres.cfgx2_daids))
USE_MEDIAN = True # not ut.get_argflag('--use-mean')
# USE_LOG = True
USE_LOG = False
if USE_MEDIAN:
ave = np.median
dev = vt.median_abs_dev
else:
ave = np.mean
dev = np.std
def make_interval_args(arr_list, ave=ave, dev=dev, **kwargs):
# if not USE_MEDIAN:
# # maybe approximate median by removing the most extreme values
# arr_list = [np.array(sorted(arr))[5:-5] for arr in arr_list]
import utool as ut
if USE_LOG:
arr_list = list(map(lambda x: np.log(x + 1), arr_list))
sizes_ = list(map(len, arr_list))
ydata_ = list(map(ave, arr_list))
spread_ = list(map(dev, arr_list))
# ut.get_stats(arr_list, axis=0)
label = kwargs.get('label', '')
label += ' ' + ut.get_funcname(ave)
kwargs['label'] = label
logger.info(
label
+ 'score stats : '
+ ut.repr2(
ut.get_jagged_stats(arr_list, use_median=True), nl=1, precision=1
)
)
return ydata_, spread_, kwargs, sizes_
args_list1 = [
make_interval_args(cfgx2_gt_scores, label='GT', color=pt.TRUE_BLUE),
make_interval_args(cfgx2_gf_scores, label='GF', color=pt.FALSE_RED),
]
args_list2 = [
make_interval_args(
cfgx2_rank0_gt_scores, label='GT-rank = 0', color=pt.LIGHT_GREEN
),
make_interval_args(
cfgx2_rankX_gt_scores, label='GT-rank > 0', color=pt.YELLOW
),
make_interval_args(cfgx2_rank0_gf_scores, label='GF-rank = 0', color=pt.PINK),
# make_interval_args(cfgx2_rank2_gt_scores, label='gtrank < 2'),
]
plotargs_list = [args_list1, args_list2]
# plotargs_list = [args_list1]
ymax = -np.inf
ymin = np.inf
for args_list in plotargs_list:
ydata_list = np.array(ut.get_list_column(args_list, 0))
spread = np.array(ut.get_list_column(args_list, 1))
ymax = max(ymax, np.array(ydata_list + spread).max())
ymin = min(ymax, np.array(ydata_list - spread).min())
ylabel = 'log name score' if USE_LOG else 'name score'
statickw = dict(
# title='scores vs dbsize',
xlabel='database size (number of annotations)',
ylabel=ylabel,
# xscale='log', ymin=0, ymax=10,
linewidth=2,
spread_alpha=0.5,
lightbg=True,
marker='o',
# xmax='data',
ymax=ymax,
ymin=ymin,
xmax='data',
xmin='data',
)
fnum = pt.ensure_fnum(None)
pnum_ = pt.make_pnum_nextgen(len(plotargs_list), 1)
for args_list in plotargs_list:
ydata_list = ut.get_list_column(args_list, 0)
spread_list = ut.get_list_column(args_list, 1)
kwargs_list = ut.get_list_column(args_list, 2)
sizes_list = ut.get_list_column(args_list, 3)
logger.info('sizes_list = %s' % (ut.repr2(sizes_list, nl=1),))
# Pack kwargs list for multi_plot
plotkw = ut.dict_stack2(kwargs_list, '_list')
plotkw2 = ut.merge_dicts(statickw, plotkw)
pt.multi_plot(
xdata,
ydata_list,
spread_list=spread_list,
fnum=fnum,
pnum=pnum_(),
**plotkw2
)
# pt.adjust_subplots(hspace=.3)
figtitle = 'Score vs DBSize: %s' % (testres.get_title_aug())
pt.set_figtitle(figtitle)
    def draw_rank_cmc(testres):
        """Draw the cumulative match characteristic (rank CMC) curve.

        Thin wrapper around ``experiment_drawing.draw_rank_cmc``.
        """
        from wbia.expt import experiment_drawing

        experiment_drawing.draw_rank_cmc(testres.ibs, testres)
    def draw_match_cases(testres, **kwargs):
        """Draw individual match cases.

        Thin wrapper around ``experiment_drawing.draw_match_cases``;
        *kwargs* are forwarded unchanged.
        """
        from wbia.expt import experiment_drawing

        experiment_drawing.draw_match_cases(testres.ibs, testres, **kwargs)
def draw_failure_cases(testres, **kwargs):
"""
>>> from wbia.other.dbinfo import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts(defaultdb='PZ_MTEST', a='timectrl:qsize=2', t='invar:AI=[False],RI=False', use_cache=False)
"""
from wbia.expt import experiment_drawing
# kwargs = kwargs.copy()
orig_filter = ':'
kwargs['f'] = orig_filter + 'fail'
case_pos_list = testres.case_sample2(':fail=True,index=0:5')
experiment_drawing.draw_match_cases(
testres.ibs,
testres,
case_pos_list=case_pos_list,
annot_modes=[1],
interact=True,
)
def find_score_thresh_cutoff(testres):
"""
FIXME
DUPLICATE CODE
rectify with experiment_drawing
"""
# import wbia.plottool as pt
import vtool as vt
if ut.VERBOSE:
logger.info('[dev] FIX DUPLICATE CODE find_thresh_cutoff')
# from wbia.expt import cfghelpers
assert len(testres.cfgx2_qreq_) == 1, 'can only specify one config here'
cfgx = 0
# qreq_ = testres.cfgx2_qreq_[cfgx]
test_qaids = testres.get_test_qaids()
gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=test_qaids).T[
cfgx
]
gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=test_qaids).T[
cfgx
]
# FIXME: may need to specify which cfg is used in the future
# isvalid = testres.case_sample2(filt_cfg, return_mask=True).T[cfgx]
tp_nscores = gt_rawscore
tn_nscores = gf_rawscore
tn_qaids = tp_qaids = test_qaids
# encoder = vt.ScoreNormalizer(target_tpr=.7)
# logger.info(qreq_.get_cfgstr())
part_attrs = {1: {'qaid': tp_qaids}, 0: {'qaid': tn_qaids}}
fpr = None
tpr = 0.85
encoder = vt.ScoreNormalizer(adjust=8, fpr=fpr, tpr=tpr, monotonize=True)
# tp_scores = tp_nscores
# tn_scores = tn_nscores
name_scores, labels, attrs = encoder._to_xy(tp_nscores, tn_nscores, part_attrs)
encoder.fit(name_scores, labels, attrs)
score_thresh = encoder.learn_threshold2()
# Find intersection point
# TODO: add to score normalizer.
# Improve robustness
# pt.figure()
# pt.plot(xdata, curve)
# pt.plot(x_submax, y_submax, 'o')
return score_thresh
def print_percent_identification_success(testres):
"""
Prints names identified (at rank 1) / names queried.
This combines results over multiple queries of a particular name using
max
OLD, MAYBE DEPRIATE
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
"""
ibs = testres.ibs
qaids = testres.get_test_qaids()
unique_nids, groupxs = ut.group_indices(ibs.get_annot_nids(qaids))
qx2_gt_raw_score = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)
qx2_gf_raw_score = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)
nx2_gt_raw_score = np.array(
[
np.nanmax(scores, axis=0)
for scores in vt.apply_grouping(qx2_gt_raw_score, groupxs)
]
)
nx2_gf_raw_score = np.array(
[
np.nanmax(scores, axis=0)
for scores in vt.apply_grouping(qx2_gf_raw_score, groupxs)
]
)
cfgx2_success = (nx2_gt_raw_score > nx2_gf_raw_score).T
logger.info('Identification success (names identified / names queried)')
for cfgx, success in enumerate(cfgx2_success):
pipelbl = testres.cfgx2_lbl[cfgx]
percent = 100 * success.sum() / len(success)
logger.info(
'%2d) success = %r/%r = %.2f%% -- %s'
% (cfgx, success.sum(), len(success), percent, pipelbl)
)
    def print_config_overlap(testres, with_plot=True):
        """Report how rank-0 success cases overlap between pipeline configs.

        Prints pairwise union / intersection / disjoint / improvement counts
        of successful queries across configs and, when *with_plot* is True,
        draws heatmaps of the overlap matrices.
        """
        truth2_prop, prop2_mat = testres.get_truth2_prop()
        qx2_gt_ranks = truth2_prop['gt']['rank']
        # A query "succeeds" for a config when its groundtrue rank is 0.
        qx2_success = qx2_gt_ranks == 0
        cfgx2_num_correct = np.nansum(qx2_success, axis=0)
        best_cfgx = cfgx2_num_correct.argmax()

        logger.info('Config Overlap')

        # Matrix version
        # disjoint_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)
        # improves_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)
        isect_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)
        union_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)
        for cfgx1 in range(testres.nConfig):
            for cfgx2 in range(testres.nConfig):
                if cfgx1 == cfgx2:
                    # Diagonal: a config's overlap with itself is its own
                    # success count.
                    success_qx1 = np.where(qx2_success.T[cfgx1])[0]
                    isect_mat[cfgx1][cfgx2] = len(success_qx1)
                    union_mat[cfgx1][cfgx2] = len(success_qx1)
                    continue
                success_qx1 = np.where(qx2_success.T[cfgx1])[0]
                success_qx2 = np.where(qx2_success.T[cfgx2])[0]
                union_ = np.union1d(success_qx1, success_qx2)
                isect_ = np.intersect1d(success_qx1, success_qx2)
                # disjoints = np.setdiff1d(union_, isect_)
                # disjoint_mat[cfgx1][cfgx2] = len(disjoints)
                isect_mat[cfgx1][cfgx2] = len(isect_)
                union_mat[cfgx1][cfgx2] = len(union_)
                # improves = np.setdiff1d(success_qx2, isect_)
                # improves_mat[cfgx2][cfgx1] = len(improves)
        n_success_list = np.array(
            [qx2_success.T[cfgx1].sum() for cfgx1 in range(testres.nConfig)]
        )
        # improves[i][j]: successes config i has that config j lacks.
        improves_mat = n_success_list[:, None] - isect_mat
        disjoint_mat = union_mat - isect_mat
        logger.info('n_success_list = %r' % (n_success_list,))
        logger.info('union_mat =\n%s' % (union_mat,))
        logger.info('isect_mat =\n%s' % (isect_mat,))
        logger.info('cfgx1 and cfgx2 have <x> not in common')
        logger.info('disjoint_mat =\n%s' % (disjoint_mat,))
        logger.info('cfgx1 helps cfgx2 by <x>')
        logger.info('improves_mat =\n%s' % (improves_mat,))
        logger.info('improves_mat.sum(axis=1) = \n%s' % (improves_mat.sum(axis=1),))
        bestx_by_improves = improves_mat.sum(axis=1).argmax()
        logger.info('bestx_by_improves = %r' % (bestx_by_improves,))

        # Numbered version: compare every config against the best one.
        logger.info('best_cfgx = %r' % (best_cfgx,))
        for cfgx in range(testres.nConfig):
            if cfgx == best_cfgx:
                continue
            pipelbl = testres.cfgx2_lbl[cfgx]
            qx2_anysuccess = np.logical_or(qx2_success.T[cfgx], qx2_success.T[best_cfgx])
            # Queries that other got right that best did not get right
            qx2_othersuccess = np.logical_and(
                qx2_anysuccess, np.logical_not(qx2_success.T[best_cfgx])
            )
            logger.info(
                'cfgx %d) has %d success cases that that the best config does not have -- %s'
                % (cfgx, qx2_othersuccess.sum(), pipelbl)
            )

        # NOTE(review): the next expression has no effect — looks like
        # leftover debugging; confirm and remove.
        qx2_success.T[cfgx]

        if with_plot:
            # y = None
            # for x in qx2_gt_ranks:
            #     x = np.minimum(x, 3)
            #     z = (x.T - x[:, None])
            #     if np.any(z):
            #         logger.info(z)
            #     if y is None:
            #         y = z
            #     else:
            #         y += z

            if False:
                # Chip size stats
                ave_dlen = [  # NOQA
                    np.sqrt(
                        np.array(
                            testres.ibs.get_annot_chip_dlensqrd(
                                testres.qaids, config2_=qreq_.query_config2_
                            )
                        )
                    ).mean()
                    for qreq_ in testres.cfgx2_qreq_
                ]

                ave_width_inimg = [  # NOQA
                    np.array(
                        testres.ibs.get_annot_bboxes(
                            testres.qaids, config2_=qreq_.query_config2_
                        )
                    )[:, 2 + 0].mean()
                    for qreq_ in testres.cfgx2_qreq_
                ]

                ave_width = [  # NOQA
                    np.array(
                        testres.ibs.get_annot_chip_sizes(
                            testres.qaids, config2_=qreq_.query_config2_
                        )
                    )[:, 0].mean()
                    for qreq_ in testres.cfgx2_qreq_
                ]

            import wbia.plottool as pt

            # pt.plt.imshow(-y, interpolation='none', cmap='hot')
            # pt.plt.colorbar()

            def label_ticks():
                # Label both axes of the current heatmap with the (truncated)
                # varied-config labels.
                import wbia.plottool as pt

                ax = pt.gca()
                labels = testres.get_varied_labels()
                ax.set_xticks(list(range(len(labels))))
                ax.set_xticklabels([lbl[0:100] for lbl in labels])
                [lbl.set_rotation(-25) for lbl in ax.get_xticklabels()]
                [lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()]

                # xgrid, ygrid = np.meshgrid(range(len(labels)), range(len(labels)))
                # pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
                ax.set_yticks(list(range(len(labels))))
                ax.set_yticklabels([lbl[0:100] for lbl in labels])
                [lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()]
                [lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()]
                # [lbl.set_rotation(20) for lbl in ax.get_yticklabels()]

            pt.figure(fnum=pt.next_fnum())
            pt.plt.imshow(union_mat, interpolation='none', cmap='hot')
            pt.plt.colorbar()
            pt.set_title(
                'union mat: cfg<x> and cfg<y> have <z> success cases in in total'
            )
            # NOTE(review): label_ticks() is called twice here but not at all
            # for the disjoint figure below — looks inconsistent; confirm.
            label_ticks()
            label_ticks()

            pt.figure(fnum=pt.next_fnum())
            pt.plt.imshow(isect_mat, interpolation='none', cmap='hot')
            pt.plt.colorbar()
            pt.set_title('isect mat: cfg<x> and cfg<y> have <z> success cases in common')
            label_ticks()

            pt.figure(fnum=pt.next_fnum())
            pt.plt.imshow(disjoint_mat, interpolation='none', cmap='hot')
            pt.plt.colorbar()
            pt.set_title(
                'disjoint mat (union - isect): cfg<x> and cfg<y> have <z> success cases not in common'
            )

            # xgrid, ygrid = np.meshgrid(range(len(labels)), range(len(labels)))
            # pt.plot_surface3d(xgrid, ygrid, improves_mat)
            pt.figure(fnum=pt.next_fnum())
            pt.plt.imshow(improves_mat, interpolation='none', cmap='hot')
            pt.plt.colorbar()
            pt.set_title(
                'improves mat (diag.T - isect): cfg<x> got <z> qaids that cfg <y> missed'
            )
            label_ticks()
            # pt.colorbar(np.unique(y))
def map_score(testres):
"""
For each query compute a precision recall curve.
Then, for each query compute the average precision.
Then take the mean of all average precisions to obtain the mAP.
Script:
>>> #ibs = wbia.opendb('Oxford')
>>> #ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True]')
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True],can_match_sameimg=True')
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True')
"""
import sklearn.metrics
qaids = testres.get_test_qaids()
ibs = testres.ibs
PLOT = True
PLOT = False
cfgx2_cms = []
for qreq_ in testres.cfgx2_qreq_:
cm_list = qreq_.execute(qaids)
cm_list = [cm.extend_results(qreq_) for cm in cm_list]
for cm in cm_list:
cm.score_annot_csum(qreq_)
# #cm.sortself()
cfgx2_cms.append(cm_list)
map_list = []
unique_names, groupxs = ut.group_indices(ibs.annots(qaids).names)
for cm_list, qreq_ in zip(cfgx2_cms, testres.cfgx2_qreq_):
if PLOT:
import wbia.plottool as pt
pt.qt4ensure()
fnum = pt.ensure_fnum(None)
pt.figure(fnum=fnum)
avep_list = []
# fnum = pt.ensure_fnum(None)
# pt.figure(fnum=fnum)
for cm in cm_list:
# Ignore junk images
flags = np.array(ibs.annots(cm.daid_list).quality_texts) != 'junk'
assert np.all(flags)
daid_list = cm.daid_list
dnid_list = cm.dnid_list
y_true = (cm.qnid == dnid_list).compress(flags).astype(np.int)
y_score = cm.annot_score_list.compress(flags)
y_score[~np.isfinite(y_score)] = 0
y_score = np.nan_to_num(y_score)
sortx = np.argsort(y_score)[::-1]
daid_list = daid_list.take(sortx)
dnid_list = dnid_list.take(sortx)
y_true = y_true.take(sortx)
y_score = y_score.take(sortx)
# logger.info(cm.get_annot_ranks(cm.get_top_gt_aids(ibs)))
precision, recall, thresholds = sklearn.metrics.precision_recall_curve(
y_true, y_score
)
if PLOT:
pt.plot2(
recall,
precision,
marker='',
linestyle='-',
x_label='recall',
y_label='precision',
)
avep = sklearn.metrics.average_precision_score(y_true, y_score)
# avep = [
# sklearn.metrics.average_precision_score(y_true, y_score, average=average)
# for average in ['micro', 'macro', 'samples', 'weighted']
# ]
# if np.any(np.isnan(avep)):
# break
# if np.isnan(avep):
# break
avep_list.append(avep)
# mean_ave_precision = np.mean(avep_list, axis=0)
name_to_ave = [np.mean(a) for a in ut.apply_grouping(avep_list, groupxs)]
name_to_ave_ = dict(zip(unique_names, name_to_ave))
logger.info(
'name_to_ave_ = %s' % (ut.align(ut.repr3(name_to_ave_, precision=3), ':'))
)
mean_ave_precision = np.mean(name_to_ave)
logger.info('mean_ave_precision = %r' % (mean_ave_precision,))
map_list.append(mean_ave_precision)
return map_list
    def embed_testres(testres):
        """
        Drop into an interactive shell with ``testres`` in scope.

        CommandLine:
            python -m wbia TestResults.embed_testres

        Example:
            >>> # SCRIPT
            >>> from wbia.expt.test_result import * # NOQA
            >>> from wbia.init import main_helpers
            >>> ibs, testres = main_helpers.testdata_expts(defaultdb='PZ_MTEST')
            >>> embed_testres(testres)
        """
        ut.embed()
def get_options(testres):
func_list = [
testres.print_results,
testres.draw_rank_cmc,
testres.draw_match_cases,
testres.embed_testres,
]
return func_list
def get_actions(testres):
actions = ut.odict(
[
(testres.print_results, (['print', 'p'], '')),
(testres.draw_rank_cmc, (['cmc'], '')),
(testres.draw_match_cases, (['case'], '')),
(testres.embed_testres, (['embed', 'ipy'], '')),
]
)
return actions
def help(testres):
# list functions that accept the standard interface
prefix = 'wbia'
suffix = testres.reconstruct_test_flags()
func_list = testres.get_options()
funcname_list = [ut.get_funcname(func) for func in func_list]
cmdstr_list = [' '.join([prefix, funcname, suffix]) for funcname in funcname_list]
ut.cprint('Available Functions:', 'blue')
logger.info(', '.join(funcname_list))
ut.cprint('Available Commandline:', 'blue')
logger.info('\n'.join(cmdstr_list))
| 41.211403 | 339 | 0.563803 | [
"Apache-2.0"
] | WildMeOrg/wildbook-ia | wbia/expt/test_result.py | 111,317 | Python |
from neopixel import NeoPixel
from machine import Pin
from time import sleep
from os import urandom
# MicroPython reaction game on an 8-pixel NeoPixel strip:
# three pixels light up with random colors; press the button while all
# three show the same color to win.  Wins speed the game up; pressing at
# the wrong time flashes red and resets the speed.
#pin = Pin(14, Pin.OUT)
np = NeoPixel(Pin(2, Pin.OUT), 8)  # strip of 8 pixels on GPIO2
pin = Pin(14, Pin.IN)  # push button; reads 0 while pressed
test = 7/10  # seconds between color refreshes; shrinks by 30% per win
while True:
    can_win = True
    if pin.value() == 0:
        # Button already held before the colors change: no win this round,
        # flash the whole strip red as a penalty and restore the start speed.
        can_win = False
        for i in range(20):
            for i in range(0, 8):
                np[i] = 150,0,0
            np.write()
            sleep(1/10)
            for i in range(0, 8):
                np[i] = 0,0,0
            np.write()
            test = 7/10
    # Clear the strip before drawing the new round.
    for i in range(0, 8):
        np[i] = 0,0,0
    np.write()
    # Light the first three pixels with random colors (dim red/green/blue).
    for i in range(0, 3):
        vol = urandom(1)[0]
        if vol%3 == 0:
            np[i] = 10,0,0
        if vol%3 == 1:
            np[i] = 0,10,0
        if vol%3 == 2:
            np[i] = 0,0,10
        np.write()
        sleep(test)
    # Win: button pressed while all three pixels show the same color.
    if pin.value() == 0 and np[0] == np[1] and np[1] == np[2] and can_win:
        # Victory animation: flash the whole strip green.
        for i in range(20):
            for i in range(0, 8):
                np[i] = 0,150,0
            np.write()
            sleep(1/10)
            for i in range(0, 8):
                np[i] = 0,0,0
            np.write()
        test = test*0.7  # speed up for the next round
| 19.770833 | 72 | 0.523709 | [
"MIT"
] | martilad/MI-PYT2018 | lessons/MicroPython/forbes.py | 949 | Python |
"""Advent of Code 2020, day 9 (XMAS cipher).

Part 1: find the first number that is not the sum of any two of the 25
numbers before it.  Part 2: find a contiguous run (length >= 2) summing to
that number and report min + max of the run.
"""
import itertools


def find_invalid(numbers, preamble=25):
    """Return the first value with no pair in its preceding window summing to it."""
    for i in range(preamble, len(numbers)):
        window = numbers[i - preamble:i]
        if not any(a + b == numbers[i] for a, b in itertools.combinations(window, 2)):
            return numbers[i]
    return None


def find_weakness(numbers, target):
    """Return a contiguous run (length >= 2) of *numbers* summing to *target*.

    Uses a sliding window with a running total, which is linear; the previous
    version re-summed a slice on every step (quadratic) and could loop
    forever when the window ran off the end of the list.  Assumes the values
    are non-negative, as in the puzzle input.
    """
    lo = 0
    total = 0
    for hi, value in enumerate(numbers):
        total += value
        # Shrink from the left while we overshoot, keeping at least one item.
        while total > target and lo < hi:
            total -= numbers[lo]
            lo += 1
        if total == target and hi > lo:
            return numbers[lo:hi + 1]
    return None


if __name__ == '__main__':
    with open('input.txt') as file:
        numbers = [int(line) for line in file]
    print(numbers)
    invalid_num = find_invalid(numbers)
    print(invalid_num)
    run = find_weakness(numbers, invalid_num)
    print(run)
    print(min(run) + max(run))
    exit(0)
| 21.972222 | 72 | 0.573957 | [
"MIT"
] | phoenix10k/advent-of-code | aoc-2020/python/day-09/main.py | 791 | Python |
#!/usr/bin/env python
"""Tests for `SEIR` package."""
import os
import pytest
from click.testing import CliRunner
from SEIR import cli
@pytest.fixture
def response():
    """Sample pytest fixture returning a live HTTP response.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # NOTE: hits the network; tests using this fixture need connectivity.
    import requests
    return requests.get('https://github.com/torvalds/linux')
def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument."""
    # The bs4-based assertion is disabled; currently this only verifies that
    # the `response` fixture can be created without raising.
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
class UnixFS:
    """Thin wrapper over filesystem calls, used to demonstrate mocking."""

    @staticmethod
    def rm(filename):
        # Delegates to os.remove so tests can patch it (see test_unix_fs).
        os.remove(filename)
def test_unix_fs(mocker):
    """Verify UnixFS.rm delegates to os.remove (uses pytest-mock's mocker)."""
    mocker.patch('os.remove')
    UnixFS.rm('file')
    os.remove.assert_called_once_with('file')
def test_command_line_interface():
    """Smoke-test the click CLI through its --help output."""
    cli_runner = CliRunner()
    result = cli_runner.invoke(cli.main, ['--help'])
    assert result.exit_code == 0
    assert 'Show this message and exit.' in result.output
"MIT"
] | sellisd/seir | tests/test_SEIR.py | 1,141 | Python |
# -*- coding: utf-8 -*-
# Python 2 walkthrough of file/directory operations with the os module.
# NOTE: running this script has side effects — it creates and then removes
# ./testdir in the current working directory.
import os
print '-------操作文件和目录-------'
# Name of the operating system ('posix' on Unix, 'nt' on Windows).
print os.name + '\n'
print '\n' + '详细的系统信息'
print os.uname()
print '\n' + '环境变量'
print os.environ
print '\n' + '获取某个环境变量的值'
print os.getenv('PATH')
print '\n'
# Absolute path of the current directory:
print os.path.abspath('.')
selfAbsPath = os.path.abspath('.')
# To create a new directory somewhere, first build its full path:
filePathDir = os.path.join(selfAbsPath, 'testdir')
# e.g. '/Users/michael/testdir'
# Then create the directory:
os.mkdir(filePathDir)
# And delete it again:
os.rmdir(filePathDir)
print '-------os.path.join()函数-------'
# os.path.join handles the path separator correctly on every OS.
print '-------os.path.split() 直接让你得到文件扩展名-------'
print os.path.split('/Users/michael/testdir/file.txt')
# Rename a file:
# os.rename('test.txt', 'test.py')
# Delete a file:
# os.remove('test.py')
print '-------shutil-------'
# The shutil module provides copyfile() and many other practical helpers
# that complement the os module.
# All directories under the current directory:
print[x for x in os.listdir('.') if os.path.isdir(x)]
# All python files in the current folder:
# print [x for x in os.listdir('.') if os.path.isfile(x) and
# os.path.splitext(x)[1]=='.py']
# print os.listdir('.')
# print dir(os.path)
# 编写一个search(s)的函数,能在当前目录以及当前目录的所有子目录下查找文件名包含指定字符串的文件,并打印出完整路径:
def search(fileName):
currentPath = os.path.abspath('.')
for x in os.listdir('.'):
if os.path.isfile(x) and fileName in os.path.splitext(x)[0]:
print x
if os.path.isdir(x):
newP = os.path.join(currentPath, x)
print newP
print '-------search start-------'
search('0810')
| 18.948718 | 68 | 0.627876 | [
"MIT"
] | PomTTcat/pythonPersonTips | Python/pythonLevel1/python0811_file.py | 1,962 | Python |
import berserk
import chaturanga
# Lichess bot glue: accept unrated standard-variant challenges and answer
# game-state updates with moves chosen by the `chaturanga` engine.
token = 'token'  # NOTE: placeholder — supply a real lichess API token
bot_id = 'sultankhan2'
session = berserk.TokenSession(token)
lichess = berserk.Client(session)
for event in lichess.bots.stream_incoming_events():
    if event['type'] == 'challenge':
        challenge = event['challenge']
        # Only accept casual (unrated) games of the standard variant.
        if challenge['variant']['key'] == 'standard':
            if not challenge['rated']:
                game_id = challenge['id']
                lichess.bots.accept_challenge(game_id)
    else:
        game_id = event['game']['id']
        # NOTE(review): default color spec used when the event carries no
        # challenge info; the gameFull state below resolves the real color.
        challenge = {'color': 'random'}
        for game_state in lichess.bots.stream_game_state(game_id):
            if game_state['type'] == 'gameFull':
                if game_state['state']['moves'] == '':
                    # Fresh game: build the board from the start position or
                    # the supplied FEN.
                    if game_state['initialFen'] == 'startpos':
                        Chessboard = chaturanga.Chessboard()
                    else:
                        Chessboard = chaturanga.Chessboard(
                            game_state['initialFen'])
                if challenge['color'] == 'random':
                    if 'id' in game_state['white']:
                        is_white = game_state['white']['id'] == bot_id
                    else:
                        is_white = False
                else:
                    is_white = {
                        'white': False,
                        'black': True
                    }[challenge['color']]
                if is_white:
                    # Playing white: the bot makes the opening move.
                    bot_move = chaturanga.bot(Chessboard)[0]
                    Chessboard.move(bot_move)
                    lichess.bots.make_move(game_id, bot_move)
            if game_state['type'] == 'gameState':
                moves = game_state['moves'].split(' ')
                # Move only when it is the bot's turn (opponent just moved):
                # parity of the move count differs from the bot's color flag.
                if len(moves) % 2 != is_white:
                    Chessboard.move(moves[-1])
                    bot_move = chaturanga.bot(Chessboard)[0]
                    Chessboard.move(bot_move)
                    lichess.bots.make_move(game_id, bot_move)
"MIT"
] | Cheran-Senthil/SultanKhan2 | main.py | 1,897 | Python |
import sys, os, pwd, signal, time
from resource_management import *
from resource_management.core.base import Fail
from resource_management.core.exceptions import ComponentIsNotRunning
from subprocess import call
from impala_base import ImpalaBase
class StateStore(ImpalaBase):
    """Ambari lifecycle handler for the Impala StateStore service."""

    def _service_cmd(self, action, ignore_failures=False):
        """Run ``service impala-state-store <action>``, echoing it first.

        Centralizes the command string so start/stop/restart/status cannot
        drift apart.
        """
        cmd = 'service impala-state-store ' + action
        Execute('echo "Running cmd: ' + cmd + '"')
        Execute(cmd, ignore_failures=ignore_failures)

    # Call setup.sh to install the service
    def install(self, env):
        # Install packages listed in metainfo.xml, then Impala itself.
        self.install_packages(env)
        self.installImpala(env)
        self.configure(env)

    def configure(self, env):
        """Push the current configuration parameters into the environment."""
        import params
        env.set_params(params)

    # Call start.sh to start the service
    def start(self, env):
        import params
        self.configure(env)
        self._service_cmd('start')

    # Called to stop the service
    def stop(self, env):
        self._service_cmd('stop')

    def restart(self, env):
        # Stopping may fail when the service is not running; that is fine
        # during a restart, so failures are ignored.
        self._service_cmd('stop', ignore_failures=True)
        self.start(env)

    # Called to get status of the service
    def status(self, env):
        # Raises on a non-zero exit status, which Ambari interprets as
        # "component is not running".
        self._service_cmd('status')

    def create_hdfs_user(self, user):
        """Create /user/<user> in HDFS and hand ownership to that user."""
        Execute('hadoop fs -mkdir -p /user/' + user, user='hdfs', ignore_failures=True)
        Execute('hadoop fs -chown ' + user + ' /user/' + user, user='hdfs')
        Execute('hadoop fs -chgrp ' + user + ' /user/' + user, user='hdfs')


if __name__ == "__main__":
    StateStore().execute()
"Apache-2.0"
] | cas-bigdatalab/ambari-chs | ambari-server/src/main/resources/stacks/HDP/2.5/services/IMPALA/package/scripts/impala-state-store.py | 1,812 | Python |
from django.conf import settings
from django.conf.urls import patterns, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from peekaboo.base.monkeypatches import patch
# Apply project-wide monkeypatches before URL modules are imported.
patch()
# Enable the Django admin site.
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
    '',
    # Serve contribute.json from the project root (Mozilla convention).
    (r'^(?P<path>contribute\.json)$', 'django.views.static.serve',
     {'document_root': settings.ROOT}),
    (r'', include('peekaboo.main.urls', namespace='main')),
    (r'^sheet/', include('peekaboo.sheet.urls', namespace='sheet')),
    (r'^auth/', include('peekaboo.authentication.urls', namespace='auth')),
    (r'^users/', include('peekaboo.users.urls', namespace='users')),
    (r'^locations/',
     include('peekaboo.locations.urls', namespace='locations')),
    (r'^admin/', include(admin.site.urls)),
    (r'^browserid/', include('django_browserid.urls')),
)
# In DEBUG mode, serve media files through Django.
if settings.DEBUG:
    # Remove leading and trailing slashes so the regex matches.
    media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
    urlpatterns += patterns(
        '',
        (r'^%s/(?P<path>.*)$' % media_url, 'django.views.static.serve',
         {'document_root': settings.MEDIA_ROOT}),
    )
urlpatterns += staticfiles_urlpatterns()
| 34.789474 | 75 | 0.681543 | [
"MPL-2.0"
] | mozilla/peekaboo | peekaboo/urls.py | 1,322 | Python |
# A craftsman must tile a rectangular kitchen floor of L_1 x L_2 centimetres
# with square tiles, all of the same size.  He will not cut any tile and
# wants to use as few tiles as possible, so the tile side is gcd(L_1, L_2)
# and the tile count is the floor area divided by the tile area.
# Example: for L_1 = 440 cm and L_2 = 280 cm he needs 77 tiles of side 40 cm.
L_1 = int(input('L_1: '))
L_2 = int(input('L_2: '))
aria = L_1 * L_2
# Euclid's algorithm (subtraction form) for the gcd of the two sides.
while L_1 != L_2:
    if L_1 > L_2:
        L_1 -= L_2
    else:
        L_2 -= L_1
dim = L_1
# The area is always an exact multiple of dim**2, so use integer division
# to report a whole number of tiles (the previous `/` printed a float,
# e.g. 77.0 instead of 77).
nr = aria // dim**2
print(dim, nr)
| 32.26087 | 81 | 0.708895 | [
"MIT"
] | micu01/ProgAlgo | Lab01/lab01_03.py | 773 | Python |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
# These need to be importable by bootstrap.py. If we put them in
# setup.py the import may fail if setuptools in not installed
# in the global python3.
# Packages that must be installed with extra pip options (see bootstrap.py).
option_requirements = [
    ('pyzmq', ['--zmq=bundled']),
]
# Core runtime dependencies installed for every VOLTTRON deployment.
install_requires = [
    'gevent==20.6.1',
    'greenlet==0.4.16',
    'grequests',
    'requests==2.23.0',
    'ply',
    'psutil',
    'python-dateutil',
    'pytz',
    'PyYAML',
    'pyzmq',
    'setuptools',
    'tzlocal',
    'pyOpenSSL==19.0.0',
    'cryptography==2.3',
    # Cross platform way of handling changes in file/directories.
    # https://github.com/Bogdanp/watchdog_gevent
    'watchdog-gevent',
    'wheel==0.30'
]
# Optional feature groups, installable as pip "extras" (e.g. `.[web]`).
extras_require = {
    'crate': [  # crate databases
        'crate'
    ],
    'databases': [  # Support for all known databases
        'mysql-connector-python-rf',
        'pymongo',
        'crate',
        'influxdb',
        'psycopg2-binary'
    ],
    'dnp3': [  # dnp3 agent requirements.
        'pydnp3'
    ],
    'documentation': [  # Requirements for building the documentation
        'mock',
        'Sphinx',
        'recommonmark',
        'sphinx-rtd-theme'
    ],
    'drivers': [
        'pymodbus',
        'bacpypes==0.16.7',
        'modbus-tk',
        'pyserial'
    ],
    'influxdb': [  # influxdb historian requirements.
        'influxdb'
    ],
    'market': [  # Requirements for the market service
        'numpy',
        'transitions',
    ],
    'mongo': [  # mongo databases
        'pymongo',
    ],
    'mysql': [  # mysql databases
        'mysql-connector-python-rf',
    ],
    'pandas': [  # numpy and pandas for applications
        'numpy',
        'pandas',
    ],
    'postgres': [  # numpy and pandas for applications
        'psycopg2-binary'
    ],
    'testing': [  # Testing infrastructure dependencies
        'mock',
        'pytest',
        'pytest-timeout',
        'websocket-client',
        # Allows us to compare nested dictionaries easily.
        'deepdiff',
        # Allows setup of databases for testing with.
        'docker'
    ],
    'web': [  # Web support for launching web based agents including ssl and json web tokens.
        'ws4py',
        'PyJWT',
        'Jinja2',
        'passlib',
        'argon2-cffi',
        'Werkzeug'
    ],
    'weather': [
        'Pint'
    ],
}
| 30.948529 | 95 | 0.646947 | [
"Apache-2.0",
"BSD-2-Clause"
] | craig8/volttron | requirements.py | 4,209 | Python |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import xml.dom.minidom as minidom
import os
import PIL
import numpy as np
import scipy.sparse
import subprocess
import _pickle as cPickle
import math
import glob
import uuid
import scipy.io as sio
import xml.etree.ElementTree as ET
from .imdb import imdb
from .imdb import ROOT_DIR
from .imdb import MATLAB
from ..utils.cython_bbox import bbox_overlaps
from ..utils.boxes_grid import get_boxes_grid
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..rpn_msr.generate_anchors import generate_anchors
# <<<< obsolete
class pascal_voc(imdb):
    def __init__(self, image_set, year, pascal_path=None):
        """Build the PASCAL VOC imdb for one split.

        :param image_set: split name, e.g. 'train', 'trainval', 'test'
        :param year: VOC year as a string, e.g. '2007'
        :param pascal_path: root containing VOCdevkit<year>; defaults to
            ROOT_DIR/data/PASCAL when None
        """
        imdb.__init__(self, 'voc_' + year + '_' + image_set)
        self._year = year
        self._image_set = image_set
        self._pascal_path = self._get_default_path() if pascal_path is None \
                            else pascal_path
        self._data_path = os.path.join(self._pascal_path, 'VOCdevkit' + self._year, 'VOC' + self._year)
        self._classes = ('__background__', # always index 0
                         'aeroplane', 'bicycle', 'bird', 'boat',
                         'bottle', 'bus', 'car', 'cat', 'chair',
                         'cow', 'diningtable', 'dog', 'horse',
                         'motorbike', 'person', 'pottedplant',
                         'sheep', 'sofa', 'train', 'tvmonitor')
        # class name -> contiguous index (0 is background)
        self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
        self._image_ext = '.jpg'
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        if cfg.IS_RPN:
            self._roidb_handler = self.gt_roidb
        else:
            self._roidb_handler = self.region_proposal_roidb
        # num of subclasses
        self._num_subclasses = 240 + 1
        # load the mapping for subcalss to class
        filename = os.path.join(self._pascal_path, 'subcategory_exemplars', 'mapping.txt')
        assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
        # mapping[subclass_id] = parent class index, read from mapping.txt
        mapping = np.zeros(self._num_subclasses, dtype=np.int)
        with open(filename) as f:
            for line in f:
                words = line.split()
                subcls = int(words[0])
                mapping[subcls] = self._class_to_ind[words[1]]
        self._subclass_mapping = mapping
        # PASCAL specific config options
        self.config = {'cleanup' : True,
                       'use_salt' : True,
                       'top_k' : 2000}
        # statistics for computing recall
        self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)
        self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)
        self._num_boxes_proposal = 0
        assert os.path.exists(self._pascal_path), \
                'PASCAL path does not exist: {}'.format(self._pascal_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._pascal_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
    def _get_default_path(self):
        """
        Return the default path where PASCAL VOC is expected to be installed.
        """
        # ROOT_DIR comes from the imdb module; data layout is data/PASCAL.
        return os.path.join(ROOT_DIR, 'data', 'PASCAL')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print( '{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_subcategory_exemplar_annotation(index)
for index in self.image_index]
if cfg.IS_RPN:
# print( out recall
for i in range(1, self.num_classes):
print( '{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i]))
print( '{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i]))
print( '{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i])))
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print( 'wrote gt roidb to {}'.format(cache_file))
return gt_roidb
    def _load_pascal_annotation(self, index):
        """
        Load image and bounding boxes info from XML file in the PASCAL VOC
        format.

        Returns a roidb entry dict with 0-based boxes, class labels,
        (empty) subclass fields, and a sparse class-overlap matrix.
        When cfg.IS_RPN is set it also accumulates per-class recall
        statistics of the anchor/grid coverage into
        self._num_boxes_all / self._num_boxes_covered.
        """
        filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
        # print( 'Loading: {}'.format(filename)
        def get_data_from_tag(node, tag):
            # Text content of the first <tag> child of node.
            return node.getElementsByTagName(tag)[0].childNodes[0].data
        with open(filename) as f:
            data = minidom.parseString(f.read())
        objs = data.getElementsByTagName('object')
        num_objs = len(objs)
        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            # Make pixel indexes 0-based
            x1 = float(get_data_from_tag(obj, 'xmin')) - 1
            y1 = float(get_data_from_tag(obj, 'ymin')) - 1
            x2 = float(get_data_from_tag(obj, 'xmax')) - 1
            y2 = float(get_data_from_tag(obj, 'ymax')) - 1
            cls = self._class_to_ind[
                    str(get_data_from_tag(obj, "name")).lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
        overlaps = scipy.sparse.csr_matrix(overlaps)
        # Subclass info is not present in plain VOC XML; keep zero-filled
        # arrays so the entry has the same schema as the exemplar loader.
        gt_subclasses = np.zeros((num_objs), dtype=np.int32)
        gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
        subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
        subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
        if cfg.IS_RPN:
            if cfg.IS_MULTISCALE:
                # compute overlaps between grid boxes and gt boxes in multi-scales
                # rescale the gt boxes
                boxes_all = np.zeros((0, 4), dtype=np.float32)
                for scale in cfg.TRAIN.SCALES:
                    boxes_all = np.vstack((boxes_all, boxes * scale))
                gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
                # compute grid boxes
                s = PIL.Image.open(self.image_path_from_index(index)).size
                image_height = s[1]
                image_width = s[0]
                boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
                # compute overlap
                overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
                # check how many gt boxes are covered by grids
                if num_objs != 0:
                    # NOTE(review): this rebinding shadows the `index`
                    # parameter (the image identifier), which is not used
                    # again afterwards — confirm before refactoring.
                    index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
                    max_overlaps = overlaps_grid.max(axis = 0)
                    fg_inds = []
                    for k in range(1, self.num_classes):
                        fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
                    index_covered = np.unique(index[fg_inds])
                    for i in range(self.num_classes):
                        self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
                        self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
            else:
                assert len(cfg.TRAIN.SCALES_BASE) == 1
                scale = cfg.TRAIN.SCALES_BASE[0]
                feat_stride = 16
                # faster rcnn region proposal
                anchors = generate_anchors()
                num_anchors = anchors.shape[0]
                # image size
                s = PIL.Image.open(self.image_path_from_index(index)).size
                image_height = s[1]
                image_width = s[0]
                # height and width of the heatmap
                height = np.round((image_height * scale - 1) / 4.0 + 1)
                height = np.floor((height - 1) / 2 + 1 + 0.5)
                height = np.floor((height - 1) / 2 + 1 + 0.5)
                width = np.round((image_width * scale - 1) / 4.0 + 1)
                width = np.floor((width - 1) / 2.0 + 1 + 0.5)
                width = np.floor((width - 1) / 2.0 + 1 + 0.5)
                # gt boxes
                gt_boxes = boxes * scale
                # 1. Generate proposals from bbox deltas and shifted anchors
                shift_x = np.arange(0, width) * feat_stride
                shift_y = np.arange(0, height) * feat_stride
                shift_x, shift_y = np.meshgrid(shift_x, shift_y)
                shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()
                # add A anchors (1, A, 4) to
                # cell K shifts (K, 1, 4) to get
                # shift anchors (K, A, 4)
                # reshape to (K*A, 4) shifted anchors
                A = num_anchors
                K = shifts.shape[0]
                all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
                all_anchors = all_anchors.reshape((K * A, 4))
                # compute overlap
                overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
                # check how many gt boxes are covered by anchors
                if num_objs != 0:
                    max_overlaps = overlaps_grid.max(axis = 0)
                    fg_inds = []
                    for k in range(1, self.num_classes):
                        fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
                    for i in range(self.num_classes):
                        self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
                        self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])
        return {'boxes' : boxes,
                'gt_classes': gt_classes,
                'gt_subclasses': gt_subclasses,
                'gt_subclasses_flipped': gt_subclasses_flipped,
                'gt_overlaps' : overlaps,
                'gt_subindexes': subindexes,
                'gt_subindexes_flipped': subindexes_flipped,
                'flipped' : False}
    def _load_pascal_subcategory_exemplar_annotation(self, index):
        """
        Load image and bounding boxes info from txt file in the pascal subcategory exemplar format.

        For the test split this falls back to the plain VOC XML loader.
        Each annotation line contains: class name, subclass id, flip flag,
        then the box coordinates in columns 3:7 (1-based pixels).
        When cfg.IS_RPN is set, anchor/grid coverage statistics are
        accumulated into self._num_boxes_all / self._num_boxes_covered.
        """
        if self._image_set == 'test':
            return self._load_pascal_annotation(index)
        filename = os.path.join(self._pascal_path, 'subcategory_exemplars', index + '.txt')
        assert os.path.exists(filename), \
                'Path does not exist: {}'.format(filename)
        # the annotation file contains flipped objects
        lines = []
        lines_flipped = []
        with open(filename) as f:
            for line in f:
                words = line.split()
                subcls = int(words[1])
                is_flip = int(words[2])
                # subclass -1 marks an ignored object
                if subcls != -1:
                    if is_flip == 0:
                        lines.append(line)
                    else:
                        lines_flipped.append(line)
        num_objs = len(lines)
        # store information of flipped objects
        assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'
        gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
        for ix, line in enumerate(lines_flipped):
            words = line.split()
            subcls = int(words[1])
            gt_subclasses_flipped[ix] = subcls
        boxes = np.zeros((num_objs, 4), dtype=np.float32)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        gt_subclasses = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
        subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
        for ix, line in enumerate(lines):
            words = line.split()
            cls = self._class_to_ind[words[0]]
            subcls = int(words[1])
            # Make pixel indexes 0-based
            boxes[ix, :] = [float(n)-1 for n in words[3:7]]
            gt_classes[ix] = cls
            gt_subclasses[ix] = subcls
            overlaps[ix, cls] = 1.0
            subindexes[ix, cls] = subcls
            subindexes_flipped[ix, cls] = gt_subclasses_flipped[ix]
        overlaps = scipy.sparse.csr_matrix(overlaps)
        if cfg.IS_RPN:
            if cfg.IS_MULTISCALE:
                # compute overlaps between grid boxes and gt boxes in multi-scales
                # rescale the gt boxes
                boxes_all = np.zeros((0, 4), dtype=np.float32)
                for scale in cfg.TRAIN.SCALES:
                    boxes_all = np.vstack((boxes_all, boxes * scale))
                gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
                # compute grid boxes
                s = PIL.Image.open(self.image_path_from_index(index)).size
                image_height = s[1]
                image_width = s[0]
                boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
                # compute overlap
                overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
                # check how many gt boxes are covered by grids
                if num_objs != 0:
                    # NOTE(review): rebinding shadows the `index` parameter,
                    # which is not used again below — confirm before refactoring.
                    index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
                    max_overlaps = overlaps_grid.max(axis = 0)
                    fg_inds = []
                    for k in range(1, self.num_classes):
                        fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
                    index_covered = np.unique(index[fg_inds])
                    for i in range(self.num_classes):
                        self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
                        self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
            else:
                assert len(cfg.TRAIN.SCALES_BASE) == 1
                scale = cfg.TRAIN.SCALES_BASE[0]
                feat_stride = 16
                # faster rcnn region proposal
                base_size = 16
                ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]
                scales = 2**np.arange(1, 6, 0.5)
                anchors = generate_anchors(base_size, ratios, scales)
                num_anchors = anchors.shape[0]
                # image size
                s = PIL.Image.open(self.image_path_from_index(index)).size
                image_height = s[1]
                image_width = s[0]
                # height and width of the heatmap
                height = np.round((image_height * scale - 1) / 4.0 + 1)
                height = np.floor((height - 1) / 2 + 1 + 0.5)
                height = np.floor((height - 1) / 2 + 1 + 0.5)
                width = np.round((image_width * scale - 1) / 4.0 + 1)
                width = np.floor((width - 1) / 2.0 + 1 + 0.5)
                width = np.floor((width - 1) / 2.0 + 1 + 0.5)
                # gt boxes
                gt_boxes = boxes * scale
                # 1. Generate proposals from bbox deltas and shifted anchors
                shift_x = np.arange(0, width) * feat_stride
                shift_y = np.arange(0, height) * feat_stride
                shift_x, shift_y = np.meshgrid(shift_x, shift_y)
                shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()
                # add A anchors (1, A, 4) to
                # cell K shifts (K, 1, 4) to get
                # shift anchors (K, A, 4)
                # reshape to (K*A, 4) shifted anchors
                A = num_anchors
                K = shifts.shape[0]
                all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
                all_anchors = all_anchors.reshape((K * A, 4))
                # compute overlap
                overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
                # check how many gt boxes are covered by anchors
                if num_objs != 0:
                    max_overlaps = overlaps_grid.max(axis = 0)
                    fg_inds = []
                    for k in range(1, self.num_classes):
                        fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
                    for i in range(self.num_classes):
                        self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
                        self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])
        return {'boxes' : boxes,
                'gt_classes': gt_classes,
                'gt_subclasses': gt_subclasses,
                'gt_subclasses_flipped': gt_subclasses_flipped,
                'gt_overlaps': overlaps,
                'gt_subindexes': subindexes,
                'gt_subindexes_flipped': subindexes_flipped,
                'flipped' : False}
    def region_proposal_roidb(self):
        """
        Return the database of regions of interest.
        Ground-truth ROIs are also included.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path,
                self.name + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print( '{} roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        if self._image_set != 'test':
            # Training splits: merge RPN proposals with the ground truth.
            gt_roidb = self.gt_roidb()
            print(( 'Loading region proposal network boxes...'))
            model = cfg.REGION_PROPOSAL
            rpn_roidb = self._load_rpn_roidb(gt_roidb, model)
            print( 'Region proposal network boxes loaded')
            roidb = imdb.merge_roidbs(rpn_roidb, gt_roidb)
        else:
            # Test split: proposals only, no ground truth available.
            print( 'Loading region proposal network boxes...')
            model = cfg.REGION_PROPOSAL
            roidb = self._load_rpn_roidb(None, model)
            print( 'Region proposal network boxes loaded')
        print( '{} region proposals per image'.format(self._num_boxes_proposal / len(self.image_index)))
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print( 'wrote roidb to {}'.format(cache_file))
        return roidb
def _load_rpn_roidb(self, gt_roidb, model):
# set the prefix
if self._image_set == 'test':
prefix = model + '/testing'
else:
prefix = model + '/training'
box_list = []
for index in self.image_index:
filename = os.path.join(self._pascal_path, 'region_proposals', prefix, index + '.txt')
assert os.path.exists(filename), \
'RPN data not found at: {}'.format(filename)
raw_data = np.loadtxt(filename, dtype=float)
if len(raw_data.shape) == 1:
if raw_data.size == 0:
raw_data = raw_data.reshape((0, 5))
else:
raw_data = raw_data.reshape((1, 5))
x1 = raw_data[:, 0]
y1 = raw_data[:, 1]
x2 = raw_data[:, 2]
y2 = raw_data[:, 3]
score = raw_data[:, 4]
inds = np.where((x2 > x1) & (y2 > y1))[0]
raw_data = raw_data[inds,:4]
self._num_boxes_proposal += raw_data.shape[0]
box_list.append(raw_data)
return self.create_roidb_from_box_list(box_list, gt_roidb)
    def selective_search_roidb(self):
        """
        Return the database of selective search regions of interest.
        Ground-truth ROIs are also included.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path,
                                  self.name + '_selective_search_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print( '{} ss roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        # VOC2007 test has public annotations, so ground truth can be merged
        # there; for other years the test split has proposals only.
        if int(self._year) == 2007 or self._image_set != 'test':
            gt_roidb = self.gt_roidb()
            ss_roidb = self._load_selective_search_roidb(gt_roidb)
            roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
        else:
            roidb = self._load_selective_search_roidb(None)
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print( 'wrote ss roidb to {}'.format(cache_file))
        return roidb
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(self.cache_path, '..',
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in range(raw_data.shape[0]):
box_list.append(raw_data[i][:, (1, 0, 3, 2)] - 1)
return self.create_roidb_from_box_list(box_list, gt_roidb)
    def selective_search_IJCV_roidb(self):
        """
        Return the database of selective search regions of interest.
        Ground-truth ROIs are also included.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        # Cache name includes top_k so different truncations do not collide.
        cache_file = os.path.join(self.cache_path,
                '{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.
                format(self.name, self.config['top_k']))
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print( '{} ss roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        gt_roidb = self.gt_roidb()
        ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)
        roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print( 'wrote ss roidb to {}'.format(cache_file))
        return roidb
def _load_selective_search_IJCV_roidb(self, gt_roidb):
IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
'selective_search_IJCV_data',
'voc_' + self._year))
assert os.path.exists(IJCV_path), \
'Selective search IJCV data not found at: {}'.format(IJCV_path)
top_k = self.config['top_k']
box_list = []
for i in range(self.num_images):
filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
raw_data = sio.loadmat(filename)
box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _write_voc_results_file(self, all_boxes):
use_salt = self.config['use_salt']
comp_id = 'comp4'
if use_salt:
comp_id += '-{}'.format(os.getpid())
# VOCdevkit/results/VOC2007/Main/comp4-44503_det_test_aeroplane.txt
path = os.path.join(self._pascal_path, 'VOCdevkit' + self._year, 'results', 'VOC' + self._year,
'Main', comp_id + '_')
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print( 'Writing {} VOC results file'.format(cls))
filename = path + 'det_' + self._image_set + '_' + cls + '.txt'
print( filename)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, 4],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
return comp_id
    def _do_matlab_eval(self, comp_id, output_dir='output'):
        """Invoke the MATLAB VOCdevkit wrapper to score the written results.

        :param comp_id: competition id returned by _write_voc_results_file
        :param output_dir: directory the MATLAB script writes results into
        """
        rm_results = self.config['cleanup']
        path = os.path.join(os.path.dirname(__file__),
                            'VOCdevkit-matlab-wrapper')
        # Build a shell command that cds into the wrapper dir and runs
        # voc_eval(devkit_path, comp_id, image_set, output_dir, cleanup).
        cmd = 'cd {} && '.format(path)
        cmd += '{:s} -nodisplay -nodesktop '.format(MATLAB)
        cmd += '-r "dbstop if error; '
        cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\',{:d}); quit;"' \
               .format(self._pascal_path + '/VOCdevkit' + self._year, comp_id,
                       self._image_set, output_dir, int(rm_results))
        print(('Running:\n{}'.format(cmd)))
        status = subprocess.call(cmd, shell=True)
# evaluate detection results
def evaluate_detections(self, all_boxes, output_dir):
comp_id = self._write_voc_results_file(all_boxes)
self._do_matlab_eval(comp_id, output_dir)
def evaluate_proposals(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index + '.txt')
print( 'Writing PASCAL results to file ' + filename)
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in range(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(\
dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def evaluate_proposals_msr(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index + '.txt')
print( 'Writing PASCAL results to file ' + filename)
with open(filename, 'wt') as f:
dets = all_boxes[im_ind]
if dets == []:
continue
for k in range(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
    # Manual smoke test: build the trainval-2007 imdb, force roidb
    # construction, then drop into an interactive shell for inspection.
    d = pascal_voc('trainval', '2007')
    res = d.roidb
    from IPython import embed; embed()
| 43.070175 | 132 | 0.544399 | [
"MIT"
] | zjjszj/PS_DM_mydetector_faster_rcnn_pytorch | faster_rcnn/datasets/pascal_voc2.py | 29,460 | Python |
from django.urls import include, path
from rest_framework import routers
from .views import classroom, students, teachers, quizzes
urlpatterns = [
    path('', classroom.home, name='home'),
    # Read-only quiz listing exposed through the DRF viewset.
    path('quizzes/', quizzes.QuizViewSet.as_view({'get': 'list'}), name='quizzes_list'),
    # Student-facing views, reversed as students:<name>.
    path('students/', include(([
        path('', students.QuizListView.as_view(), name='quiz_list'),
        path('s/', students.StudentList.as_view(), name='student_list'),
        path('interests/', students.StudentInterestsView.as_view(), name='student_interests'),
        path('taken/', students.TakenQuizListView.as_view(), name='taken_quiz_list'),
        path('quiz/<int:pk>/', students.take_quiz, name='take_quiz'),
        path('quiz/<int:pk>/studentresults/', students.QuizResultsView.as_view(), name='student_quiz_results'),
    ], 'classroom'), namespace='students')),
    # Teacher-facing quiz/question management, reversed as teachers:<name>.
    path('teachers/', include(([
        path('', teachers.QuizListView.as_view(), name='quiz_change_list'),
        path('quiz/add/', teachers.QuizCreateView.as_view(), name='quiz_add'),
        path('quiz/<int:pk>/', teachers.QuizUpdateView.as_view(), name='quiz_change'),
        path('quiz/<int:pk>/delete/', teachers.QuizDeleteView.as_view(), name='quiz_delete'),
        path('quiz/<int:pk>/results/', teachers.QuizResultsView.as_view(), name='quiz_results'),
        path('quiz/<int:pk>/question/add/', teachers.question_add, name='question_add'),
        path('quiz/<int:quiz_pk>/question/<int:question_pk>/', teachers.question_change, name='question_change'),
        path('quiz/<int:quiz_pk>/question/<int:question_pk>/delete/', teachers.QuestionDeleteView.as_view(), name='question_delete'),
    ], 'classroom'), namespace='teachers')),
]
| 57.2 | 133 | 0.68007 | [
"MIT"
] | mauriciovieira/django-schools | django_school/classroom/urls.py | 1,716 | Python |
#!/usr/bin/env python3
# license removed for brevity
#策略 機械手臂 四點來回跑
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
# `import .pkg.mod as name` is a SyntaxError in Python; a relative import
# must use the `from ... import ...` form.
from .ArmCommand import Hiwin_RT605_Arm_Command as ArmTask
##----Arm state-----------
# Latest arm state code reported by the `arm_state` ROS service (see Arm_status).
Arm_state_flag = 0
# True when the arm is idle and the strategy may send the next motion;
# set to 6 on shutdown (see Arm_state below).
Strategy_flag = 0
##----Arm status enum
class Arm_status(enum.IntEnum):
    # Integer codes exchanged with the arm-state ROS service.
    Idle = 0      # arm ready for the next command
    Isbusy = 1    # arm currently executing a motion
    Error = 2     # arm reported an error
    shutdown = 6  # controller program was interrupted
##-----------server feedback arm state----------
def Arm_state(req):
    """ROS service callback: record the reported arm state.

    Updates the module-level ``Arm_state_flag`` and ``Strategy_flag`` and
    echoes the state code back to the caller:
    1 = busy, 0 = idle (strategy may proceed), 6 = shutdown.
    """
    global Strategy_flag, Arm_state_flag  # unused CurrentMissionType removed
    # Direct int() conversion; the original int('%s' % ...) string
    # round-trip was redundant.
    Arm_state_flag = int(req.Arm_state)
    if Arm_state_flag == Arm_status.Isbusy:    # arm is busy
        Strategy_flag = False
        return 1
    if Arm_state_flag == Arm_status.Idle:      # arm is ready
        Strategy_flag = True
        return 0
    if Arm_state_flag == Arm_status.shutdown:  # program interrupted
        Strategy_flag = 6
        return 6
def arm_state_server():
    """Register the 'arm_state' ROS service, handled by Arm_state above."""
    #rospy.init_node(NAME)
    s = rospy.Service('arm_state',arm_state, Arm_state) ##server arm state
    #rospy.spin() ## spin one
##-----------switch define------------##
class switch(object):
    """C-style switch/case helper.

    Usage::

        for case in switch(value):
            if case(1): ...; break
            if case(2, 3): ...; break
            if case(): ...  # default

    After a successful match, subsequent ``case(...)`` calls also return
    True (fall-through), and a bare ``case()`` is the default branch.
    """

    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # PEP 479 (Python 3.7+): `raise StopIteration` inside a generator
        # is converted to RuntimeError; a plain return ends iteration.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
##------------class-------
class point():
    """A 6-DOF pose record: position (x, y, z) and orientation angles."""
    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw
##-------------------------strategy---------------------
##-----Mission state-machine parameters
GetInfoFlag = False        # vision/info stage active
ExecuteFlag = False        # execute stage active
GetKeyFlag = False         # motion-key selection stage active
MotionSerialKey = []       # snapshot of MotionKey consumed by Execute_Mission
MissionType_Flag = 0       # toggles 0/1 to alternate PushBall / Pushback
MotionStep = 0             # index of the next command in MotionSerialKey
##-----Arm motion / pose parameters
angle_SubCue = 0           # auxiliary cue angle (not used by this strategy)
LinePtpFlag = False        # True -> straight-line move, False -> PTP move
MoveFlag = False           # module-level default; MotionItem uses a local copy
PushBallHeight = 6         # z height used when pushing the ball
ObjAboveHeight = 10        # z height used when hovering above the object
SpeedValue = 10            # arm speed passed to ArmTask
MissionEndFlag = False     # set once the final vision move has been issued
CurrentMissionType = 0     # last MissionType chosen by Get_MissionType
##---------------Enum---------------##
class ArmMotionCommand(enum.IntEnum):
    # Primitive arm motions that can be scheduled in a MotionKey list
    # (interpreted by MotionItem below).
    Arm_Stop = 0                # end of a motion sequence
    Arm_MoveToTargetUpside = 1  # PTP move above the target
    Arm_MoveFowardDown = 2      # PTP move to the forward-down pose
    Arm_MoveVision = 3          # PTP move to the vision pose
    Arm_PushBall = 4            # sweeping push through the ball
    Arm_LineUp = 5              # straight-line rise to ObjAboveHeight
    Arm_LineDown = 6            # straight-line descent to PushBallHeight
    Arm_Angle = 7               # rotate to an angle (unused here)
    Arm_StopPush = 8            # stop and request a rescan
class MissionType(enum.IntEnum):
    # High-level mission phases driven by Get_MissionType / Execute_Mission.
    Get_Img = 0      # acquire an image (vision stage)
    PushBall = 1     # run the push-ball motion sequence
    Pushback = 2     # return to the vision pose
    Mission_End = 3  # terminal state: stop the state machine
##-----------pose containers------------##
class pos():
    """Current commanded arm pose (used as a class-level namespace by
    MotionItem, which assigns pos.x, pos.y, ... directly).

    The original __init__ accepted x..yaw but silently discarded them;
    the arguments are now honoured, with defaults equal to the previous
    hard-coded home pose so argument-free use behaves the same.
    """
    def __init__(self, x=0, y=36.8, z=11.35, pitch=-90, roll=0, yaw=0):
        self.x = x
        self.y = y
        self.z = z
        self.pitch = pitch
        self.roll = roll
        self.yaw = yaw
class Target_pos():
    """Target pose container.

    Fix: the original __init__ accepted x..yaw but discarded them; the
    arguments are now honoured, defaulting to the previous constants.
    """
    def __init__(self, x=0, y=36.8, z=11.35, pitch=-90, roll=0, yaw=0):
        self.x = x
        self.y = y
        self.z = z
        self.pitch = pitch
        self.roll = roll
        self.yaw = yaw
class TargetPush_pos():
    """Push-target pose container.

    Fix: the original __init__ accepted x..yaw but discarded them; the
    arguments are now honoured, defaulting to the previous constants.
    """
    def __init__(self, x=0, y=36.8, z=11.35, pitch=-90, roll=0, yaw=0):
        self.x = x
        self.y = y
        self.z = z
        self.pitch = pitch
        self.roll = roll
        self.yaw = yaw
class Item():
    """A detected object: image-plane position (x, y) plus its class label."""
    def __init__(self, x, y, label):
        self.x, self.y, self.label = x, y, label
def Mission_Trigger():
    """Advance the mission state machine by one or more stages.

    The three tests are deliberately independent `if`s (not elif): a stage
    handler may flip the flags so that the following stage fires within the
    same call, cascading info -> key -> execute.
    """
    if GetInfoFlag and not (GetKeyFlag or ExecuteFlag):
        GetInfo_Mission()
    if GetKeyFlag and not (GetInfoFlag or ExecuteFlag):
        GetKey_Mission()
    if ExecuteFlag and not (GetInfoFlag or GetKeyFlag):
        Execute_Mission()
def GetInfo_Mission():
    """Info stage: (vision disabled here) then hand over to key selection."""
    global GetInfoFlag, GetKeyFlag, ExecuteFlag
    #Billiards_Calculation()  # vision step currently disabled
    ExecuteFlag = False
    GetInfoFlag = False
    GetKeyFlag = True
def GetKey_Mission():
    """Key stage: choose the mission, build its motion keys, go to execute."""
    global GetInfoFlag, GetKeyFlag, ExecuteFlag, MotionKey, MotionSerialKey
    mission = Get_MissionType()
    MissionItem(mission)
    MotionSerialKey = MotionKey
    # Advance the state machine to the execute stage.
    GetInfoFlag = False
    GetKeyFlag = False
    ExecuteFlag = True
def Get_MissionType():
    """Alternate between PushBall and Pushback on successive calls.

    MissionType_Flag toggles 0 -> 1 -> 0 ..., so each PushBall mission is
    followed by a Pushback (return to vision pose).
    """
    global MissionType_Flag, CurrentMissionType
    if MissionType_Flag == 0:
        mission = MissionType.PushBall
        MissionType_Flag += 1
    elif MissionType_Flag == 1:
        mission = MissionType.Pushback
        MissionType_Flag -= 1
    CurrentMissionType = mission
    return mission
def MissionItem(ItemNo):
    """Select the arm-motion command sequence for the given mission type.

    Stores the chosen sequence in the module-level MotionKey and returns it;
    an unknown mission type leaves MotionKey unchanged.
    """
    global MotionKey
    push_ball_sequence = [
        ArmMotionCommand.Arm_MoveToTargetUpside,
        ArmMotionCommand.Arm_LineDown,
        ArmMotionCommand.Arm_PushBall,
        ArmMotionCommand.Arm_LineUp,
        ArmMotionCommand.Arm_Stop,
    ]
    push_back_sequence = [
        ArmMotionCommand.Arm_MoveVision,
        ArmMotionCommand.Arm_Stop,
        ArmMotionCommand.Arm_StopPush,
    ]
    if ItemNo == MissionType.PushBall:
        MotionKey = push_ball_sequence
    elif ItemNo == MissionType.Pushback:
        MotionKey = push_back_sequence
    return MotionKey
def Execute_Mission():
    """Execute stage: issue the next motion command when the arm is idle.

    Runs one step of MotionSerialKey per invocation. On Arm_Stop it either
    ends the mission (MissionEndFlag), loops back to key selection for the
    next PushBall, or returns to the info stage.
    """
    global GetInfoFlag,GetKeyFlag,ExecuteFlag,MotionKey,MotionStep,MotionSerialKey,MissionEndFlag,CurrentMissionType,Strategy_flag,Arm_state_flag
    # Only act when the arm reports Idle and the strategy gate is open.
    if Arm_state_flag == Arm_status.Idle and Strategy_flag == True:
        Strategy_flag = False
        if MotionKey[MotionStep] == ArmMotionCommand.Arm_Stop:
            if MissionEndFlag == True:
                # Final stop after the Pushback sequence: halt everything.
                CurrentMissionType = MissionType.Mission_End
                GetInfoFlag = False
                GetKeyFlag = False
                ExecuteFlag = False
                print("Mission_End")
            elif CurrentMissionType == MissionType.PushBall:
                # PushBall finished: go pick the next mission's keys.
                GetInfoFlag = False
                GetKeyFlag = True
                ExecuteFlag = False
                MotionStep = 0
                print("PushBall")
            else:
                # Otherwise rescan: back to the info stage.
                GetInfoFlag = True
                GetKeyFlag = False
                ExecuteFlag = False
                MotionStep = 0
        else:
            # Dispatch the current command and advance the step index.
            MotionItem(MotionSerialKey[MotionStep])
            MotionStep += 1
def MotionItem(ItemNo):
    """Translate one ArmMotionCommand into a target pose and send it.

    Updates the class-level attributes of ``pos`` for the selected command,
    then issues either a PTP move (mode 2) or a straight-line move (mode 3)
    through ArmTask when the command requires motion.

    Fix: MoveFlag is initialised locally, so an unrecognised command no
    longer raises NameError at the trailing `if MoveFlag` check.
    """
    global angle_SubCue, SpeedValue, PushFlag, LinePtpFlag, MissionEndFlag
    SpeedValue = 5
    MoveFlag = False  # default: no motion unless a case below requests one
    if ItemNo == ArmMotionCommand.Arm_Stop:
        MoveFlag = False
        print("Arm_Stop")
    elif ItemNo == ArmMotionCommand.Arm_StopPush:
        MoveFlag = False
        PushFlag = True  # request a rescan of the objects
        print("Arm_StopPush")
    elif ItemNo == ArmMotionCommand.Arm_MoveToTargetUpside:
        pos.x = 10
        pos.y = 36.8
        pos.z = 11.35
        pos.pitch = -90
        pos.roll = 0
        pos.yaw = 10
        MoveFlag = True
        LinePtpFlag = False
        SpeedValue = 10
        print("Arm_MoveToTargetUpside")
    elif ItemNo == ArmMotionCommand.Arm_LineUp:
        pos.z = ObjAboveHeight
        MoveFlag = True
        LinePtpFlag = True
        SpeedValue = 5
        print("Arm_LineUp")
    elif ItemNo == ArmMotionCommand.Arm_LineDown:
        pos.z = PushBallHeight
        MoveFlag = True
        LinePtpFlag = True
        SpeedValue = 5
        print("Arm_LineDown")
    elif ItemNo == ArmMotionCommand.Arm_PushBall:
        pos.x = -10
        pos.y = 36.8
        pos.z = 11.35
        pos.pitch = -90
        pos.roll = 0
        pos.yaw = -10
        SpeedValue = 10  # to be tuned
        MoveFlag = True
        LinePtpFlag = False
        print("Arm_PushBall")
    elif ItemNo == ArmMotionCommand.Arm_MoveVision:
        pos.x = 0
        pos.y = 36.8
        pos.z = 11.35
        pos.pitch = -90
        pos.roll = 0
        pos.yaw = 0
        SpeedValue = 10
        MoveFlag = True
        LinePtpFlag = False
        # Mission-complete flag: Execute_Mission ends on the next Arm_Stop.
        MissionEndFlag = True
        print("Arm_MoveVision")
    elif ItemNo == ArmMotionCommand.Arm_MoveFowardDown:
        pos.x = 0
        pos.y = 36.8
        pos.z = 11.35
        pos.pitch = -90
        pos.roll = 0
        pos.yaw = 0
        MoveFlag = True
        LinePtpFlag = False
        print("Arm_MoveFowardDown")
    else:
        print("something else!")

    if MoveFlag == True:
        if LinePtpFlag == False:
            # PTP (point-to-point) move.
            print('x: ', pos.x, ' y: ', pos.y, ' z: ', pos.z, ' pitch: ', pos.pitch, ' roll: ', pos.roll, ' yaw: ', pos.yaw)
            ArmTask.strategy_client_Arm_Mode(2, 1, 0, SpeedValue, 2)  # action,ra,grip,vel,both
            ArmTask.strategy_client_pos_move(pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw)
        elif LinePtpFlag == True:
            # Straight-line move.
            print('x: ', pos.x, ' y: ', pos.y, ' z: ', pos.z, ' pitch: ', pos.pitch, ' roll: ', pos.roll, ' yaw: ', pos.yaw)
            ArmTask.strategy_client_Arm_Mode(3, 1, 0, SpeedValue, 2)  # action,ra,grip,vel,both
            ArmTask.strategy_client_pos_move(pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw)
    # ArmTask.strategy_client_Arm_Mode arguments:
    #   action: 2 = PTP, 3 = line
    #   ra    : absolute / relative
    #   grip  : gripper
    #   vel   : speed
    #   both  : Ctrl_Mode
##-------------strategy end ------------
def myhook():
    """rospy shutdown hook: announce that the node is stopping."""
    print("shutdown time!")
if __name__ == '__main__':
    argv = rospy.myargv()
    rospy.init_node('strategy', anonymous=True)
    GetInfoFlag = True #Test no data
    # Start the arm-state service so the arm can report Idle/Busy.
    arm_state_server()
    ArmTask.strategy_client_Arm_Mode(0,1,0,20,2)#action,ra,grip,vel,both
    # Drive the mission state machine until it reaches Mission_End.
    while 1:
        Mission_Trigger()
        if CurrentMissionType == MissionType.Mission_End:
            ArmTask.rospy.on_shutdown(myhook)
            ArmTask.rospy.spin()
    rospy.spin()
| 31.006061 | 145 | 0.578088 | [
"MIT"
] | SamKaiYang/2019_Hiwin_Shaking | src/.history/Test/HiwinRT605_Strategy_test_20190620132725.py | 10,410 | Python |
import argparse
import multiprocessing
import os
import threading
import time
from typing import Any, Tuple, Union, Dict, Optional
from .helper import _get_event, ConditionalEvent
from ... import __stop_msg__, __ready_msg__, __default_host__
from ...enums import PeaRoleType, RuntimeBackendType, SocketType
from ...excepts import RuntimeFailToStart, RuntimeRunForeverEarlyError
from ...helper import typename
from ...logging.logger import JinaLogger
__all__ = ['BasePea']
def run(
args: 'argparse.Namespace',
name: str,
runtime_cls,
envs: Dict[str, str],
is_started: Union['multiprocessing.Event', 'threading.Event'],
is_shutdown: Union['multiprocessing.Event', 'threading.Event'],
is_ready: Union['multiprocessing.Event', 'threading.Event'],
cancel_event: Union['multiprocessing.Event', 'threading.Event'],
):
"""Method representing the :class:`BaseRuntime` activity.
This method is the target for the Pea's `thread` or `process`
.. note::
:meth:`run` is running in subprocess/thread, the exception can not be propagated to the main process.
Hence, please do not raise any exception here.
.. note::
Please note that env variables are process-specific. Subprocess inherits envs from
the main process. But Subprocess's envs do NOT affect the main process. It does NOT
mess up user local system envs.
.. warning::
If you are using ``thread`` as backend, envs setting will likely be overidden by others
:param args: namespace args from the Pea
:param name: name of the Pea to have proper logging
:param runtime_cls: the runtime class to instantiate
:param envs: a dictionary of environment variables to be set in the new Process
:param is_started: concurrency event to communicate runtime is properly started. Used for better logging
:param is_shutdown: concurrency event to communicate runtime is terminated
:param is_ready: concurrency event to communicate runtime is ready to receive messages
:param cancel_event: concurrency event to receive cancelling signal from the Pea. Needed by some runtimes
"""
logger = JinaLogger(name, **vars(args))
def _unset_envs():
if envs and args.runtime_backend != RuntimeBackendType.THREAD:
for k in envs.keys():
os.unsetenv(k)
def _set_envs():
if args.env:
if args.runtime_backend == RuntimeBackendType.THREAD:
logger.warning(
'environment variables should not be set when runtime="thread".'
)
else:
os.environ.update({k: str(v) for k, v in envs.items()})
try:
_set_envs()
runtime = runtime_cls(
args=args,
cancel_event=cancel_event,
)
except Exception as ex:
logger.error(
f'{ex!r} during {runtime_cls!r} initialization'
+ f'\n add "--quiet-error" to suppress the exception details'
if not args.quiet_error
else '',
exc_info=not args.quiet_error,
)
else:
is_started.set()
with runtime:
is_ready.set()
runtime.run_forever()
finally:
_unset_envs()
is_shutdown.set()
class BasePea:
"""
:class:`BasePea` is a thread/process- container of :class:`BaseRuntime`. It leverages :class:`threading.Thread`
or :class:`multiprocessing.Process` to manage the lifecycle of a :class:`BaseRuntime` object in a robust way.
A :class:`BasePea` must be equipped with a proper :class:`Runtime` class to work.
"""
def __init__(self, args: 'argparse.Namespace'):
super().__init__() #: required here to call process/thread __init__
self.args = args
self.name = self.args.name or self.__class__.__name__
self.logger = JinaLogger(self.name, **vars(self.args))
if self.args.runtime_backend == RuntimeBackendType.THREAD:
self.logger.warning(
f' Using Thread as runtime backend is not recommended for production purposes. It is '
f'just supposed to be used for easier debugging. Besides the performance considerations, it is'
f'specially dangerous to mix `Executors` running in different types of `RuntimeBackends`.'
)
self._envs = {'JINA_POD_NAME': self.name, 'JINA_LOG_ID': self.args.identity}
if self.args.quiet:
self._envs['JINA_LOG_CONFIG'] = 'QUIET'
if self.args.env:
self._envs.update(self.args.env)
# arguments needed to create `runtime` and communicate with it in the `run` in the stack of the new process
# or thread. Control address from Zmqlet has some randomness and therefore we need to make sure Pea knows
# control address of runtime
self.runtime_cls = self._get_runtime_cls()
self._timeout_ctrl = self.args.timeout_ctrl
self._set_ctrl_adrr()
test_worker = {
RuntimeBackendType.THREAD: threading.Thread,
RuntimeBackendType.PROCESS: multiprocessing.Process,
}.get(getattr(args, 'runtime_backend', RuntimeBackendType.THREAD))()
self.is_ready = _get_event(test_worker)
self.is_shutdown = _get_event(test_worker)
self.cancel_event = _get_event(test_worker)
self.is_started = _get_event(test_worker)
self.ready_or_shutdown = ConditionalEvent(
getattr(args, 'runtime_backend', RuntimeBackendType.THREAD),
events_list=[self.is_ready, self.is_shutdown],
)
self.worker = {
RuntimeBackendType.THREAD: threading.Thread,
RuntimeBackendType.PROCESS: multiprocessing.Process,
}.get(getattr(args, 'runtime_backend', RuntimeBackendType.THREAD))(
target=run,
kwargs={
'args': args,
'name': self.name,
'envs': self._envs,
'is_started': self.is_started,
'is_shutdown': self.is_shutdown,
'is_ready': self.is_ready,
'cancel_event': self.cancel_event,
'runtime_cls': self.runtime_cls,
},
)
self.daemon = self.args.daemon #: required here to set process/thread daemon
def _set_ctrl_adrr(self):
"""Sets control address for different runtimes"""
self.runtime_ctrl_address = self.runtime_cls.get_control_address(
host=self.args.host,
port=self.args.port_ctrl,
docker_kwargs=getattr(self.args, 'docker_kwargs', None),
)
if not self.runtime_ctrl_address:
self.runtime_ctrl_address = f'{self.args.host}:{self.args.port_in}'
def start(self):
"""Start the Pea.
This method calls :meth:`start` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
.. #noqa: DAR201
"""
self.worker.start()
if not self.args.noblock_on_start:
self.wait_start_success()
return self
def join(self, *args, **kwargs):
"""Joins the Pea.
This method calls :meth:`join` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
:param args: extra positional arguments to pass to join
:param kwargs: extra keyword arguments to pass to join
"""
self.worker.join(*args, **kwargs)
def terminate(self):
"""Terminate the Pea.
This method calls :meth:`terminate` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
"""
if hasattr(self.worker, 'terminate'):
self.worker.terminate()
def _retry_control_message(self, command: str, num_retry: int = 3):
from ..zmq import send_ctrl_message
for retry in range(1, num_retry + 1):
self.logger.debug(f'Sending {command} command for the {retry}th time')
try:
send_ctrl_message(
self.runtime_ctrl_address,
command,
timeout=self._timeout_ctrl,
raise_exception=True,
)
break
except Exception as ex:
self.logger.warning(f'{ex!r}')
if retry == num_retry:
raise ex
def activate_runtime(self):
""" Send activate control message. """
self.runtime_cls.activate(
logger=self.logger,
socket_in_type=self.args.socket_in,
control_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
)
def _cancel_runtime(self, skip_deactivate: bool = False):
"""
Send terminate control message.
:param skip_deactivate: Mark that the DEACTIVATE signal may be missed if set to True
"""
self.runtime_cls.cancel(
cancel_event=self.cancel_event,
logger=self.logger,
socket_in_type=self.args.socket_in,
control_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
skip_deactivate=skip_deactivate,
)
def _wait_for_ready_or_shutdown(self, timeout: Optional[float]):
"""
Waits for the process to be ready or to know it has failed.
:param timeout: The time to wait before readiness or failure is determined
.. # noqa: DAR201
"""
return self.runtime_cls.wait_for_ready_or_shutdown(
timeout=timeout,
ready_or_shutdown_event=self.ready_or_shutdown.event,
ctrl_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
shutdown_event=self.is_shutdown,
)
def wait_start_success(self):
"""Block until all peas starts successfully.
If not success, it will raise an error hoping the outer function to catch it
"""
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
if self._wait_for_ready_or_shutdown(_timeout):
if self.is_shutdown.is_set():
# return too early and the shutdown is set, means something fails!!
if not self.is_started.is_set():
raise RuntimeFailToStart
else:
raise RuntimeRunForeverEarlyError
else:
self.logger.success(__ready_msg__)
else:
_timeout = _timeout or -1
self.logger.warning(
f'{self.runtime_cls!r} timeout after waiting for {self.args.timeout_ready}ms, '
f'if your executor takes time to load, you may increase --timeout-ready'
)
self.close()
raise TimeoutError(
f'{typename(self)}:{self.name} can not be initialized after {_timeout * 1e3}ms'
)
@property
def _is_dealer(self):
"""Return true if this `Pea` must act as a Dealer responding to a Router
.. # noqa: DAR201
"""
return self.args.socket_in == SocketType.DEALER_CONNECT
def close(self) -> None:
"""Close the Pea
This method makes sure that the `Process/thread` is properly finished and its resources properly released
"""
# if that 1s is not enough, it means the process/thread is still in forever loop, cancel it
self.logger.debug('waiting for ready or shutdown signal from runtime')
if self.is_ready.is_set() and not self.is_shutdown.is_set():
try:
self._cancel_runtime()
if not self.is_shutdown.wait(timeout=self._timeout_ctrl):
self.terminate()
time.sleep(0.1)
raise Exception(
f'Shutdown signal was not received for {self._timeout_ctrl}'
)
except Exception as ex:
self.logger.error(
f'{ex!r} during {self.close!r}'
+ f'\n add "--quiet-error" to suppress the exception details'
if not self.args.quiet_error
else '',
exc_info=not self.args.quiet_error,
)
# if it is not daemon, block until the process/thread finish work
if not self.args.daemon:
self.join()
elif self.is_shutdown.is_set():
# here shutdown has been set already, therefore `run` will gracefully finish
pass
else:
# sometimes, we arrive to the close logic before the `is_ready` is even set.
# Observed with `gateway` when Pods fail to start
self.logger.warning(
'Pea is being closed before being ready. Most likely some other Pea in the Flow or Pod '
'failed to start'
)
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
self.logger.debug('waiting for ready or shutdown signal from runtime')
if self._wait_for_ready_or_shutdown(_timeout):
if not self.is_shutdown.is_set():
self._cancel_runtime(skip_deactivate=True)
if not self.is_shutdown.wait(timeout=self._timeout_ctrl):
self.terminate()
time.sleep(0.1)
raise Exception(
f'Shutdown signal was not received for {self._timeout_ctrl}'
)
else:
self.logger.warning(
'Terminating process after waiting for readiness signal for graceful shutdown'
)
# Just last resource, terminate it
self.terminate()
time.sleep(0.1)
self.logger.debug(__stop_msg__)
self.logger.close()
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _get_runtime_cls(self) -> Tuple[Any, bool]:
from .helper import update_runtime_cls
from ..runtimes import get_runtime
update_runtime_cls(self.args)
return get_runtime(self.args.runtime_cls)
@property
def role(self) -> 'PeaRoleType':
"""Get the role of this pea in a pod
.. #noqa: DAR201"""
return self.args.pea_role
@property
def _is_inner_pea(self) -> bool:
"""Determine whether this is a inner pea or a head/tail
.. #noqa: DAR201"""
return self.role is PeaRoleType.SINGLETON or self.role is PeaRoleType.PARALLEL
| 38.984211 | 115 | 0.605778 | [
"Apache-2.0"
] | MaxielMrvaljevic/jina | jina/peapods/peas/__init__.py | 14,814 | Python |
from getpass import getuser
from os.path import abspath, join, exists
from uuid import uuid4
from seisflows.tools import unix
from seisflows.tools.config import ParameterError, SeisflowsParameters, SeisflowsPaths, custom_import
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class tiger_md(custom_import('system', 'slurm_md')):
""" Specially designed system interface for tiger.princeton.edu
See parent class for more information.
"""
def check(self):
""" Checks parameters and paths
"""
if 'UUID' not in PAR:
setattr(PAR, 'UUID', str(uuid4()))
if 'SCRATCH' not in PATH:
setattr(PATH, 'SCRATCH', join('/scratch/gpfs', getuser(), 'seisflows', PAR.UUID))
if 'LOCAL' not in PATH:
setattr(PATH, 'LOCAL', '')
super(tiger_md, self).check()
def submit(self, *args, **kwargs):
""" Submits job
"""
if not exists(PATH.SUBMIT + '/' + 'scratch'):
unix.ln(PATH.SCRATCH, PATH.SUBMIT + '/' + 'scratch')
super(tiger_md, self).submit(*args, **kwargs)
| 26.285714 | 101 | 0.621377 | [
"BSD-2-Clause"
] | chukren/seisflows | seisflows/system/tiger_md.py | 1,104 | Python |
# discover devices
# by clach04 (https://github.com/clach04)
# found at https://github.com/codetheweb/tuyapi/issues/5#issuecomment-348799963
# tested with Python 3.6.4 on Gentoo Linux
import socket
import struct
host_port = 6666
host_ip = '239.255.255.250'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.bind(('', host_port))
mreq = struct.pack('4sl', socket.inet_aton(host_ip), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
try:
while True:
print('listening')
data = sock.recvfrom(1024)
raw_bytes, peer_info = data
print(data)
finally:
sock.close()
| 26.466667 | 79 | 0.735516 | [
"Apache-2.0"
] | SDNick484/rectec_status | discover_devices.py | 794 | Python |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from pandas.tseries.holiday import (
GoodFriday,
USLaborDay,
USPresidentsDay,
USThanksgivingDay,
)
from pytz import timezone
from .trading_calendar import HolidayCalendar, TradingCalendar
from .us_holidays import (
Christmas,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
USBlackFridayInOrAfter1993,
USIndependenceDay,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USNationalDaysofMourning,
USNewYearsDay,
)
# Useful resources for making changes to this file:
# http://www.cmegroup.com/tools-information/holiday-calendar.html
class CMESExchangeCalendar(TradingCalendar):
"""
Exchange calendar for the Chicago Mercantile Exchange (CMES).
Open Time: 5:00 PM, America/Chicago
Close Time: 5:00 PM, America/Chicago
Regularly-Observed Holidays:
- New Years Day
- Good Friday
- Christmas
"""
name = "CME"
country_code = "US"
tz = timezone("America/Chicago")
open_times = ((None, time(17, 1)),)
close_times = ((None, time(16)),)
@property
def open_offset(self):
return -1
@property
def regular_holidays(self):
# The CME has different holiday rules depending on the type of
# instrument. For example, http://www.cmegroup.com/tools-information/holiday-calendar/files/2016-4th-of-july-holiday-schedule.pdf # noqa
# shows that Equity, Interest Rate, FX, Energy, Metals & DME Products
# close at 1200 CT on July 4, 2016, while Grain, Oilseed & MGEX
# Products and Livestock, Dairy & Lumber products are completely
# closed.
# For now, we will treat the CME as having a single calendar, and just
# go with the most conservative hours - and treat July 4 as an early
# close at noon.
return HolidayCalendar(
[
USNewYearsDay,
GoodFriday,
Christmas,
]
)
@property
def adhoc_holidays(self):
return USNationalDaysofMourning
@property
def special_closes(self):
return [
(
time(12),
HolidayCalendar(
[
USMartinLutherKingJrAfter1998,
USPresidentsDay,
USMemorialDay,
USLaborDay,
USIndependenceDay,
USThanksgivingDay,
USBlackFridayInOrAfter1993,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
]
),
)
]
| 29.196429 | 144 | 0.624159 | [
"Apache-2.0"
] | quantrocket-llc/trading-calendars | trading_calendars/exchange_calendar_cmes.py | 3,270 | Python |
# -*- coding: utf-8 -*-
import xlrd
import pandas as pd
import numpy as np
mirna_sim_path = '../data/miRNA_sim.xlsx'
disease_sim_path = '../data/disease_sim.xlsx'
mirna_disease_ass_path = '../data/miRNA_disease.csv'
mirna_data_dict_path = "../data/mirna_data_dict.npy"
disease_data_dict_path = "../data/disease_data_dict.npy"
mirna_data_dict = np.load(mirna_data_dict_path, allow_pickle=True).item()
disease_data_dict = np.load(disease_data_dict_path, allow_pickle=True).item()
def get_mirna_sim():
data = xlrd.open_workbook(mirna_sim_path)
table = data.sheets()[0]
nrows = table.nrows
mirna_sim_dict = {}
for cnt in range(nrows):
value = table.row_values(cnt)
mirna_sim_dict[cnt+1] = value
return mirna_sim_dict
def get_disease_sim():
data = xlrd.open_workbook(disease_sim_path)
table = data.sheets()[0]
nrows = table.nrows
disease_sim_dict = {}
for cnt in range(nrows):
value = table.row_values(cnt)
disease_sim_dict[cnt+1] = value
return disease_sim_dict
def get_data(data_path):
mm_sim_dict = get_mirna_sim()
dd_sim_dict = get_disease_sim()
total_sample = []
Label = []
with open(data_path) as f:
for line in f:
item = line.strip().split('\t')
mirna = int(item[0])
disease = int(item[1])
label = int(item[2])
Label.append(label)
mirna_ver = mm_sim_dict[mirna] + mirna_data_dict[mirna].tolist()
disease_ver = dd_sim_dict[disease] + disease_data_dict[disease].tolist()
ver = mirna_ver + disease_ver
total_sample.append(ver)
total_sample.reverse()
Label.reverse()
return total_sample, Label
def get_train_data():
data_path = '../data/train_data.txt'
total_sample, label = get_data(data_path)
return total_sample, label
| 29.784615 | 85 | 0.642562 | [
"MIT"
] | dayunliu/SMALF | code/get_data.py | 1,936 | Python |
from unittest import TestCase
from family_tree.member import Member
from family_tree import constants
from family_tree.family_tree import FamilyTree
class TestFamilyTree(TestCase):
def setUp(self):
self.ftree = FamilyTree()
def test_add_child(self):
result = self.ftree.add_child("Father", "Male")
self.assertEqual(result, constants.CHILD_ADDITION_SUCCEEDED)
self.assertEqual(
self.ftree.family_tree.get("Father", None) is not None,
True
)
self.assertEqual(
self.ftree.add_child("Zim", "Male", "Mother"),
constants.PERSON_NOT_FOUND
)
self.assertEqual(
self.ftree.add_child("Zim", "Male", "Father"),
constants.CHILD_ADDITION_FAILED
)
mother = Member(2, "Mother", "Female")
mother.spouse = self.ftree.family_tree["Father"]
self.ftree.family_tree["Father"].set_spouse(mother)
self.ftree.family_tree["Mother"] = mother
self.assertEqual(
self.ftree.add_child("Zim", "Male", "Mother"),
constants.CHILD_ADDITION_SUCCEEDED
)
self.assertEqual(
self.ftree.add_child("Zim", "Male", "Mother"),
constants.CHILD_ADDITION_FAILED
)
self.assertEqual(
self.ftree.family_tree.get("Zim", None) is not None,
True
)
def test_add_spouse(self):
self.assertEqual(
self.ftree.add_spouse("Wife", "Female", "Zim"),
constants.SPOUSE_ADDITION_FAILED
)
dummy_member = Member(1, "DummyMember", "Male")
self.ftree.family_tree['DummyMember'] = dummy_member
self.assertEqual(
self.ftree.add_spouse("Wife", "Female", "Zim"),
constants.PERSON_NOT_FOUND
)
spouse_a = Member(1, "FakeMember", "Female")
spouse_b = Member(1, "AlreadyMarriedMember", "Male")
spouse_b.set_spouse(spouse_a)
spouse_c = Member(1, "Zim", "Male")
self.ftree.family_tree["FakeMember"] = spouse_a
self.ftree.family_tree["AlreadyMarriedMember"] = spouse_b
self.ftree.family_tree["Zim"] = spouse_c
self.assertEqual(
self.ftree.add_spouse("Wife", "Female", "FakeMember"),
constants.SPOUSE_ADDITION_FAILED
)
self.assertEqual(
self.ftree.add_spouse("Wife", "Female", "AlreadyMarriedMember"),
constants.SPOUSE_ADDITION_FAILED
)
self.assertEqual(
self.ftree.add_spouse("Wife", "Female", "Zim"),
constants.SPOUSE_ADDITION_SUCCEEDED
)
self.assertEqual(
self.ftree.add_spouse("Wife", "Female", "Zim"),
constants.SPOUSE_ADDITION_FAILED
)
def test_get_relationship(self):
self.assertEqual(
self.ftree.get_relationship("Zim", "brother_in_law"),
constants.PERSON_NOT_FOUND
)
member = Member(1, "Zim", "Male")
son_a = Member(2, "SonA", "Male")
son_b = Member(3, "SonB", "Male")
member.add_child(son_b)
member.add_child(son_a)
son_a.set_father(member)
son_b.set_father(member)
self.ftree.family_tree["Zim"] = member
self.ftree.family_tree["SonA"] = son_a
self.ftree.family_tree["SonB"] = son_b
self.assertEqual(
self.ftree.get_relationship("Zim", "daughter"),
constants.NONE
)
self.assertEqual(
self.ftree.get_relationship("Zim", "son"), "SonA SonB")
| 34.76699 | 76 | 0.596481 | [
"MIT"
] | Zim95/meet_the_family | tests/integration/test_family_tree.py | 3,581 | Python |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.stack_dynamic_partitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSegmentStackOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
dict( # empty inputs
data=[],
partitions=[],
num_partitions=0,
expected=[],
expected_ragged_rank=1),
dict( # empty data, num_partitions>0
data=[],
partitions=[],
num_partitions=3,
expected=[[], [], []]),
dict( # 1D data, 1D partitions (docstring example)
data=['a', 'b', 'c', 'd', 'e'],
partitions=[3, 0, 2, 2, 3],
num_partitions=5,
expected=[['b'], [], ['c', 'd'], ['a', 'e'], []]),
dict( # 2D data, 1D partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[2, 1, 2, 3],
num_partitions=4,
expected=[[], [['c', 'd']], [['a', 'b'], ['e', 'f']], [['g', 'h']]],
expected_ragged_rank=1),
dict( # 2D ragged data, 1D partitions
data=[['a'], ['b', 'c', 'd'], [], ['e', 'f']],
data_ragged_rank=1,
partitions=[2, 1, 2, 3],
num_partitions=4,
expected=[[], [['b', 'c', 'd']], [['a'], []], [['e', 'f']]],
expected_ragged_rank=2),
dict( # 2D data, 2D partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[[3, 0], [2, 2], [4, 3], [2, 0]],
num_partitions=5,
expected=[['b', 'h'], [], ['c', 'd', 'g'], ['a', 'f'], ['e']]),
dict( # 2D ragged data, 2D ragged partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[[3, 0], [2, 2], [4, 3], [2, 0]],
num_partitions=5,
expected=[['b', 'h'], [], ['c', 'd', 'g'], ['a', 'f'], ['e']]),
dict( # 3D data, 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]],
data_ragged_rank=0,
partitions=[1, 0],
num_partitions=2,
expected=[[[['e', 'f'], ['g', 'h']]], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=1), 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
data_ragged_rank=1,
partitions=[2, 0],
num_partitions=3,
expected=[[[['e', 'f']]], [], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=2),
dict( # 3D data (ragged_rank=2), 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[2, 0],
num_partitions=3,
expected=[[[['e', 'f', 'g', 'h']]], [], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=3),
dict( # 3D data, 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]],
data_ragged_rank=0,
partitions=[[1, 0], [0, 3]],
segment_ids_ragged_rank=0,
num_partitions=4,
expected=[[['c', 'd'], ['e', 'f']], [['a', 'b']], [], [['g', 'h']]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=1), 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
data_ragged_rank=1,
partitions=[[1, 0], [0]],
segment_ids_ragged_rank=1,
num_partitions=2,
expected=[[['c', 'd'], ['e', 'f']], [['a', 'b']]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=2), 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[[1, 0], [0]],
segment_ids_ragged_rank=1,
num_partitions=3,
expected=[[['c', 'd'], ['e', 'f', 'g', 'h']], [['a', 'b']], []],
expected_ragged_rank=2),
dict( # 3D data (ragged_rank=2), 3d partitions (ragged_rank=2)
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[[[3, 0], [1, 2]], [[1, 1, 0, 1]]],
segment_ids_ragged_rank=2,
num_partitions=4,
expected=[['b', 'g'], ['c', 'e', 'f', 'h'], ['d'], ['a']]),
dict( # 0D data, 0D partitions
data='a',
partitions=3,
num_partitions=5,
expected=[[], [], [], ['a'], []]),
dict( # 1D data, 0D partitions
data=['a', 'b', 'c'],
partitions=3,
num_partitions=5,
expected=[[], [], [], [['a', 'b', 'c']], []],
expected_ragged_rank=1),
dict( # 2D data, 0D partitions
data=[['a', 'b'], ['c', 'd']],
data_ragged_rank=0,
partitions=3,
num_partitions=5,
expected=[[], [], [], [[['a', 'b'], ['c', 'd']]], []],
expected_ragged_rank=1),
dict( # 2D data (ragged_rank=1), 0D partitions
data=[['a', 'b'], ['c']],
data_ragged_rank=1,
partitions=3,
num_partitions=5,
expected=[[], [], [], [[['a', 'b'], ['c']]], []],
expected_ragged_rank=3),
])
def testRaggedSegmentStack(self,
data,
partitions,
num_partitions,
expected,
data_ragged_rank=None,
segment_ids_ragged_rank=None,
expected_ragged_rank=None):
for seg_dtype in [dtypes.int32, dtypes.int64]:
data_tensor = ragged_factory_ops.constant(
data, row_splits_dtype=seg_dtype, ragged_rank=data_ragged_rank)
segment_ids_tensor = ragged_factory_ops.constant(
partitions,
dtype=seg_dtype,
row_splits_dtype=seg_dtype,
ragged_rank=segment_ids_ragged_rank)
expected_tensor = ragged_factory_ops.constant(
expected,
row_splits_dtype=seg_dtype,
ragged_rank=expected_ragged_rank)
result = ragged_array_ops.stack_dynamic_partitions(
data_tensor, segment_ids_tensor, num_partitions)
self.assertAllEqual(result, expected_tensor)
# Check that it's equivalent to tf.stack(dynamic_partition(...)),
# where applicable.
if (data_ragged_rank == 0 and segment_ids_ragged_rank == 0 and
seg_dtype == dtypes.int32):
equiv = ragged_concat_ops.stack(
data_flow_ops.dynamic_partition(data_tensor, segment_ids_tensor,
num_partitions))
self.assertAllEqual(result, self.evaluate(equiv).to_list())
@parameterized.parameters([
dict(
data=['a', 'b', 'c'],
partitions=[2, -1, 0],
num_partitions=10,
error='must be non-negative'),
dict(
data=['a', 'b', 'c'],
partitions=[2, 10, 0],
num_partitions=1,
error='partitions must be less than num_partitions'),
dict(
data=['a', 'b', 'c'],
partitions=[2, 10, 0],
num_partitions=10,
error='partitions must be less than num_partitions'),
dict(
data=[['a', 'b'], ['c']],
partitions=[[2], [3, 0]],
num_partitions=10,
error='data and partitions have incompatible ragged shapes'),
])
def testRuntimeError(self, data, partitions, num_partitions, error):
data = ragged_factory_ops.constant(data)
partitions = ragged_factory_ops.constant(partitions, dtype=dtypes.int64)
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
error):
self.evaluate(
ragged_array_ops.stack_dynamic_partitions(data, partitions,
num_partitions))
@parameterized.parameters([
dict(
data=['a', 'b', 'c'],
partitions=[1, 2],
num_partitions=10,
error=r'Shapes \(2,\) and \(3,\) are incompatible'),
dict(
data=[['a', 'b'], ['c', 'd']],
partitions=[[1, 2, 3], [4, 5, 6]],
num_partitions=10,
error=r'Shapes \(2, 3\) and \(2, 2\) are incompatible'),
dict(
data=['a', 'b', 'c'],
partitions=[1, 2, 3],
num_partitions=[1, 2, 3],
error='must have rank 0'),
])
def testStaticError(self, data, partitions, num_partitions, error):
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
error):
ragged_array_ops.stack_dynamic_partitions(data, partitions,
num_partitions)
def testUnknownRankError(self):
if context.executing_eagerly():
return
partitions = array_ops.placeholder(dtypes.int32, None)
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
'partitions must have known rank'):
ragged_array_ops.stack_dynamic_partitions(['a', 'b', 'c'], partitions, 10)
if __name__ == '__main__':
googletest.main()
| 40.868217 | 80 | 0.519537 | [
"Apache-2.0"
] | 00arun00/tensorflow | tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py | 10,544 | Python |
from setuptools import setup
setup(
name="sphinx_rtd_theme_http",
version="1.0.0",
author="Ashley Whetter",
url="https://github.com/AWhetter/sphinx_rtd_theme_http/browse",
py_modules=["sphinx_rtd_theme_http"],
install_requires=[
"sphinx_rtd_theme",
],
classifiers=[
'Framework :: Sphinx',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
"License :: OSI Approved :: BSD License",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Topic :: Documentation',
'Topic :: Software Development :: Documentation',
],
)
| 30.666667 | 67 | 0.607488 | [
"BSD-3-Clause"
] | AWhetter/sphinx_rtd_theme_http | setup.py | 828 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
1351.
"""
from test_framework.blocktools import create_block, create_coinbase, make_conform_to_ctor
from test_framework.messages import (
CTransaction,
FromHex,
msg_block,
msg_tx,
ToHex,
)
from test_framework.mininode import (
mininode_lock,
network_thread_start,
P2PInterface,
)
from test_framework.script import (
CScript,
CScriptNum,
OP_1NEGATE,
OP_CHECKLOCKTIMEVERIFY,
OP_DROP,
OP_TRUE,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import assert_equal, wait_until
CLTV_HEIGHT = 1351
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
def cltv_lock_to_height(node, tx, to_address, amount, height=-1):
'''Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make
a transaction that spends it.
This transforms the output script to anyone can spend (OP_TRUE) if the
lock time condition is valid.
Default height is -1 which leads CLTV to fail
TODO: test more ways that transactions using CLTV could be invalid (eg
locktime requirements fail, sequence time requirements fail, etc).
'''
height_op = OP_1NEGATE
if(height > 0):
tx.vin[0].nSequence = 0
tx.nLockTime = height
height_op = CScriptNum(height)
tx.vout[0].scriptPubKey = CScript(
[height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE])
pad_tx(tx)
fundtx_raw = node.signrawtransactionwithwallet(ToHex(tx))['hex']
fundtx = FromHex(CTransaction(), fundtx_raw)
fundtx.rehash()
# make spending tx
from_txid = fundtx.hash
inputs = [{
"txid": fundtx.hash,
"vout": 0
}]
output = {to_address: amount}
spendtx_raw = node.createrawtransaction(inputs, output)
spendtx = FromHex(CTransaction(), spendtx_raw)
pad_tx(spendtx)
return fundtx, spendtx
def spend_from_coinbase(node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{"txid": from_txid, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = FromHex(CTransaction(), signresult['hex'])
return tx
class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1']]
self.setup_clean_chain = True
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
# wait_for_verack ensures that the P2P connection is fully up.
self.nodes[0].p2p.wait_for_verack()
self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2))
self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info(
"Test that an invalid-according-to-CLTV transaction can still appear in a block")
fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[0],
self.nodeaddress, 49.99)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49.98)
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(
CLTV_HEIGHT - 1), block_time)
block.nVersion = 3
block.vtx.append(fundtx)
# include the -1 CLTV in block
block.vtx.append(spendtx)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
# This block is valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 4")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
block.nVersion = 3
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
lock=mininode_lock)
with mininode_lock:
assert_equal(
self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
assert_equal(
self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000003)')
assert_equal(
self.nodes[0].p2p.last_message["reject"].data, block.sha256)
del self.nodes[0].p2p.last_message["reject"]
self.log.info(
"Test that invalid-according-to-cltv transactions cannot appear in a block")
block.nVersion = 4
fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[1],
self.nodeaddress, 49.99)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49.98)
# The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is valid.
self.nodes[0].p2p.send_and_ping(msg_tx(fundtx))
assert fundtx.hash in self.nodes[0].getrawmempool()
# Mine a block containing the funding transaction
block.vtx.append(fundtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
# This block is valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# We show that this tx is invalid due to CLTV by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False,
'reject-reason': '64: non-mandatory-script-verify-flag (Negative locktime)'}],
self.nodes[0].testmempoolaccept(
rawtxs=[spendtx.serialize().hex()], allowhighfees=True)
)
rejectedtx_signed = self.nodes[0].signrawtransactionwithwallet(
ToHex(spendtx))
# Couldn't complete signature due to CLTV
assert(rejectedtx_signed['errors'][0]['error'] == 'Negative locktime')
tip = block.hash
block_time += 1
block = create_block(
block.sha256, create_coinbase(CLTV_HEIGHT+1), block_time)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
# This block is invalid
assert_equal(self.nodes[0].getbestblockhash(), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
lock=mininode_lock)
with mininode_lock:
assert self.nodes[0].p2p.last_message["reject"].code in [
REJECT_INVALID, REJECT_NONSTANDARD]
assert_equal(
self.nodes[0].p2p.last_message["reject"].data, block.sha256)
if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
assert_equal(
self.nodes[0].p2p.last_message["reject"].reason, b'blk-bad-inputs')
else:
assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info(
"Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[2],
self.nodeaddress, 49.99)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49.98, CLTV_HEIGHT)
# make sure sequence is nonfinal and locktime is good
spendtx.vin[0].nSequence = 0xfffffffe
spendtx.nLockTime = CLTV_HEIGHT
# both transactions are fully valid
self.nodes[0].sendrawtransaction(ToHex(fundtx))
self.nodes[0].sendrawtransaction(ToHex(spendtx))
# Modify the transactions in the block to be valid against CLTV
block.vtx.pop(1)
block.vtx.append(fundtx)
block.vtx.append(spendtx)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
# This block is now valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
if __name__ == '__main__':
BIP65Test().main()
| 36 | 97 | 0.650637 | [
"MIT"
] | ComputerCraftr/devault | test/functional/feature_cltv.py | 9,108 | Python |
"""
Taken from vithursant's repo:
https://github.com/vithursant/MagnetLoss-PyTorch/blob/master/magnet_loss/magnet_loss.py
"""
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class MagnetLoss(nn.Module):
"""
Magnet loss technique presented in the paper:
''Metric Learning with Adaptive Density Discrimination'' by Oren Rippel, Manohar Paluri, Piotr Dollar, Lubomir Bourdev in
https://research.fb.com/wp-content/uploads/2016/05/metric-learning-with-adaptive-density-discrimination.pdf?
Args:
r: A batch of features.
classes: Class labels for each example.
clusters: Cluster labels for each example.
cluster_classes: Class label for each cluster.
n_clusters: Total number of clusters.
alpha: The cluster separation gap hyperparameter.
Returns:
total_loss: The total magnet loss for the batch.
losses: The loss for each example in the batch.
"""
def __init__(self, m, d, alpha=1.0, L=128, style='closest'):
super(MagnetLoss, self).__init__()
self.r = None
self.classes = None
self.clusters = None
self.cluster_classes = None
self.n_clusters = None
self.alpha = alpha
self.L = L
self.style = style
self.n_clusters = m
self.examples_per_cluster = d
self.variances = torch.tensor([0.0])
def forward(self, input, target): # reps and classes, x and y
GPU_INT_DTYPE = torch.cuda.IntTensor
GPU_LONG_DTYPE = torch.cuda.LongTensor
GPU_FLOAT_DTYPE = torch.cuda.FloatTensor
self.r = input
classes = target.cpu().numpy()
self.classes = torch.from_numpy(classes).type(GPU_LONG_DTYPE)
self.clusters, _ = torch.sort(torch.arange(0, float(self.n_clusters)).repeat(self.examples_per_cluster))
self.clusters = self.clusters.type(GPU_INT_DTYPE)
self.cluster_classes = self.classes[0:self.n_clusters*self.examples_per_cluster:self.examples_per_cluster]
# Take cluster means within the batch
cluster_examples = dynamic_partition(self.r, self.clusters, self.n_clusters)
cluster_means = torch.stack([torch.mean(x, dim=0) for x in cluster_examples])
sample_costs = compute_euclidean_distance(cluster_means, expand_dims(self.r, 1))
self.sample_costs = sample_costs
clusters_tensor = self.clusters.type(GPU_FLOAT_DTYPE)
n_clusters_tensor = torch.arange(0, self.n_clusters).type(GPU_FLOAT_DTYPE)
intra_cluster_mask = Variable(comparison_mask(clusters_tensor, n_clusters_tensor).type(GPU_FLOAT_DTYPE))
intra_cluster_costs = torch.sum(intra_cluster_mask * sample_costs, dim=1)
N = self.r.size()[0] # N = M*D (Batch size)
variance = torch.sum(intra_cluster_costs) / float(N - 1)
# self.variances = np.hstack((self.variances, variance.data.cpu().numpy()))
self.variances = torch.cat((self.variances, variance.unsqueeze(0).cpu()), 0)
var_normalizer = -1 / (2 * variance**2)
# Compute numerator
numerator = torch.exp(var_normalizer * intra_cluster_costs - self.alpha)
classes_tensor = self.classes.type(GPU_FLOAT_DTYPE)
cluster_classes_tensor = self.cluster_classes.type(GPU_FLOAT_DTYPE)
# Compute denominator
diff_class_mask = Variable(comparison_mask(classes_tensor, cluster_classes_tensor).type(GPU_FLOAT_DTYPE))
diff_class_mask = 1 - diff_class_mask # Logical not on ByteTensor
denom_sample_costs = torch.exp(var_normalizer * sample_costs)
denominator = torch.sum(diff_class_mask * denom_sample_costs, dim=1)
epsilon = 1e-8
losses = F.relu(-torch.log(numerator / (denominator + epsilon) + epsilon))
total_loss = torch.mean(losses)
if self.style == 'closest': # acts on the clusters in this batch/episode rather than those calculate over the entire set!!
_, pred = sample_costs.min(1)
acc = pred.eq(clusters_tensor.type(GPU_LONG_DTYPE)).float().mean()
else:
raise NotImplementedError
# TODO implement the version that takes into account variance
# TODO note it will still just be acc on batch rather than set... (unlike val)
# num_classes = len(np.unique(self.cluster_classes.cpu())) # m # the number of classes in this batch
#
# num_clusters = cluster_means.size()[0] # m*k
#
# # Sort the clusters by closest distance to sample
# sorted_sample_costs, indices = torch.sort(sample_costs)
# sorted_sample_costs = sorted_sample_costs.squeeze()
# indices = indices.type(GPU_LONG_DTYPE).squeeze()
# sorted_cluster_classes = self.cluster_classes[indices]
#
# # If L < num_clusters then lets only take the top L
# if self.L < num_clusters:
# sorted_sample_costs = sorted_sample_costs[:self.L]
# sorted_cluster_classes = sorted_cluster_classes[:self.L]
# num_clusters = self.L
#
# normalised_costs = torch.exp(var_normalizer * sorted_sample_costs).type(GPU_FLOAT_DTYPE)
#
# per_class_costs = torch.zeros(num_classes, num_clusters).type(GPU_FLOAT_DTYPE) # todo, address this issue of num_classes not matching batch_size and that being a problem...
# per_class_costs = per_class_costs.scatter_(0, sorted_cluster_classes.unsqueeze(0), normalised_costs.unsqueeze(0))
# numerator = per_class_costs.sum(1)
#
# denominator = torch.sum(normalised_costs)
#
# epsilon = 1e-8
#
# probs = numerator / (denominator + epsilon)
#
# _, pred = probs.max(0)
# acc = pred.eq(target).float()
return total_loss, losses, pred, acc
class MagnetLossEval(nn.Module):
def __init__(self, L=128, style='magnet'):
super(MagnetLossEval, self).__init__()
self.cluster_means = None
self.cluster_classes = None
self.variance = None
self.L = L
self.style = style
def forward(self, input, target): # reps and classes, x and y # expects batch size of 1
# make sure these have been set with the callbacks!!
assert self.cluster_means is not None
assert self.cluster_classes is not None
assert self.variance is not None
GPU_INT_DTYPE = torch.cuda.IntTensor
GPU_LONG_DTYPE = torch.cuda.LongTensor
GPU_FLOAT_DTYPE = torch.cuda.FloatTensor
num_classes = np.max(self.cluster_classes) + 1 # the number of classes of the dataset
cluster_means = torch.from_numpy(self.cluster_means).type(GPU_FLOAT_DTYPE)
cluster_classes = torch.from_numpy(self.cluster_classes).type(GPU_LONG_DTYPE)
sample_costs = compute_euclidean_distance(cluster_means, expand_dims(input, 1))
if self.style == 'closest':
_, pred = sample_costs.min(1)
pred = cluster_classes[pred]
acc = pred.eq(target).float()
return torch.zeros(1), torch.zeros(1), pred, acc
else:
num_clusters = cluster_means.size()[0]
# Sort the clusters by closest distance to sample
sorted_sample_costs, indices = torch.sort(sample_costs)
sorted_sample_costs = sorted_sample_costs.squeeze()
indices = indices.type(GPU_LONG_DTYPE).squeeze()
sorted_cluster_classes = cluster_classes[indices]
# If L < num_clusters then lets only take the top L
if self.L < num_clusters:
sorted_sample_costs = sorted_sample_costs[:self.L]
sorted_cluster_classes = sorted_cluster_classes[:self.L]
num_clusters = self.L
var_normalizer = -1 / (2 * self.variance ** 2)
normalised_costs = torch.exp(var_normalizer * sorted_sample_costs).type(GPU_FLOAT_DTYPE)
per_class_costs = torch.zeros(num_classes, num_clusters).type(GPU_FLOAT_DTYPE)
per_class_costs = per_class_costs.scatter_(0, sorted_cluster_classes.unsqueeze(0), normalised_costs.unsqueeze(0))
numerator = per_class_costs.sum(1)
denominator = torch.sum(normalised_costs)
epsilon = 1e-8
probs = numerator / (denominator + epsilon)
_, pred = probs.max(0)
acc = pred.eq(target).float()
return torch.zeros(1), torch.zeros(1), pred, acc
def expand_dims(var, dim=0):
""" Is similar to [numpy.expand_dims](https://docs.scipy.org/doc/numpy/reference/generated/numpy.expand_dims.html).
var = torch.range(0, 9).view(-1, 2)
torch.expand_dims(var, 0).size()
# (1, 5, 2)
"""
sizes = list(var.size())
sizes.insert(dim, 1)
return var.view(*sizes)
def comparison_mask(a_labels, b_labels):
"""Computes boolean mask for distance comparisons"""
return torch.eq(expand_dims(a_labels, 1),
expand_dims(b_labels, 0))
def dynamic_partition(X, partitions, n_clusters):
"""Partitions the data into the number of cluster bins"""
cluster_bin = torch.chunk(X, n_clusters)
return cluster_bin
def compute_euclidean_distance(x, y):
return torch.sum((x - y)**2, dim=2)
| 40.105932 | 187 | 0.65177 | [
"MIT"
] | jiajunhua/HaydenFaulkner-pytorch.repmet | losses/magnet_loss.py | 9,465 | Python |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RIca(RPackage):
"""Independent Component Analysis (ICA) using various algorithms: FastICA,
Information-Maximization (Infomax), and Joint Approximate Diagonalization
of Eigenmatrices (JADE)."""
homepage = "https://cloud.r-project.org/package=ica"
url = "https://cloud.r-project.org/src/contrib/ica_1.0-1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/ica"
version('1.0-2', sha256='e721596fc6175d3270a60d5e0b5b98be103a8fd0dd93ef16680af21fe0b54179')
version('1.0-1', sha256='98559a8bb12dd134a40ce8fd133803e2a38456b45d0e2a507d66022a8e2274ae')
version('1.0-0', sha256='9ff4ec7f4525bdce9d7859b22a1a170a1f6f9f7fb9f3d0b537dcaec77cd83d01')
| 43.857143 | 95 | 0.763301 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 0t1s1/spack | var/spack/repos/builtin/packages/r-ica/package.py | 921 | Python |
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPGatherOpTest(hu.HypothesisTestCase):
@given(
dim1=st.integers(256, 512),
dim2=st.integers(32, 256),
is_empty=st.booleans(),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):
if is_empty:
dim2 = 0
# FIXME : DNNLOWP Gather doesn't support quantized input and
# dequantized output
if in_quantized:
out_quantized = True
data = (np.random.rand(dim1) * 2 - 1).astype(np.float32)
index = np.floor(np.random.rand(dim2) * dim1).astype(np.int32)
Output = collections.namedtuple("Output", ["out", "op_type", "engine"])
outputs = []
op_engine_list = [
("Gather", ""),
("Gather", "DNNLOWP"),
("Int8Gather", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize_data = core.CreateOperator(
"Quantize", ["data"], ["data_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_data])
gather = core.CreateOperator(
op_type,
["data_q" if do_quantize else "data", "index"],
["out_q" if do_dequantize else "out"],
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([gather])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["out_q"], ["out"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("data").feed(data, device_option=gc)
self.ws.create_blob("index").feed(index, device_option=gc)
self.ws.run(net)
outputs.append(
Output(out=self.ws.blobs["out"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs, ref=data)
| 35.012195 | 94 | 0.573668 | [
"MIT"
] | Westlanderz/AI-Plat1 | venv/Lib/site-packages/caffe2/quantization/server/gather_dnnlowp_op_test.py | 2,871 | Python |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Addons.
TensorFlow Addons is a repository of contributions that conform to well-
established API patterns, but implement new functionality not available
in core TensorFlow. TensorFlow natively supports a large number of
operators, layers, metrics, losses, and optimizers. However, in a fast
moving field like ML, there are many interesting new developments that
cannot be integrated into core TensorFlow (because their broad
applicability is not yet clear, or it is mostly used by a smaller subset
of the community).
"""
import os
from pathlib import Path
import sys
from datetime import datetime
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
from setuptools import Extension
DOCLINES = __doc__.split("\n")
def get_last_commit_time() -> str:
string_time = os.getenv("NIGHTLY_TIME").replace('"', "")
return datetime.strptime(string_time, "%Y-%m-%dT%H:%M:%SZ").strftime("%Y%m%d%H%M%S")
def get_project_name_version():
# Version
version = {}
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
exec(fp.read(), version)
project_name = "tensorflow-addons"
if "--nightly" in sys.argv:
project_name = "tfa-nightly"
version["__version__"] += get_last_commit_time()
sys.argv.remove("--nightly")
return project_name, version
def get_ext_modules():
ext_modules = []
if "--platlib-patch" in sys.argv:
if sys.platform.startswith("linux"):
# Manylinux2010 requires a patch for platlib
ext_modules = [Extension("_foo", ["stub.cc"])]
sys.argv.remove("--platlib-patch")
return ext_modules
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def has_ext_modules(self):
return True
project_name, version = get_project_name_version()
inclusive_min_tf_version = version["INCLUSIVE_MIN_TF_VERSION"]
exclusive_max_tf_version = version["EXCLUSIVE_MAX_TF_VERSION"]
setup(
name=project_name,
version=version["__version__"],
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
author="Google Inc.",
author_email="[email protected]",
packages=find_packages(),
ext_modules=get_ext_modules(),
install_requires=Path("requirements.txt").read_text().splitlines(),
extras_require={
"tensorflow": [
"tensorflow>={},<{}".format(
inclusive_min_tf_version, exclusive_max_tf_version
)
],
"tensorflow-gpu": [
"tensorflow-gpu>={},<{}".format(
inclusive_min_tf_version, exclusive_max_tf_version
)
],
"tensorflow-cpu": [
"tensorflow-cpu>={},<{}".format(
inclusive_min_tf_version, exclusive_max_tf_version
)
],
},
include_package_data=True,
zip_safe=False,
distclass=BinaryDistribution,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries",
],
license="Apache 2.0",
keywords="tensorflow addons machine learning",
)
| 34.804688 | 88 | 0.668238 | [
"Apache-2.0"
] | Aakash-2904/addons | setup.py | 4,455 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import core.pywikibot as pywikibot
from utils import (
SITE_NAMES,
create_links_string,
read_log,
generate_list_page,
)
def main():
site = pywikibot.Site()
for site_name in SITE_NAMES:
page_name, site_regexp, list_page = generate_list_page(site, site_name)
list_page.text = list_page.text + '\n'
bad_pages_count = int(re.findall(r'Текущее количество: (\d+)', list_page.text)[0])
read_pages_count = 0
for string in list_page.text.split('\n'):
if not string or string[0] != '#':
continue
title = re.findall(r'\[\[(.+?)]]', string)[0]
page = pywikibot.Page(site, f'{title}')
links = [link for link in re.findall(site_regexp, page.text, flags=re.I)]
if not links:
list_page.text = list_page.text.replace(f'{string}\n', '')
bad_pages_count -= 1
else:
links_string = create_links_string(links, page)
list_page.text = list_page.text.replace(string, links_string[:-1:])
read_pages_count += 1
read_log(read_pages_count)
list_page.text = re.sub(r'Текущее количество: (\d+)', fr'Текущее количество: {bad_pages_count}', list_page.text)
list_page.save(u'обновление ссылок')
if __name__ == '__main__':
main()
| 30.170213 | 120 | 0.594499 | [
"MIT"
] | MCPN/NPBot | outdated/site_update.py | 1,485 | Python |
from fractions import Fraction
def left_rect(f,x,h):
return f(x)
def mid_rect(f,x,h):
return f(x + h/2)
def right_rect(f,x,h):
return f(x+h)
def trapezium(f,x,h):
return (f(x) + f(x+h))/2.0
def simpson(f,x,h):
return (f(x) + 4*f(x + h/2) + f(x+h))/6.0
def cube(x):
return x*x*x
def reciprocal(x):
return 1/x
def identity(x):
return x
def integrate( f, a, b, steps, meth):
h = (b-a)/steps
ival = h * sum(meth(f, a+i*h, h) for i in range(steps))
return ival
# Tests
for a, b, steps, func in ((0., 1., 100, cube), (1., 100., 1000, reciprocal)):
for rule in (left_rect, mid_rect, right_rect, trapezium, simpson):
print(('%s integrated using %s\n from %r to %r (%i steps) = %r' %
(func.__name__, rule.__name__, a, b, steps,
integrate( func, a, b, steps, rule))))
a, b = Fraction.from_float(a), Fraction.from_float(b)
for rule in (left_rect, mid_rect, right_rect, trapezium, simpson):
print(('%s integrated using %s\n from %r to %r (%i steps and fractions) = %r' %
(func.__name__, rule.__name__, a, b, steps,
float(integrate( func, a, b, steps, rule)))))
# Extra tests (compute intensive)
for a, b, steps, func in ((0., 5000., 5000000, identity),
(0., 6000., 6000000, identity)):
for rule in (left_rect, mid_rect, right_rect, trapezium, simpson):
print(('%s integrated using %s\n from %r to %r (%i steps) = %r' %
(func.__name__, rule.__name__, a, b, steps,
integrate( func, a, b, steps, rule))))
a, b = Fraction.from_float(a), Fraction.from_float(b)
for rule in (left_rect, mid_rect, right_rect, trapezium, simpson):
print(('%s integrated using %s\n from %r to %r (%i steps and fractions) = %r' %
(func.__name__, rule.__name__, a, b, steps,
float(integrate( func, a, b, steps, rule)))))
| 34.267857 | 88 | 0.581032 | [
"MIT"
] | ethansaxenian/RosettaDecode | lang/Python/numerical-integration-1.py | 1,919 | Python |
# -*- coding: utf-8 -*-
import json
import os
from collections import ChainMap
from django.contrib.messages import constants as messages
def bool_from_env(var, default: bool = False) -> bool:
    """Read environment variable *var* and interpret it as a boolean.

    The strings '1' and 'true' (case-insensitive) map to True; any other
    set value maps to False. When the variable is unset, *default* is
    returned unchanged.
    """
    raw = os.environ.get(var)
    if raw is None:
        # Variable absent: honour the caller-supplied default.
        return default
    return raw.lower() in ('1', 'true')
# Repository root: two directory levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY: the 'CHANGE_ME' fallback must be overridden via env in production.
SECRET_KEY = os.environ.get('SECRET_KEY', 'CHANGE_ME')
LOG_FILEPATH = os.environ.get('LOG_FILEPATH', '/tmp/ralph.log')
DEBUG = False
# Accept any Host header by default; narrow this per deployment if needed.
ALLOWED_HOSTS = ['*']
# Only for deployment: absolute base URL of this Ralph instance, used where
# fully-qualified links are required.
RALPH_INSTANCE = os.environ.get('RALPH_INSTANCE', 'http://127.0.0.1:8000')
# Application definition
INSTALLED_APPS = (
    # 'ralph.admin' must precede 'django.contrib.admin' so its templates
    # and admin site customisations take precedence.
    'ralph.admin',
    # Django contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.humanize',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps.
    'django_rq',
    'import_export',
    'mptt',
    'reversion',
    'sitetree',
    # Ralph domain apps.
    'ralph.accounts',
    'ralph.assets',
    'ralph.attachments',
    'ralph.back_office',
    'ralph.configuration_management',
    'ralph.dashboards',
    'ralph.data_center',
    'ralph.dhcp',
    'ralph.deployment',
    'ralph.licences',
    'ralph.domains',
    'ralph.supports',
    'ralph.security',
    'ralph.lib.foundation',
    'ralph.lib.table',
    'ralph.networks',
    'ralph.data_importer',
    'ralph.dc_view',
    'ralph.reports',
    'ralph.virtual',
    'ralph.operations',
    'ralph.lib.external_services',
    'ralph.lib.transitions',
    'ralph.lib.permissions',
    'ralph.lib.custom_fields',
    'ralph.notifications',
    'ralph.ssl_certificates',
    # REST API.
    'rest_framework',
    'rest_framework.authtoken',
    'taggit',
    'taggit_serializer',
)
# Order matters: middleware is applied top-down on requests and
# bottom-up on responses.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    # Exposes the current request/user via thread-local storage.
    'threadlocals.middleware.ThreadLocalMiddleware',
)
ROOT_URLCONF = 'ralph.urls'
# Extra URLconf modules; extended by instance-specific settings.
URLCONF_MODULES = [ROOT_URLCONF]
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            # All loaders are wrapped in the cached loader, so compiled
            # templates are reused across requests.
            'loaders': [
                ('django.template.loaders.cached.Loader', [
                    'django.template.loaders.filesystem.Loader',
                    'django.template.loaders.app_directories.Loader',
                    'ralph.lib.template.loaders.AppTemplateLoader',
                ]),
            ],
        },
    },
]
WSGI_APPLICATION = 'ralph.wsgi.application'
# Shared MySQL connection options: InnoDB, utf8, and READ COMMITTED
# isolation for every new session.
MYSQL_OPTIONS = {
    'sql_mode': 'TRADITIONAL',
    'charset': 'utf8',
    'init_command': """
    SET default_storage_engine=INNODB;
    SET character_set_connection=utf8,collation_connection=utf8_unicode_ci;
    SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
    """
}
# Optional path to a CA certificate; when set, the DB connection uses TLS.
DATABASE_SSL_CA = os.environ.get('DATABASE_SSL_CA', None)
if DATABASE_SSL_CA:
    MYSQL_OPTIONS.update({'ssl': {'ca': DATABASE_SSL_CA}})
DATABASES = {
    'default': {
        # transaction_hooks backend adds on-commit callbacks (pre-Django-1.9).
        'ENGINE': 'transaction_hooks.backends.mysql',
        'NAME': os.environ.get('DATABASE_NAME', 'ralph_ng'),
        'USER': os.environ.get('DATABASE_USER', 'ralph_ng'),
        # Empty-string password from env is coerced to None.
        'PASSWORD': os.environ.get('DATABASE_PASSWORD', 'ralph_ng') or None,
        'HOST': os.environ.get('DATABASE_HOST', '127.0.0.1'),
        'PORT': os.environ.get('DATABASE_PORT', 3306),
        'OPTIONS': MYSQL_OPTIONS,
        # Wrap each HTTP request in a transaction.
        'ATOMIC_REQUESTS': True,
        'TEST': {
            'NAME': 'test_ralph_ng',
        }
    }
}
# Custom user model.
AUTH_USER_MODEL = 'accounts.RalphUser'
LOGIN_URL = '/login/'
LANGUAGE_CODE = 'en-us'
LOCALE_PATHS = (os.path.join(BASE_DIR, 'locale'), )
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = bool_from_env('USE_I18N', True)
USE_L10N = bool_from_env('USE_L10N', True)
# Naive datetimes are used throughout (no timezone-aware storage).
USE_TZ = False
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
    os.path.join(BASE_DIR, 'admin', 'static'),
)
STATIC_ROOT = os.environ.get(
    'STATIC_ROOT', os.path.join(BASE_DIR, 'var', 'static')
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.environ.get(
    'MEDIA_ROOT', os.path.join(BASE_DIR, 'var', 'media')
)
# adapt message's tags to bootstrap (Foundation/Bootstrap CSS class names)
MESSAGE_TAGS = {
    messages.DEBUG: 'info',
    messages.ERROR: 'alert',
}
# Yearly depreciation rates (percent) used by asset accounting.
DEFAULT_DEPRECIATION_RATE = int(os.environ.get('DEFAULT_DEPRECIATION_RATE', 25))  # noqa
DEFAULT_LICENCE_DEPRECIATION_RATE = int(os.environ.get('DEFAULT_LICENCE_DEPRECIATION_RATE', 50))  # noqa
CHECK_IP_HOSTNAME_ON_SAVE = bool_from_env('CHECK_IP_HOSTNAME_ON_SAVE', True)
# Template for auto-generated asset hostnames, e.g. POLPC00001.
ASSET_HOSTNAME_TEMPLATE = {
    'prefix': '{{ country_code|upper }}{{ code|upper }}',
    'postfix': '',
    'counter_length': 5,
}
DEFAULT_COUNTRY_CODE = os.environ.get('DEFAULT_COUNTRY_CODE', 'POL')
LDAP_SERVER_OBJECT_USER_CLASS = 'user'  # possible values: user, person
ADMIN_SITE_HEADER = 'Ralph 3'
ADMIN_SITE_TITLE = 'Ralph 3'
# Standard dictConfig logging setup: console handler for interactive use,
# size-rotated file handler (100 MB x 10 backups) for the app loggers.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'datefmt': '%d.%m.%Y %H:%M:%S',
            'format': (
                '[%(asctime)08s,%(msecs)03d] %(levelname)-7s [%(processName)s'
                ' %(process)d] %(module)s - %(message)s'),
        },
        'simple': {
            'datefmt': '%H:%M:%S',
            'format': '[%(asctime)08s] %(levelname)-7s %(message)s',
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
        'file': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'maxBytes': 1024 * 1024 * 100,  # 100 MB
            'backupCount': 10,
            'filename': os.environ.get(
                'LOG_FILEPATH', LOG_FILEPATH
            ),
            'formatter': 'verbose',
        },
    },
    'loggers': {
        # Per-logger levels are overridable through env variables.
        'django.request': {
            'handlers': ['file'],
            'level': os.environ.get('LOGGING_DJANGO_REQUEST_LEVEL', 'WARNING'),
            'propagate': True,
        },
        'ralph': {
            'handlers': ['file'],
            'level': os.environ.get('LOGGING_RALPH_LEVEL', 'WARNING'),
            'propagate': True,
        },
        'rq.worker': {
            'level': os.environ.get('LOGGING_RQ_LEVEL', 'WARNING'),
            'handlers': ['file'],
            'propagate': True,
        }
    },
}
# Django REST Framework configuration. Authentication accepts token,
# basic, and session credentials; permissions/filters are Ralph-specific
# per-object checks.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'ralph.lib.permissions.api.RalphPermission',
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'ralph.lib.permissions.api.PermissionsForObjectFilter',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework_xml.renderers.XMLRenderer',
    ),
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
        'rest_framework.parsers.FormParser',
        'rest_framework.parsers.MultiPartParser',
        'rest_framework_xml.parsers.XMLParser',
    ),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',  # noqa
    'PAGE_SIZE': 10,
    'DEFAULT_METADATA_CLASS': 'ralph.lib.api.utils.RalphApiMetadata',
    # API versioning via the Accept header (currently only v1).
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',  # noqa
    'DEFAULT_VERSION': 'v1',
    'ALLOWED_VERSIONS': ('v1',)
}
# Optional per-user rate limiting for the API, off by default.
API_THROTTLING = bool_from_env('API_THROTTLING', default=False)
if API_THROTTLING:
    REST_FRAMEWORK.update({
        'DEFAULT_THROTTLE_CLASSES': (
            'rest_framework.throttling.UserRateThrottle',
        ),
        'DEFAULT_THROTTLE_RATES': {
            'user': os.environ.get('API_THROTTLING_USER', '5000/hour')
        }
    })
REDIS_MASTER_IP = None
REDIS_MASTER_PORT = None
# When sentinel mode is enabled, the Redis master is discovered once at
# settings import time. REDIS_SENTINEL_HOSTS and REDIS_CLUSTER_NAME are
# then mandatory (a missing variable raises KeyError here).
REDIS_SENTINEL_ENABLED = bool_from_env('REDIS_SENTINEL_ENABLED', False)
if REDIS_SENTINEL_ENABLED:
    from redis.sentinel import Sentinel
    # REDIS_SENTINEL_HOSTS env variable format: host_1:port;host_2:port
    REDIS_SENTINEL_HOSTS = os.environ['REDIS_SENTINEL_HOSTS'].split(';')
    REDIS_CLUSTER_NAME = os.environ['REDIS_CLUSTER_NAME']
    sentinel = Sentinel(
        [tuple(s_host.split(':')) for s_host in REDIS_SENTINEL_HOSTS],
        socket_timeout=float(
            os.environ.get('REDIS_SENTINEL_SOCKET_TIMEOUT', 0.2)
        )
    )
    REDIS_MASTER_IP, REDIS_MASTER_PORT = sentinel.discover_master(
        REDIS_CLUSTER_NAME
    )
# Sentinel-discovered master wins over the plain REDIS_HOST/REDIS_PORT env.
REDIS_CONNECTION = {
    'HOST': REDIS_MASTER_IP or os.environ.get('REDIS_HOST', 'localhost'),
    'PORT': REDIS_MASTER_PORT or os.environ.get('REDIS_PORT', '6379'),
    'DB': int(os.environ.get('REDIS_DB', 0)),
    'PASSWORD': os.environ.get('REDIS_PASSWORD', ''),
    # timeout for executing commands
    'TIMEOUT': float(os.environ.get('REDIS_TIMEOUT', 10.0)),
    # timeout for connecting through socket to redis
    'CONNECT_TIMEOUT': float(os.environ.get('REDIS_CONNECT_TIMEOUT', 1.0)),
}
# set to False to turn off cache decorator
USE_CACHE = bool_from_env('USE_CACHE', True)
SENTRY_ENABLED = bool_from_env('SENTRY_ENABLED')
SENTRY_JS_DSN = os.environ.get('SENTRY_JS_DSN', None)
SENTRY_JS_CONFIG = json.loads(os.environ.get('SENTRY_JS_CONFIG', '{}'))
BACK_OFFICE_ASSET_AUTO_ASSIGN_HOSTNAME = True
BACKOFFICE_HOSTNAME_FIELD_READONLY = bool_from_env(
'BACKOFFICE_HOSTNAME_FIELD_READONLY', True
)
TAGGIT_CASE_INSENSITIVE = True # case insensitive tags
RQ_QUEUES = {
'default': dict(
**REDIS_CONNECTION
)
}
# Extra Ralph-specific RQ queues; each entry may override queue options.
RALPH_QUEUES = {
    'ralph_ext_pdf': {},
    'ralph_async_transitions': {
        'DEFAULT_TIMEOUT': 3600,
    },
}
# Layer per-queue options on top of the default Redis connection settings.
# NOTE(review): ChainMap looks keys up left-to-right, so any key present in
# RQ_QUEUES['default'] shadows the per-queue options; only keys absent from
# the default (e.g. DEFAULT_TIMEOUT) actually take effect — confirm this
# ordering is intended.
for queue_name, options in RALPH_QUEUES.items():
    RQ_QUEUES[queue_name] = ChainMap(RQ_QUEUES['default'], options)
# Services executed through RQ: externally rendered PDFs and internal
# asynchronous transitions.
RALPH_EXTERNAL_SERVICES = {
    'PDF': {
        'queue_name': 'ralph_ext_pdf',
        'method': 'inkpy_jinja.pdf',
    },
}
RALPH_INTERNAL_SERVICES = {
    'ASYNC_TRANSITIONS': {
        'queue_name': 'ralph_async_transitions',
        'method': 'ralph.lib.transitions.async.run_async_transition'
    }
}
# =============================================================================
# DC view
# =============================================================================
RACK_LISTING_NUMBERING_TOP_TO_BOTTOM = False
# =============================================================================
# Deployment
# =============================================================================
DEPLOYMENT_MAX_DNS_ENTRIES_TO_CLEAN = 30
# =============================================================================
# "My equipment" page links; example:
# MY_EQUIPMENT_LINKS = [
#     {'url': 'http://....', 'name': 'Link name'},
# ]
MY_EQUIPMENT_LINKS = json.loads(os.environ.get('MY_EQUIPMENT_LINKS', '[]'))
MY_EQUIPMENT_REPORT_FAILURE_URL = os.environ.get('MY_EQUIPMENT_REPORT_FAILURE_URL', '')  # noqa
MY_EQUIPMENT_SHOW_BUYOUT_DATE = bool_from_env('MY_EQUIPMENT_SHOW_BUYOUT_DATE')
MY_EQUIPMENT_BUYOUT_URL = os.environ.get('MY_EQUIPMENT_BUYOUT_URL', '')
# URL shown to users when they declare they do not have a specific asset.
MISSING_ASSET_REPORT_URL = os.environ.get('MISSING_ASSET_REPORT_URL', None)
# Redirect to the result's detail view if a search returns exactly one hit.
REDIRECT_TO_DETAIL_VIEW_IF_ONE_SEARCH_RESULT = bool_from_env(
    'REDIRECT_TO_DETAIL_VIEW_IF_ONE_SEARCH_RESULT', True
)
# Stocktaking tagging config - each variable describes an individual tag.
# To disable a tag set it to None or, in case of the date tag, to '0'.
INVENTORY_TAG = os.environ.get('INVENTORY_TAG', 'INV')
# This tag means the user themselves confirmed asset possession.
INVENTORY_TAG_USER = os.environ.get('INVENTORY_TAG_USER', 'INV_CONF')
INVENTORY_TAG_MISSING = os.environ.get('INVENTORY_TAG_MISSING', 'INV_MISSING')
INVENTORY_TAG_APPEND_DATE = bool_from_env('INVENTORY_TAG_APPEND_DATE', True)
# Self-service flow allowing users to accept (or loan) assets assigned to
# them; the IDs below point at transitions configured in the database.
ENABLE_ACCEPT_ASSETS_FOR_CURRENT_USER = bool_from_env('ENABLE_ACCEPT_ASSETS_FOR_CURRENT_USER')  # noqa
ACCEPT_ASSETS_FOR_CURRENT_USER_CONFIG = {
    'TRANSITION_ID': os.environ.get(
        'ACCEPT_ASSETS_FOR_CURRENT_USER_TRANSITION_ID', None
    ),
    # in_progress by default
    'BACK_OFFICE_ACCEPT_STATUS': os.environ.get(
        'ACCEPT_ASSETS_FOR_CURRENT_USER_BACK_OFFICE_ACCEPT_STATUS', 2
    ),
    'LOAN_TRANSITION_ID': os.environ.get(
        'LOAN_ASSETS_FOR_CURRENT_USER_TRANSITION_ID', None
    ),
    # loan_in_progress by default
    'BACK_OFFICE_ACCEPT_LOAN_STATUS': os.environ.get(
        'LOAN_ASSETS_FOR_CURRENT_USER_BACK_OFFICE_ACCEPT_STATUS', 13
    ),
}
RELEASE_REPORT_CONFIG = {
    # the report named 'release' is used by default
    'DEFAULT_REPORT': os.environ.get(
        'RELEASE_REPORT_CONFIG_DEFAULT_REPORT', 'release'
    ),
    # maps transition id to a different report
    'REPORTS_MAPPER': json.loads(
        os.environ.get('RELEASE_REPORT_CONFIG_REPORTS_MAPPER', '{}')
    )
}
MAP_IMPORTED_ID_TO_NEW_ID = False
# OpenStack sync: list of instance definitions (JSON) plus the provider
# name used when none is given per instance.
OPENSTACK_INSTANCES = json.loads(os.environ.get('OPENSTACK_INSTANCES', '[]'))
DEFAULT_OPENSTACK_PROVIDER_NAME = os.environ.get(
    'DEFAULT_OPENSTACK_PROVIDER_NAME', 'openstack'
)
# issue tracker url for Operations urls (issue ids) - should end with /
ISSUE_TRACKER_URL = os.environ.get('ISSUE_TRACKER_URL', '')
# Networks
DEFAULT_NETWORK_BOTTOM_MARGIN = int(os.environ.get('DEFAULT_NETWORK_BOTTOM_MARGIN', 10))  # noqa
DEFAULT_NETWORK_TOP_MARGIN = int(os.environ.get('DEFAULT_NETWORK_TOP_MARGIN', 0))  # noqa
# deprecated, to remove in the future
DEFAULT_NETWORK_MARGIN = int(os.environ.get('DEFAULT_NETWORK_MARGIN', 10))
# when set to True, network records (IP/Ethernet) can't be modified until
# 'expose in DHCP' is selected
DHCP_ENTRY_FORBID_CHANGE = bool_from_env('DHCP_ENTRY_FORBID_CHANGE', True)
# enable integration with DNSaaS, for details see
# https://github.com/allegro/django-powerdns-dnssec
ENABLE_DNSAAS_INTEGRATION = bool_from_env('ENABLE_DNSAAS_INTEGRATION')
DNSAAS_URL = os.environ.get('DNSAAS_URL', '')
DNSAAS_TOKEN = os.environ.get('DNSAAS_TOKEN', '')
# auto-PTR policy identifiers understood by DNSaaS
DNSAAS_AUTO_PTR_ALWAYS = os.environ.get('DNSAAS_AUTO_PTR_ALWAYS', 2)
DNSAAS_AUTO_PTR_NEVER = os.environ.get('DNSAAS_AUTO_PTR_NEVER', 1)
# user in dnsaas which can make changes, like updating TXT records etc.
DNSAAS_OWNER = os.environ.get('DNSAAS_OWNER', 'ralph')
# pyhermes topic where messages about auto TXT records are announced
DNSAAS_AUTO_TXT_RECORD_TOPIC_NAME = os.environ.get(
    'DNSAAS_AUTO_TXT_RECORD_TOPIC_NAME', None
)
# Names under which host attributes are sent to DNSaaS for:
# DataCenterAsset, Cluster, VirtualServer
DNSAAS_AUTO_TXT_RECORD_PURPOSE_MAP = {
    # self.configuration_path.class will be sent as 'VENTURE'
    'class_name': 'VENTURE',
    # self.configuration_path.module will be sent as 'ROLE'
    'module_name': 'ROLE',
    # self.configuration_path.path will be sent as 'CONFIGURATION_PATH'
    'configuration_path': 'CONFIGURATION_PATH',
    # self.service_env will be sent as 'SERVICE_ENV'
    'service_env': 'SERVICE_ENV',
    # self.model will be sent as 'MODEL'
    'model': 'MODEL',
    # self.location will be sent as 'LOCATION'
    'location': 'LOCATION',
    # if any of the above is set to None, e.g.
    # 'location': None
    # then that value won't be sent at all
}
if ENABLE_DNSAAS_INTEGRATION:
    INSTALLED_APPS += (
        'ralph.dns',
    )
DNSAAS_AUTO_UPDATE_HOST_DNS = bool_from_env('DNSAAS_AUTO_UPDATE_HOST_DNS')
DOMAIN_DATA_UPDATE_TOPIC = os.environ.get(
    'DOMAIN_DATA_UPDATE_TOPIC', None
)
DOMAIN_OWNER_TYPE = {
    'BO': 'Business Owner',
    'TO': 'Technical Owner',
}
# Transitions settings
# E.g.: pl (see: https://www.iso.org/iso-3166-country-codes.html)
CHANGE_HOSTNAME_ACTION_DEFAULT_COUNTRY = None
# Change management settings: maps Ralph's internal operation states to the
# status names used by the external tracker (Jira by default).
CHANGE_MGMT_OPERATION_STATUSES = {
    'OPENED': os.getenv(
        'CHANGE_MGMT_OPERATION_STATUS_OPENED', 'Open'
    ),
    'IN_PROGRESS': os.getenv(
        'CHANGE_MGMT_OPERATION_STATUS_IN_PROGRESS', 'In Progress'
    ),
    'RESOLVED': os.getenv(
        'CHANGE_MGMT_OPERATION_STATUS_RESOLVED', 'Resolved'
    ),
    'CLOSED': os.getenv(
        'CHANGE_MGMT_OPERATION_STATUS_CLOSED', 'Closed'
    ),
    'REOPENED': os.getenv(
        'CHANGE_MGMT_OPERATION_STATUS_REOPENED', 'Reopened'
    ),
    'TODO': os.getenv(
        'CHANGE_MGMT_OPERATION_STATUS_TODO', 'Todo'
    ),
    'BLOCKED': os.getenv(
        'CHANGE_MGMT_OPERATION_STATUS_BLOCKED', 'Blocked'
    )
}
# Dotted paths to the loader/processor used to translate tracker events.
CHANGE_MGMT_BASE_OBJECT_LOADER = os.getenv(
    'CHANGE_MGMT_BASE_OBJECT_LOADER', None
)
CHANGE_MGMT_PROCESSOR = os.getenv(
    'CHANGE_MGMT_PROCESSOR', 'ralph.operations.changemanagement.jira'
)
# Hermes topic carrying change-management events.
HERMES_CHANGE_MGMT_TOPICS = {
    'CHANGES': os.getenv(
        'HERMES_CHANGE_MGMT_CHANGES_TOPIC', 'hermes.changemanagement.changes'
    )
}
# Hermes settings (pyhermes pub/sub integration)
ENABLE_HERMES_INTEGRATION = bool_from_env('ENABLE_HERMES_INTEGRATION')
HERMES = json.loads(os.environ.get('HERMES', '{}'))
HERMES['ENABLED'] = ENABLE_HERMES_INTEGRATION
# topic name where DC asset, cloud host, virtual server changes should be
# announced
HERMES_HOST_UPDATE_TOPIC_NAME = os.environ.get(
    'HERMES_HOST_UPDATE_TOPIC_NAME', None
)
# topics announcing service lifecycle events
HERMES_SERVICE_TOPICS = {
    'CREATE': os.environ.get(
        'SERVICE_CREATE_HERMES_TOPIC_NAME', 'hermes.service.create'
    ),
    'DELETE': os.environ.get(
        'SERVICE_DELETE_HERMES_TOPIC_NAME', 'hermes.service.delete'
    ),
    'UPDATE': os.environ.get(
        'SERVICE_UPDATE_HERMES_TOPIC_NAME', 'hermes.service.update'
    ),
    'REFRESH': os.environ.get(
        'SERVICE_REFRESH_HERMES_TOPIC_NAME', 'hermes.service.refresh'
    )
}
if ENABLE_HERMES_INTEGRATION:
    INSTALLED_APPS += (
        'pyhermes.apps.django',
    )
ENABLE_SAVE_DESCENDANTS_DURING_NETWORK_SYNC = bool_from_env(
    'ENABLE_SAVE_DESCENDANTS_DURING_NETWORK_SYNC', True
)
# Outgoing e-mail (used by notifications when enabled).
ENABLE_EMAIL_NOTIFICATION = bool_from_env('ENABLE_EMAIL_NOTIFICATION')
EMAIL_HOST = os.environ.get('EMAIL_HOST', None)
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', None)
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', None)
EMAIL_PORT = os.environ.get('EMAIL_PORT', 25)
EMAIL_USE_TLS = bool_from_env('EMAIL_USE_TLS', False)
EMAIL_FROM = os.environ.get('EMAIL_FROM', None)
EMAIL_MESSAGE_CONTACT_NAME = os.environ.get('EMAIL_MESSAGE_CONTACT_NAME', None)
EMAIL_MESSAGE_CONTACT_EMAIL = os.environ.get(
    'EMAIL_MESSAGE_CONTACT_EMAIL', None
)
SCM_TOOL_URL = os.getenv('SCM_TOOL_URL', '')
RALPH_HOST_URL = os.environ.get('RALPH_HOST_URL', None)
# METRICS
COLLECT_METRICS = False
ALLOW_PUSH_GRAPHS_DATA_TO_STATSD = False
STATSD_GRAPHS_PREFIX = 'ralph.graphs'
TRANSITION_TEMPLATES = None
| 32.371476 | 104 | 0.674949 | [
"Apache-2.0"
] | p-bo/ralph | src/ralph/settings/base.py | 19,520 | Python |
from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['index', 'divisor'])
def glVertexAttribDivisorNV(index, divisor):
	# Binding stub for the NV_instanced_arrays entry point; the @params
	# decorator supplies the actual FFI dispatch, so the body is empty.
	pass
| 19.571429 | 47 | 0.751825 | [
"MIT"
] | cydenix/OpenGLCffi | OpenGLCffi/GLES3/EXT/NV/instanced_arrays.py | 137 | Python |
# -*- coding: utf-8 -*-
'''
Encapsulate the different transports available to Salt. Currently this is only ZeroMQ.
'''
import salt.payload
import salt.auth
class Channel(object):
    '''
    Factory for transport channels. ZeroMQ is currently the only
    implemented transport.
    '''
    @staticmethod
    def factory(opts, **kwargs):
        # Resolve the transport type: explicit opts win, then the master
        # section of the pillar, and finally the ZeroMQ default.
        pillar_master = opts.get('pillar', {}).get('master', {})
        if 'transport_type' in opts:
            ttype = opts['transport_type']
        elif 'transport_type' in pillar_master:
            ttype = opts['pillar']['master']['transport_type']
        else:
            ttype = 'zeromq'
        if ttype != 'zeromq':
            raise Exception("Channels are only defined for ZeroMQ")
            # return NewKindOfChannel(opts, **kwargs)
        return ZeroMQChannel(opts, **kwargs)
class ZeroMQChannel(Channel):
    '''
    Encapsulate sending routines to ZeroMQ.

    ZMQ Channels default to 'crypt=aes'
    '''
    def __init__(self, opts, **kwargs):
        self.opts = opts
        self.ttype = 'zeromq'
        # Encryption mode; 'clear' disables payload encryption entirely.
        self.crypt = kwargs.get('crypt', 'aes')
        self.serial = salt.payload.Serial(opts)
        if self.crypt != 'clear':
            # Reuse a caller-supplied authenticator when given, otherwise
            # negotiate our own credentials with the master.
            self.auth = kwargs['auth'] if 'auth' in kwargs else salt.crypt.SAuth(opts)
        master_uri = kwargs['master_uri'] if 'master_uri' in kwargs else opts['master_uri']
        self.sreq = salt.payload.SREQ(master_uri)

    def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
        # The requested entry comes back encrypted with a session key that
        # is itself sealed with our public key: unseal, then decrypt.
        reply = self.sreq.send('aes', self.auth.crypticle.dumps(load), tries, timeout)
        keys = self.auth.get_keys()
        session_aes = keys.private_decrypt(reply['key'], 4)
        crypticle = salt.crypt.Crypticle(self.opts, session_aes)
        return crypticle.loads(reply[dictkey])

    def _crypted_transfer(self, load, tries=3, timeout=60):
        '''
        Send an encrypted payload, transparently re-authenticating once on
        an authentication error and retrying. Indeed, we can fail too early
        in case of a master restart during a minion state execution call.
        '''
        def _attempt():
            wire_data = self.sreq.send(
                self.crypt,
                self.auth.crypticle.dumps(load),
                tries,
                timeout)
            # Some calls (e.g. saltcall ret submission) are blind: we do not
            # subscribe to return events, so there may be no body to decrypt.
            if wire_data:
                wire_data = self.auth.crypticle.loads(wire_data)
            return wire_data
        try:
            return _attempt()
        except salt.crypt.AuthenticationError:
            self.auth = salt.crypt.SAuth(self.opts)
            return _attempt()

    def _uncrypted_transfer(self, load, tries=3, timeout=60):
        return self.sreq.send(self.crypt, load, tries, timeout)

    def send(self, load, tries=3, timeout=60):
        # Do we ever do non-crypted transfers?
        if self.crypt == 'clear':
            return self._uncrypted_transfer(load, tries, timeout)
        return self._crypted_transfer(load, tries, timeout)
| 33.453608 | 89 | 0.589522 | [
"Apache-2.0"
] | otrempe/salt | salt/transport/__init__.py | 3,245 | Python |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .baseline import Baseline
| 28.931034 | 74 | 0.771156 | [
"Apache-2.0"
] | guochunhe/Vitis-AI | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res18_market1501_176_80_1.1G_1.3/code/network/__init__.py | 839 | Python |
from django.shortcuts import render,redirect
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.urls.base import reverse
import datetime as dt
from .models import Profile,Project,Rating,User
from .forms import *
from .email import send_welcome_email
# Create your views here.
def signup_view(request):
    """Register a new account, send a welcome email and log the user in."""
    date = dt.date.today()
    if request.method != 'POST':
        return render(request, 'registration/signup.html', {'form': SignUpForm()})
    form = SignUpForm(request.POST)
    if form.is_valid():
        form.save()
        username = form.cleaned_data.get('username')
        password = form.cleaned_data.get('password1')
        name = form.cleaned_data['fullname']
        email = form.cleaned_data['email']
        send_welcome_email(name, email, date)
        # Authenticate with the freshly created credentials and start a session.
        user = authenticate(username=username, password=password)
        login(request, user)
        return redirect('main:home')
    # Invalid submission: re-render the bound form so errors are shown.
    return render(request, 'registration/signup.html', {'form': form})
def home_page(request):
    """Render the landing page with the newest project highlighted."""
    highlight = Project.objects.all().order_by('id').last()
    # NOTE(review): this skips the *first* project while the highlight is the
    # *last* one by id — presumably intended to exclude the highlight from the
    # list; confirm the intended ordering.
    other_projects = Project.objects.all()[1:]
    try:
        rating = Rating.objects.filter(project_id=highlight.id)
    except Rating.DoesNotExist:
        rating = None
    context = {
        "projects": other_projects,
        "highlightProject": highlight,
        "rating": rating
    }
    return render(request, "main/home_page.html", context)
@login_required(login_url='/login')
def post_project(request):
    """Let the logged-in user submit a new project."""
    author = Profile.objects.get(user=request.user)
    form = SubmitProjectForm()
    if request.method == 'POST':
        form = SubmitProjectForm(request.POST, request.FILES)
        if form.is_valid():
            # Attach the submitting user's profile before persisting.
            project = form.save(commit=False)
            project.user = author
            project.save()
            return redirect('/')
    return render(request, "main/post_project.html", {'form': form})
@login_required(login_url='/login')
def project_view(request, id):
    """
    Display a single project and, on POST, record the current user's
    design/usability/content rating and refresh the project's aggregates.

    Args:
        request: current HttpRequest.
        id: primary key of the Project to display.
    """
    user = Profile.objects.get(user=request.user)
    project = Project.objects.get(id=id)
    ratings = Rating.objects.filter(project=project).last()
    tech_tags = project.technologies.split(",")
    # Has this user already rated the project? The template uses this flag
    # to hide the rating form after a first submission.
    # (.first() never raises DoesNotExist, so no try/except is needed.)
    rates_status = Rating.objects.filter(user=user, project=project).first() is not None
    form = RateForm()
    if request.method == 'POST':
        form = RateForm(request.POST)
        if form.is_valid():
            rate = form.save(commit=False)
            rate.user = user
            rate.project = project
            design = form.cleaned_data['design']
            usability = form.cleaned_data['usability']
            content = form.cleaned_data['content']
            # Mean of the three category scores. BUGFIX: this previously
            # divided by 2, inflating every per-rating average by 50%.
            rate.average = (design + usability + content) / 3
            rate.save()
            # Recompute aggregates over every rating of this project,
            # including the one just saved.
            rating = Rating.objects.filter(project_id=id)
            design_average = sum(r.design for r in rating) / len(rating)
            usability_average = sum(r.usability for r in rating) / len(rating)
            content_average = sum(r.content for r in rating) / len(rating)
            score = (design_average + usability_average + content_average) / 3
            rate.design_average = round(design_average, 2)
            rate.usability_average = round(usability_average, 2)
            rate.content_average = round(content_average, 2)
            rate.score = round(score, 2)
            rate.save()
            return redirect("main:project_view", id=project.id)
    ctx = {
        "project": project,
        "ratings": ratings,
        "form": form,
        "tech_tags": tech_tags,
        "rates_status": rates_status
    }
    return render(request, "main/view_project.html", ctx)
@login_required(login_url='/login')
def search_results(request):
    """Search projects by title, or show a hint when no term was given."""
    search_term = request.GET.get("search_project")
    if search_term:
        searched_projects = Project.search_project_by_search_term(search_term)
        message = f"{search_term}"
        return render(request, 'main/search.html', {"message": message, "projects": searched_projects})
    message = "You haven't searched for any project"
    return render(request, 'main/search.html', {'message': message})
@login_required(login_url='/login')
def user_profile(request, username):
    """Show another user's public profile; viewing yourself redirects to
    the editable profile page."""
    user_selected = User.objects.get(username=username)
    if request.user == user_selected:
        return redirect('main:profile', username=username)
    selected_profile = Profile.filter_profile_by_id(user_selected.id)
    projects = Project.objects.filter(user=selected_profile)
    return render(request, 'main/user_profile.html', {
        "user_profile": selected_profile,
        "projects": projects,
    })
@login_required(login_url='/login')
def profile(request, username):
    """Render the logged-in user's own profile page with their projects."""
    account = User.objects.get(username=username)
    account_profile = Profile.filter_profile_by_id(account.id)
    projects = Project.objects.filter(user=account_profile)
    return render(request, 'profile/profile.html', {
        "user_profile": account_profile,
        "user": account,
        "projects": projects,
    })
# CONSISTENCY FIX: every other protected view passes login_url='/login';
# this one previously fell back to the project default login URL.
@login_required(login_url='/login')
def update_profile(request, username):
    """
    Edit the profile that belongs to *username*.

    GET renders a form pre-filled from the profile; a valid POST saves the
    changes and redirects back to the profile page.
    """
    user = User.objects.get(username=username)
    profile = Profile.filter_profile_by_id(user.id)
    form = UpdateUserProfileForm(instance=profile)
    if request.method == "POST":
        form = UpdateUserProfileForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            profile = form.save(commit=False)
            profile.save()
            return redirect('main:profile', username=username)
    # Invalid POST falls through and re-renders the bound form with errors.
    return render(request, 'profile/update_profile.html', {"form": form})
| 31.705 | 101 | 0.645639 | [
"MIT"
] | MugeraH/Awwards | main/views.py | 6,341 | Python |
from .fsapi_core import *
import time
from xml.dom import minidom
class FSAPI_Node(object):
    """Class decorator that turns a node-description class into a usable
    FSAPI node: it registers the node with FSAPI and builds a wrapper that
    exposes typed, cached ``get``/``set``/``dele`` accessors based on the
    node's declared wire type and URL templates."""
    # Sentinel returned for integer nodes when the reply carries no value.
    NAN = -65536
    def __init__(self, fsapi_type: str, fsapi_property_alias: str = None, fsapi_set_method_alias: str = None):
        # fsapi_type: wire type of the node, one of
        # 'u8', 's8', 'u32', 'bool', 'void1', 'list', 'str', 'raw'.
        self._fsapi_type = fsapi_type
        self._fsapi_property_alias = fsapi_property_alias
        self._fsapi_set_method_alias = fsapi_set_method_alias
    def __call__(self, cls):
        class FSAPI_Node_Wrapper(cls):
            fsapi_type = self._fsapi_type
            fsapi_property_alias = self._fsapi_property_alias
            if fsapi_property_alias is None:
                # Derive the alias from the node key,
                # e.g. 'netRemote.sys.power' -> 'sys_power'.
                fsapi_property_alias = cls.key.lower().replace('netremote.', '').replace('.', '_')
            FSAPI.register_class(cls, fsapi_property_alias, self._fsapi_set_method_alias, fsapi_type)
            def __init__(self):
                # Cached last value and its timestamp (for max_age checks).
                self._last_value = None
                self._last_value_time = 0
                self._key = cls.key.lower()
                self._max_age = cls.max_age
                # Only expose get/set/dele when the node class declares the
                # matching URL template; otherwise wire in a raising stub.
                if hasattr(cls, 'get_url'):
                    self._get_url = cls.get_url.format(self.key)
                    self.can_get = True
                else:
                    self.can_get = False
                    self.get = self.no_get
                if hasattr(cls, 'set_url'):
                    self._set_url = cls.set_url.format(self.key)
                    self.can_set = True
                else:
                    self.can_set = False
                    self.set = self.no_set
                if hasattr(cls, 'del_url'):
                    self._del_url = cls.del_url.format(self.key)
                else:
                    self.dele = self.no_dele
                # Select parse/validate/convert strategies per wire type.
                if self.fsapi_type == 'u8':
                    self._parse_value = self._get_fsapi_value_int
                    self._validate = lambda v: None
                    self._get_core = lambda: self._call(self._get_url)
                elif self.fsapi_type == 'void1':
                    # Trigger-style nodes: every set sends the value 1.
                    self._parse_value = self._get_fsapi_value_int
                    self._validate = lambda v: None
                    self._convert_to = lambda v: 1
                    self._get_core = lambda: self._call(self._get_url)
                elif self.fsapi_type == 's8':
                    self._parse_value = self._get_fsapi_value_int
                    self._validate = lambda v: None
                    self._get_core = lambda: self._call(self._get_url)
                elif self.fsapi_type == 'u32':
                    self._parse_value = self._get_fsapi_value_int
                    self._validate = lambda v: None
                    self._get_core = lambda: self._call(self._get_url)
                elif self.fsapi_type == 'bool':
                    # Booleans travel as u8 on the wire; convert at the edges.
                    self._parse_value = self._get_fsapi_value_int
                    self._convert_from = lambda v: bool(v)
                    self._convert_to = lambda v: int(v)
                    self._validate = self._validate_boolean
                    self._get_core = lambda: self._call(self._get_url)
                    self.fsapi_type = 'u8'
                elif self.fsapi_type == 'list':
                    self._parse_value = self._get_fsapi_value_list
                    self._validate = lambda v: None
                    self._get_core = lambda: self._call(self._get_url, extra=dict(maxItems=40))
                elif self.fsapi_type == 'str':
                    self._parse_value = self._get_fsapi_value_str
                    self._validate = lambda v: None
                    self._get_core = lambda: self._call(self._get_url)
                elif self.fsapi_type == 'raw':
                    # Raw nodes return the unparsed response body.
                    self._validate = lambda v: None
                    self._get_core = lambda: self._call(self._get_url)
                else:
                    pass  # TODO: Log proper error
                # Fall back to identity conversion / default HTTP call when
                # the branches above did not install specific strategies.
                if not hasattr(self, '_convert_from'):
                    self._convert_from = self._default_convert
                if not hasattr(self, '_convert_to'):
                    self._convert_to = self._default_convert
                if not hasattr(self, '_call'):
                    self._call = self._default_call
            def _default_convert(self, value):
                # Identity conversion used by non-bool node types.
                return value
            def _default_call(self, path, extra=None):
                # Issue an authenticated GET against the device's webfsapi
                # endpoint, serialized through the shared access locker.
                webfsapi_url = self._fsapi._get_webfsapi()
                if not webfsapi_url:
                    raise Exception('No server found')
                if type(extra) is not dict:
                    extra = dict()
                params = dict(
                    pin=self._fsapi._pin,
                    sid=self._fsapi.session_id
                )
                params.update(**extra)
                with self._fsapi._access_locker as l:
                    url = f"{webfsapi_url}/{path}"
                    res = l.get_request(url, params=params)
                    return res
            def _validate_boolean(self, value):
                if type(value) is not bool:
                    raise RuntimeError('Value must be boolean')
            def _inject_fsapi(self, fsapi_obj):
                # Called by FSAPI to hand this node its owning session.
                self._fsapi = fsapi_obj
            def _get_fsapi_value_str(self, doc: str):
                return self._get_fsapi_value_raw(doc, 'c8_array')
            def _get_fsapi_value_int(self, doc: str):
                # NAN marks "no value present" in the integer reply.
                return int(self._get_fsapi_value_raw(doc, self.fsapi_type) or FSAPI_Node.NAN)
            def _get_fsapi_value_raw(self, doc: str, type_tag: str):
                # Parse a single-value FSAPI XML reply; returns None when the
                # status is not FS_OK or the value element is empty.
                try:
                    xml = minidom.parseString(doc).firstChild
                    if xml.getElementsByTagName('status')[0].firstChild.data == 'FS_NODE_BLOCKED':
                        raise FSAPI_Node_Blocked_Exception()
                    if not xml.getElementsByTagName('status')[0].firstChild.data == 'FS_OK':
                        return None
                    val_tag = xml.getElementsByTagName('value')[0]
                    child = val_tag.getElementsByTagName(type_tag)[0].firstChild
                    if child is None:
                        return None
                    return child.data
                except Exception as e:
                    self._fsapi.write_log("Getting value_raw {}: received <{}> error: {}".format(self.key, doc, e))
                    raise
            def _get_xml_single_content(self, doc: str, tag_name: str):
                # Extract the text content of the first <tag_name> element.
                try:
                    xml = minidom.parseString(doc).firstChild
                    return xml.getElementsByTagName(tag_name)[0].firstChild.data
                except Exception as e:
                    self._fsapi.write_log("Getting single_content {}: received <{}> error: {}".format(self.key, doc, e))
                    raise
            def _get_fsapi_value_list(self, doc: str):
                # Parse a list reply into a list of {field_name: value} dicts;
                # each item also carries its numeric 'key' attribute.
                self._fsapi.write_log(f"Getting List, result: {doc}")
                xml = minidom.parseString(doc).firstChild
                if not xml.getElementsByTagName('status')[0].firstChild.data == 'FS_OK':
                    return None
                ret_store = []
                res = xml.getElementsByTagName('item')
                for item in res:
                    index = int(item.getAttribute('key') or '-1')
                    attrs = {}
                    attrs['key'] = index
                    for field in item.getElementsByTagName('field'):
                        fn = field.getAttribute('name')
                        fv = ''
                        # A field holds exactly one typed child; try each
                        # known value tag and keep the last match found.
                        for tag_type in ['c8_array', 'u8', 'u32', 's16']:
                            for val in field.getElementsByTagName(tag_type):
                                if val.firstChild is None:
                                    fv = None
                                else:
                                    fv = val.firstChild.data
                        attrs[fn] = fv
                    ret_store.append(attrs)
                self._fsapi.write_log(f"Parsed List, result: {ret_store}")
                return ret_store
            def _update_cache(self, value):
                self._last_value = value
                self._last_value_time = time.time()
            def no_get(self):
                raise RuntimeError(self._key + ' is not readable')
            def get(self):
                # Serve from cache while younger than max_age; otherwise
                # fetch, parse and re-cache before converting outward.
                ret_val = None
                if time.time() - self._max_age > self._last_value_time:
                    response = self._get_core()
                    ret_val = self._parse_value(response)
                    self._last_value = ret_val
                    self._last_value_time = time.time()
                else:
                    ret_val = self._last_value
                return self._convert_from(ret_val)
            def no_set(self, value):
                raise RuntimeError(self._key + ' is not writable')
            def set(self, value):
                # Validate and convert, then push the value to the device.
                self._validate(value)
                converted_value = self._convert_to(value)
                response = self._call(self._set_url, dict(value=converted_value))
                # An int response is a bare HTTP status -> session expired.
                if isinstance(response, int):
                    self._fsapi.write_log(f"Trying to set, http-code: {response}")
                    raise FSAPI_Session_Invalid_Exception()
                if self._get_xml_single_content(response, 'status') == 'FS_OK':
                    self._last_value = value
                    self._last_value_time = time.time()
                    return True
                elif self._get_xml_single_content(response, 'status') == 'FS_NODE_BLOCKED':
                    raise FSAPI_Node_Blocked_Exception()
                else:
                    rslt = self._get_xml_single_content(response, 'status')
                    if rslt is None:
                        raise Exception("Setting failed - Response: {}".format(response))
                    else:
                        raise Exception("Setting failed - Status: {}".format(rslt))
            def dele(self):
                # Delete the node's value and invalidate the local cache.
                doc = self._call(self._del_url)
                self._last_value_time = 0
                self._last_value = None
                if self._get_xml_single_content(doc, 'status') == 'FS_OK':
                    return True
                else:
                    raise Exception("Deleting failed {}".format(self._get_xml_single_content(doc, 'status')))
            def no_dele(self):
                raise RuntimeError(self._key + ' is not deletable')
        return FSAPI_Node_Wrapper
| 45.368644 | 121 | 0.500607 | [
"MIT"
] | jentz1986/shng-undok-plugin | fsapi/fsapi_node.py | 10,707 | Python |
class TestResults:
def __init__(self, name):
self.name = name
self.results = []
def add_result(self, result):
self.results.append(result) | 24.285714 | 35 | 0.611765 | [
"MIT"
] | nokia-wroclaw/innovativeproject-resttest | src/indor/test_results.py | 170 | Python |
import logging
from logging import config
from distutils.util import strtobool
from pgopttune.utils.logger import logging_dict
from pgopttune.config.postgres_server_config import PostgresServerConfig
from pgopttune.config.tune_config import TuneConfig
from pgopttune.config.workload_sampling_config import WorkloadSamplingConfig
from pgopttune.workload.workload_sampler import WorkloadSampler
def main(conf_path='./conf/postgres_opttune.conf'):
    """Sample the workload of a running PostgreSQL server and save it to a file.

    Args:
        conf_path: path to the postgres_opttune configuration file.
    """
    # Read the three configuration sections used by sampling.
    server_config = PostgresServerConfig(conf_path)  # PostgreSQL server
    tuning_config = TuneConfig(conf_path)  # only the debug flag is used here
    sampling_config = WorkloadSamplingConfig(conf_path)
    # Configure logging before sampling so progress is visible.
    logging.config.dictConfig(logging_dict(debug=strtobool(tuning_config.debug)))
    logger = logging.getLogger(__name__)
    # Run the sampler and persist the captured workload.
    sampler = WorkloadSampler(server_config, sampling_config)
    saved_path = sampler.save()
    logger.info("Workload sampling is complete.\n"
                "Workload save file: {}".format(saved_path))
    logger.info(
        "You can automatically tune the saved workload by setting the following in'./conf/postgres_opttune.conf'.\n"
        "[turning]\n"
        "benchmark = sampled_workload \n"
        ":\n"
        "[sampled-workload]\n"
        "sampled_workload_save_file = {}".format(saved_path))
if __name__ == "__main__":
    import argparse

    # Command-line entry point: only the config-file path is configurable.
    arg_parser = argparse.ArgumentParser(
        description='Sample workload to PostgreSQL database and save to file.',
        usage='%(prog)s [options]')
    arg_parser.add_argument('-f', '--config_path', type=str, default='./conf/postgres_opttune.conf',
                            help='postgres opttune conf file path')
    parsed_args = arg_parser.parse_args()
    main(conf_path=parsed_args.config_path)
| 42.652174 | 116 | 0.734455 | [
"Apache-2.0"
] | ssl-oyamata/postgres_opttune | sampling_workload.py | 1,962 | Python |
from .fhirbase import fhirbase
class ImplementationGuide(fhirbase):
"""
A set of rules of how FHIR is used to solve a particular problem. This
resource is used to gather all the parts of an implementation guide
into a logical whole and to publish a computable definition of all the
parts.
Attributes:
resourceType: This is a ImplementationGuide resource
url: An absolute URI that is used to identify this implementation
guide when it is referenced in a specification, model, design or an
instance. This SHALL be a URL, SHOULD be globally unique, and SHOULD
be an address at which this implementation guide is (or will be)
published. The URL SHOULD include the major version of the
implementation guide. For more information see [Technical and Business
Versions](resource.html#versions).
version: The identifier that is used to identify this version of the
implementation guide when it is referenced in a specification, model,
design or instance. This is an arbitrary value managed by the
implementation guide author and is not expected to be globally unique.
For example, it might be a timestamp (e.g. yyyymmdd) if a managed
version is not available. There is also no expectation that versions
can be placed in a lexicographical sequence.
name: A natural language name identifying the implementation guide.
This name should be usable as an identifier for the module by machine
processing applications such as code generation.
status: The status of this implementation guide. Enables tracking the
life-cycle of the content.
experimental: A boolean value to indicate that this implementation
guide is authored for testing purposes (or
education/evaluation/marketing), and is not intended to be used for
genuine usage.
date: The date (and optionally time) when the implementation guide
was published. The date must change if and when the business version
changes and it must change if the status code changes. In addition, it
should change when the substantive content of the implementation guide
changes.
publisher: The name of the individual or organization that published
the implementation guide.
contact: Contact details to assist a user in finding and communicating
with the publisher.
description: A free text natural language description of the
implementation guide from a consumer's perspective.
useContext: The content was developed with a focus and intent of
supporting the contexts that are listed. These terms may be used to
assist with indexing and searching for appropriate implementation
guide instances.
jurisdiction: A legal or geographic region in which the implementation
guide is intended to be used.
copyright: A copyright statement relating to the implementation guide
and/or its contents. Copyright statements are generally legal
restrictions on the use and publishing of the implementation guide.
fhirVersion: The version of the FHIR specification on which this
ImplementationGuide is based - this is the formal version of the
specification, without the revision number, e.g.
[publication].[major].[minor], which is 3.0.1 for this version.
dependency: Another implementation guide that this implementation
depends on. Typically, an implementation guide uses value sets,
profiles etc.defined in other implementation guides.
package: A logical group of resources. Logical groups can be used when
building pages.
global: A set of profiles that all resources covered by this
implementation guide must conform to.
binary: A binary file that is included in the implementation guide
when it is published.
page: A page / section in the implementation guide. The root page is
the implementation guide home page.
"""
__name__ = 'ImplementationGuide'
def __init__(self, dict_values=None):
self.resourceType = 'ImplementationGuide'
# type: str
# possible values: ImplementationGuide
self.url = None
# type: str
self.version = None
# type: str
self.name = None
# type: str
self.status = None
# type: str
# possible values: draft, active, retired, unknown
self.experimental = None
# type: bool
self.date = None
# type: str
self.publisher = None
# type: str
self.contact = None
# type: list
# reference to ContactDetail
self.description = None
# type: str
self.useContext = None
# type: list
# reference to UsageContext
self.jurisdiction = None
# type: list
# reference to CodeableConcept
self.copyright = None
# type: str
self.fhirVersion = None
# type: str
self.dependency = None
# type: list
# reference to ImplementationGuide_Dependency
self.package = None
# type: list
# reference to ImplementationGuide_Package
self._global = None
# type: list
# reference to ImplementationGuide_Global
self.binary = None
# type: list
self.page = None
# reference to ImplementationGuide_Page
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
for value in self.status:
if value is not None and value.lower() not in [
'draft', 'active', 'retired', 'unknown']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'draft, active, retired, unknown'))
def get_relationships(self):
return [
{'parent_entity': 'ImplementationGuide_Package',
'parent_variable': 'object_id',
'child_entity': 'ImplementationGuide',
'child_variable': 'package'},
{'parent_entity': 'ContactDetail',
'parent_variable': 'object_id',
'child_entity': 'ImplementationGuide',
'child_variable': 'contact'},
{'parent_entity': 'ImplementationGuide_Global',
'parent_variable': 'object_id',
'child_entity': 'ImplementationGuide',
'child_variable': '_global'},
{'parent_entity': 'ImplementationGuide_Dependency',
'parent_variable': 'object_id',
'child_entity': 'ImplementationGuide',
'child_variable': 'dependency'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ImplementationGuide',
'child_variable': 'jurisdiction'},
{'parent_entity': 'UsageContext',
'parent_variable': 'object_id',
'child_entity': 'ImplementationGuide',
'child_variable': 'useContext'},
{'parent_entity': 'ImplementationGuide_Page',
'parent_variable': 'object_id',
'child_entity': 'ImplementationGuide',
'child_variable': 'page'},
]
class ImplementationGuide_Dependency(fhirbase):
    """
    Another implementation guide that this implementation guide depends on
    (typically for value sets, profiles etc. defined elsewhere).

    Attributes:
        type: How the dependency is represented when the guide is published.
        uri: Where the dependency is located.
    """

    __name__ = 'ImplementationGuide_Dependency'

    def __init__(self, dict_values=None):
        """Default every field to None, then optionally load *dict_values*."""
        # type: str (reference/inclusion); uri: str;
        # object_id: unique identifier for the object class.
        for field in ('type', 'uri', 'object_id'):
            setattr(self, field, None)
        if dict_values:
            self.set_attributes(dict_values)
            self.assert_type()

    def assert_type(self):
        """Raise ValueError when ``type`` holds a value other than
        'reference' or 'inclusion' (case-insensitive)."""
        # NOTE(review): iterating a plain str yields characters, not whole
        # codes — presumably set_attributes may store a list here; verify.
        if self.type is None:
            return
        for value in self.type:
            if value is None or value.lower() in ('reference', 'inclusion'):
                continue
            raise ValueError('"{}" does not match possible values: {}'.format(
                value, 'reference, inclusion'))
class ImplementationGuide_Package(fhirbase):
    """
    A logical group of resources inside an implementation guide; logical
    groups can be used when building pages.

    Attributes:
        name: The name for the group, as used in page.package.
        description: Human readable text describing the package.
        resource: A resource that is part of the implementation guide.
            Conformance resources (value set, structure definition,
            capability statements etc.) are obvious candidates for
            inclusion, but any kind of resource can be included as an
            example resource.
    """

    __name__ = 'ImplementationGuide_Package'

    def __init__(self, dict_values=None):
        """Default every field to None, then optionally load *dict_values*."""
        # name/description: str; resource: list of
        # ImplementationGuide_Resource; object_id: unique object identifier.
        for field in ('name', 'description', 'resource', 'object_id'):
            setattr(self, field, None)
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Describe the join from this package to its contained resources."""
        return [{'parent_entity': 'ImplementationGuide_Resource',
                 'parent_variable': 'object_id',
                 'child_entity': 'ImplementationGuide_Package',
                 'child_variable': 'resource'}]
class ImplementationGuide_Resource(fhirbase):
    """
    A single resource bundled into an implementation guide, either as part
    of the guide's rules or purely as an example.

    Attributes:
        example: Whether a resource is included in the guide as part of the
            rules defined by the guide, or just as an example of a resource
            that conforms to the rules.
        name: A human assigned name for the resource. All resources SHOULD
            have a name, but the name may be extracted from the resource
            (e.g. ValueSet.name).
        description: Why the resource has been included in the guide.
        acronym: A short code identifying the resource throughout the guide.
        sourceUri: Where this resource is found.
        sourceReference: Where this resource is found.
        exampleFor: Another resource that this resource is an example for,
            mostly used for examples of StructureDefinitions.
    """

    __name__ = 'ImplementationGuide_Resource'

    def __init__(self, dict_values=None):
        """Default every field to None, then optionally load *dict_values*."""
        # example: bool; name/description/acronym/sourceUri: str;
        # sourceReference/exampleFor: Reference (joined by identifier);
        # object_id: unique object identifier.
        for field in ('example', 'name', 'description', 'acronym',
                      'sourceUri', 'sourceReference', 'exampleFor',
                      'object_id'):
            setattr(self, field, None)
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Describe the joins to the two Reference-typed fields."""
        return [{'parent_entity': 'Reference',
                 'parent_variable': 'identifier',
                 'child_entity': 'ImplementationGuide_Resource',
                 'child_variable': child}
                for child in ('sourceReference', 'exampleFor')]
class ImplementationGuide_Global(fhirbase):
    """
    A profile constraint that all resources covered by the implementation
    guide must conform to.

    Attributes:
        type: The type of resource that all instances must conform to.
        profile: A reference to the profile that all instances must
            conform to.
    """

    __name__ = 'ImplementationGuide_Global'

    def __init__(self, dict_values=None):
        """Default every field to None, then optionally load *dict_values*."""
        # type: str; profile: Reference (joined by identifier);
        # object_id: unique object identifier.
        for field in ('type', 'profile', 'object_id'):
            setattr(self, field, None)
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Describe the join to the Reference-typed ``profile`` field."""
        return [{'parent_entity': 'Reference',
                 'parent_variable': 'identifier',
                 'child_entity': 'ImplementationGuide_Global',
                 'child_variable': 'profile'}]
class ImplementationGuide_Page(fhirbase):
    """
    A page / section in the implementation guide; pages nest recursively
    under the root home page.

    Attributes:
        source: The source address for the page.
        title: A short title used to represent this page in navigational
            structures such as table of contents, bread crumbs, etc.
        kind: The kind of page that this is (some pages are autogenerated).
        type: For constructed pages, what kind of resources to include.
        package: For constructed pages, packages to include (empty = all).
        format: The format of the page.
        page: Nested Pages/Sections under this page.
    """

    __name__ = 'ImplementationGuide_Page'

    def __init__(self, dict_values=None):
        """Default every field to None, then optionally load *dict_values*."""
        # source/title/kind/format: str; type/package: list;
        # page: list of nested ImplementationGuide_Page;
        # object_id: unique object identifier.
        for field in ('source', 'title', 'kind', 'type', 'package',
                      'format', 'page', 'object_id'):
            setattr(self, field, None)
        if dict_values:
            self.set_attributes(dict_values)
            self.assert_type()

    def assert_type(self):
        """Raise ValueError when ``kind`` holds a value outside the FHIR
        page-kind code set (case-insensitive)."""
        # NOTE(review): iterating a plain str yields characters, not whole
        # codes — presumably set_attributes may store a list here; verify.
        allowed = ('page', 'example', 'list', 'include', 'directory',
                   'dictionary', 'toc', 'resource')
        if self.kind is None:
            return
        for value in self.kind:
            if value is None or value.lower() in allowed:
                continue
            raise ValueError('"{}" does not match possible values: {}'.format(
                value, 'page, example, list, include, directory, dictionary, toc, '
                       'resource'))

    def get_relationships(self):
        """Describe the recursive join to nested pages."""
        return [{'parent_entity': 'ImplementationGuide_Page',
                 'parent_variable': 'object_id',
                 'child_entity': 'ImplementationGuide_Page',
                 'child_variable': 'page'}]
| 35.31828 | 91 | 0.623211 | [
"MIT"
] | Hector-hedb12/Cardea | cardea/fhir/ImplementationGuide.py | 16,423 | Python |
# Copyright 2015-2015 by Eric Rasche. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.AlignIO support for "xmfa" output from Mauve/ProgressiveMauve.
You are expected to use this module via the Bio.AlignIO functions (or the
Bio.SeqIO functions if you want to work directly with the gapped sequences).
For example, consider a progressiveMauve alignment file containing the following::
#FormatVersion Mauve1
#Sequence1File a.fa
#Sequence1Entry 1
#Sequence1Format FastA
#Sequence2File b.fa
#Sequence2Entry 2
#Sequence2Format FastA
#Sequence3File c.fa
#Sequence3Entry 3
#Sequence3Format FastA
#BackboneFile three.xmfa.bbcols
> 1:0-0 + a.fa
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
> 2:5417-5968 + b.fa
TTTAAACATCCCTCGGCCCGTCGCCCTTTTATAATAGCAGTACGTGAGAGGAGCGCCCTAAGCTTTGGGAAATTCAAGC-
--------------------------------------------------------------------------------
CTGGAACGTACTTGCTGGTTTCGCTACTATTTCAAACAAGTTAGAGGCCGTTACCTCGGGCGAACGTATAAACCATTCTG
> 3:9476-10076 - c.fa
TTTAAACACCTTTTTGGATG--GCCCAGTTCGTTCAGTTGTG-GGGAGGAGATCGCCCCAAACGTATGGTGAGTCGGGCG
TTTCCTATAGCTATAGGACCAATCCACTTACCATACGCCCGGCGTCGCCCAGTCCGGTTCGGTACCCTCCATGACCCACG
---------------------------------------------------------AAATGAGGGCCCAGGGTATGCTT
=
> 2:5969-6015 + b.fa
-----------------------
GGGCGAACGTATAAACCATTCTG
> 3:9429-9476 - c.fa
TTCGGTACCCTCCATGACCCACG
AAATGAGGGCCCAGGGTATGCTT
This is a multiple sequence alignment with multiple aligned sections, so you
would probably load this using the Bio.AlignIO.parse() function:
>>> from Bio import AlignIO
>>> align = AlignIO.parse("Mauve/simple.xmfa", "mauve")
>>> alignments = list(align)
>>> for aln in alignments:
...     print(aln)
SingleLetterAlphabet() alignment with 3 rows and 240 columns
--------------------------------------------...--- 1
TTTAAACATCCCTCGGCCCGTCGCCCTTTTATAATAGCAGTACG...CTG 2
TTTAAACACCTTTTTGGATG--GCCCAGTTCGTTCAGTTGTG-G...CTT 3
SingleLetterAlphabet() alignment with 3 rows and 46 columns
---------------------------------------------- 1
-----------------------GGGCGAACGTATAAACCATTCTG 2
TTCGGTACCCTCCATGACCCACGAAATGAGGGCCCAGGGTATGCTT 3
Additional information is extracted from the XMFA file and available through
the annotation attribute of each record::
>>> for record in alignments[0]:
...     print(record.id, len(record), record.annotations)
1 240 {'start': 0, 'end': 0, 'strand': 1}
2 240 {'start': 5417, 'end': 5968, 'strand': 1}
3 240 {'start': 9476, 'end': 10076, 'strand': -1}
"""
from __future__ import print_function
import re
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from .Interfaces import AlignmentIterator
from .Interfaces import SequentialAlignmentWriter
# Header line of an XMFA sequence entry, e.g. "> 1:0-0 + a.fa".
# Raw strings avoid the invalid "\d" escape sequences (DeprecationWarning,
# and a SyntaxWarning on modern Python) that plain strings produce.
XMFA_HEADER_REGEX = re.compile(
    r"> (?P<id>\d+):(?P<start>\d+)-(?P<end>\d+) (?P<strand>[+-]) (?P<name>.*)")
# Variant written by Biopython itself: the real record id follows a '#'.
XMFA_HEADER_REGEX_BIOPYTHON = re.compile(
    r"> (?P<id>\d+):(?P<start>\d+)-(?P<end>\d+) (?P<strand>[+-]) (?P<name>[^#]*) # (?P<realname>.*)")
# Template used by MauveWriter when emitting entry headers.
ID_LINE_FMT = "> {seq_name}:{start}-{end} {strand} {file} # {ugly_hack}\n"
def _identifier_split(identifier):
"""Return (name, start, end) string tuple from an identifier (PRIVATE)."""
id, loc, strand = identifier.split(':')
start, end = map(int, loc.split('-'))
start -= 1
return id, start, end, strand
class MauveWriter(SequentialAlignmentWriter):
    """Mauve/XMFA alignment writer."""

    def __init__(self, *args, **kwargs):
        """Initialize."""
        super(MauveWriter, self).__init__(*args, **kwargs)
        # The "#FormatVersion ..." file header must be emitted exactly once.
        self._wrote_header = False
        # The first LCB written is special-cased (see _write_record).
        self._wrote_first = False

    def write_alignment(self, alignment):
        """Use this to write (another) single alignment to an open file.

        Note that sequences and their annotation are recorded
        together (rather than having a block of annotation followed
        by a block of aligned sequences).
        """
        count = len(alignment)

        self._length_of_sequences = alignment.get_alignment_length()

        # NOTE - For now, the alignment object does not hold any per column
        # or per alignment annotation - only per sequence.

        if count == 0:
            raise ValueError("Must have at least one sequence")
        if self._length_of_sequences == 0:
            raise ValueError("Non-empty sequences are required")

        if not self._wrote_header:
            self._wrote_header = True
            self.handle.write("#FormatVersion Mauve1\n")
            # There are some more headers, but we ignore those for now.
            # Sequence1File unknown.fa
            # Sequence1Entry 1
            # Sequence1Format FastA
            for i in range(1, count + 1):
                self.handle.write('#Sequence%sEntry\t%s\n' % (i, i))

        # One "> id:start-end strand file" header + wrapped sequence per
        # record, then '=' terminates this LCB.
        for idx, record in enumerate(alignment):
            self._write_record(record, record_idx=idx)
        self.handle.write('=\n')

    def _write_record(self, record, record_idx=0):
        """Write a single SeqRecord to the file (PRIVATE)."""
        if self._length_of_sequences != len(record.seq):
            raise ValueError("Sequences must all be the same length")

        # Prefer a numeric record name; otherwise fall back to the record's
        # 1-based position within the alignment.
        seq_name = record.name
        try:
            seq_name = str(int(record.name))
        except ValueError:
            seq_name = str(record_idx + 1)

        # We remove the "/{start}-{end}" before writing, as it cannot be part
        # of the produced XMFA file.
        if "start" in record.annotations and "end" in record.annotations:
            # Both 0-based and 1-based start variants are stripped.
            suffix0 = "%s-%s" % ('/' + str(record.annotations["start"]),
                                 str(record.annotations["end"])) if False else "/%s-%s" % (str(record.annotations["start"]),
                                  str(record.annotations["end"]))
            suffix1 = "/%s-%s" % (str(record.annotations["start"] + 1),
                                  str(record.annotations["end"]))
            if seq_name[-len(suffix0):] == suffix0:
                seq_name = seq_name[:-len(suffix0)]
            if seq_name[-len(suffix1):] == suffix1:
                seq_name = seq_name[:-len(suffix1)]

        if "start" in record.annotations \
                and "end" in record.annotations \
                and "strand" in record.annotations:
            id_line = ID_LINE_FMT.format(
                seq_name=seq_name, start=record.annotations["start"] + 1, end=record.annotations["end"],
                strand=("+" if record.annotations["strand"] == 1 else "-"), file=record.name + '.fa',
                ugly_hack=record.id
            )
            lacking_annotations = False
        else:
            id_line = ID_LINE_FMT.format(
                seq_name=seq_name, start=0, end=0, strand='+',
                file=record.name + '.fa', ugly_hack=record.id
            )
            lacking_annotations = True

        # If the sequence is an empty one, skip writing it out
        if (':0-0 ' in id_line or ':1-0 ' in id_line) and not lacking_annotations:
            # Except in the first LCB
            if not self._wrote_first:
                self._wrote_first = True
                # The first LCB we write out is special, and must list ALL
                # sequences, for the Mauve GUI
                # http://darlinglab.org/mauve/user-guide/files.html#non-standard-xmfa-formatting-used-by-the-mauve-gui
                id_line = ID_LINE_FMT.format(
                    seq_name=seq_name, start=0, end=0, strand='+',
                    file=record.name + '.fa', ugly_hack=record.id
                )
                # NOTE(review): ID_LINE_FMT already ends with '\n', so this
                # emits an extra blank line after the header — confirm that
                # the Mauve GUI expects it.
                self.handle.write(id_line + '\n')
            # Alignments lacking a start/stop/strand were generated by
            # BioPython on load, and shouldn't exist according to XMFA
        else:
            # In other blocks, we only write sequences if they exist in a given
            # alignment.
            self.handle.write(id_line)
            # Wrap the gapped sequence at 80 columns.
            for i in range(0, len(record.seq), 80):
                self.handle.write("%s\n" % str(record.seq[i:i + 80]))
class MauveIterator(AlignmentIterator):
    """Mauve xmfa alignment iterator."""

    _ids = []  # for caching IDs between __next__ calls

    def __next__(self):
        """Parse the next alignment from the handle."""
        handle = self.handle
        line = handle.readline()

        if not line:
            raise StopIteration

        # Strip out header comments
        while line and line.strip().startswith('#'):
            line = handle.readline()

        seqs = {}           # id -> concatenated gapped sequence for this LCB
        seq_regions = {}    # id -> parsed header fields (start/end/strand/...)
        passed_end_alignment = False

        latest_id = None
        while True:
            if not line:
                break  # end of file
            line = line.strip()
            if line.startswith('='):
                # There may be more data, but we've reached the end of this
                # alignment
                break
            elif line.startswith('>'):
                # Try the Biopython-emitted header (with "# realname") first,
                # then the plain Mauve header.
                m = XMFA_HEADER_REGEX_BIOPYTHON.match(line)
                if not m:
                    m = XMFA_HEADER_REGEX.match(line)
                    if not m:
                        # NOTE(review): the "%s" is never interpolated here —
                        # ValueError receives two args instead of a formatted
                        # message; confirm before changing.
                        raise ValueError("Malformed header line: %s", line)
                parsed_id = m.group('id')
                parsed_data = {}
                for key in ('start', 'end', 'id', 'strand', 'name', 'realname'):
                    try:
                        value = m.group(key)
                        if key == 'start':
                            value = int(value)
                            # Convert to zero based counting
                            if value > 0:
                                value -= 1

                        if key == 'end':
                            value = int(value)
                        parsed_data[key] = value
                    except IndexError:
                        # This will occur if we're asking for a group that
                        # doesn't exist. It's fine.
                        pass
                seq_regions[parsed_id] = parsed_data

                if parsed_id not in self._ids:
                    self._ids.append(parsed_id)

                seqs.setdefault(parsed_id, '')
                latest_id = parsed_id
            else:
                # Sequence continuation line: append to the most recent id.
                assert not passed_end_alignment
                if latest_id is None:
                    raise ValueError("Saw sequence before definition line")
                seqs[latest_id] += line
            line = handle.readline()

        assert len(seqs) <= len(self._ids)

        self.ids = self._ids
        self.sequences = seqs

        if self._ids and seqs:
            # Pad ids missing from this LCB with all-gap sequences so every
            # row has the same length.
            alignment_length = max(map(len, list(seqs.values())))
            records = []
            for id in self._ids:
                # NOTE(review): the second `len(seqs[id]) == 0` test is a
                # duplicate of the first — harmless but redundant.
                if id not in seqs or len(seqs[id]) == 0 \
                        or len(seqs[id]) == 0:
                    seq = '-' * alignment_length
                else:
                    seq = seqs[id]

                if alignment_length != len(seq):
                    raise ValueError("Sequences have different lengths, or repeated identifier")

                # Sometimes we don't see a particular sequence in the
                # alignment, so we skip that record since it isn't present in
                # that LCB/alignment
                if id not in seq_regions:
                    continue

                if (seq_regions[id]['start'] != 0 or seq_regions[id]['end'] != 0):
                    # Non-empty region: append "/start-end" to the id unless
                    # it is already there.
                    suffix = '/{start}-{end}'.format(**seq_regions[id])
                    if 'realname' in seq_regions[id]:
                        corrected_id = seq_regions[id]['realname']
                    else:
                        corrected_id = seq_regions[id]['name']
                    if corrected_id.count(suffix) == 0:
                        corrected_id += suffix
                else:
                    if 'realname' in seq_regions[id]:
                        corrected_id = seq_regions[id]['realname']
                    else:
                        corrected_id = seq_regions[id]['name']

                record = SeqRecord(
                    Seq(seq, self.alphabet),
                    id=corrected_id,
                    name=id
                )

                record.annotations["start"] = seq_regions[id]['start']
                record.annotations["end"] = seq_regions[id]['end']
                record.annotations["strand"] = 1 if seq_regions[id]['strand'] == '+' else -1

                records.append(record)
            return MultipleSeqAlignment(records, self.alphabet)
        else:
            raise StopIteration
| 40.089783 | 137 | 0.546915 | [
"BSD-3-Clause"
] | BioinfoCat/biopython | Bio/AlignIO/MauveIO.py | 12,949 | Python |
import logging
import os
import json
import tensorflow as tf
import pandas as pd
import numpy as np
from tqdm import tqdm
import datetime
from pdb import set_trace
from time import sleep
from openpyxl import load_workbook, Workbook
from openpyxl.worksheet.table import Table
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras import Model
# Own modules
import cvnn
import cvnn.layers as layers
import cvnn.dataset as dp
from cvnn.data_analysis import MonteCarloAnalyzer, Plotter, get_confusion_matrix
from cvnn.layers import ComplexDense, ComplexDropout
from cvnn.utils import transform_to_real, randomize, transform_to_real_map_function
from cvnn.real_equiv_tools import get_real_equivalent
from cvnn.utils import median_error
from cvnn.initializers import ComplexGlorotUniform
# typing
from pathlib import Path
from typing import Union, Optional, List, Tuple
from cvnn.activations import t_activation
from tensorflow import data
from typing import Type
logger = logging.getLogger(cvnn.__name__)
DEFAULT_OUTPUT_ACT = 'softmax_real_with_abs'
t_path = Union[str, Path]
class MonteCarlo:
def __init__(self):
"""
Class that allows the statistical comparison of several models on the same dataset
"""
self.models = []
self.pandas_full_data = pd.DataFrame()
self.monte_carlo_analyzer = MonteCarloAnalyzer() # All at None
self.verbose = 1
self.output_config = {
'plot_all': False,
'confusion_matrix': False,
'excel_summary': True,
'summary_of_run': True,
'tensorboard': False,
'save_weights': False,
'safety_checkpoints': False
}
def add_model(self, model: Type[Model]):
"""
Adds a cvnn.CvnnModel to the list to then compare between them
"""
self.models.append(model)
@staticmethod
def _parse_verbose(verbose: Union[str, int, bool]) -> int:
if isinstance(verbose, bool):
verbose = 2 if verbose else 1
elif isinstance(verbose, str):
if verbose.lower() == 'silent':
verbose = 0
elif verbose.lower() == 'debug':
verbose = 2
else:
raise ValueError(f"Unknown verbose mode {verbose}")
else:
try:
verbose = int(verbose)
if verbose > 2 or verbose < 0:
raise ValueError(f"verbose should be one of 0, 1 or 2, received {verbose}")
except Exception as e:
raise ValueError(f"Cannot cast verbose = {verbose} to int")
return verbose
def run(self, x, y, data_summary: str = '',
real_cast_modes: Optional[Union[str, List[Optional[str]], Tuple[Optional[str]]]] = None,
validation_split: float = 0.2,
validation_data: Optional[Union[Tuple[np.ndarray, np.ndarray], data.Dataset]] = None,
# TODO: Add the tuple of validation data details.
test_data: Optional[Union[Tuple[np.ndarray, np.ndarray], data.Dataset]] = None,
iterations: int = 100, epochs: int = 10, batch_size: int = 100, early_stop: bool = False,
shuffle: bool = True, verbose: Optional[Union[bool, int, str]] = 1, display_freq: int = 1,
same_weights: bool = False, process_dataset: bool = True):
"""
This function is used to compare all models added with `self.add_model` method.
Runs the iteration dataset (x, y).
1. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.
2. Saves several files into ./log/montecarlo/date/of/run/
2.1. run_summary.txt: Summary of the run models and data
2.2. run_data.csv: Full information of performance of iteration of each model at each epoch
2.3. <model.name>_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch
2.4. (Optional with parameter plot_all)
`plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()
:param x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs).
- A tf.data dataset. Should return a tuple (inputs, targets). Preferred data type (less overhead).
:param y: Labels/Target data. Like the input data x, it could be either Numpy array(s) or TensorFlow tensor(s).
If f x is a dataset then y will be ignored (default None)
:param data_summary: (String) Dataset name to keep track of it
:param real_cast_modes: mode parameter used by cvnn.utils.transform_to_real to be used when the model to
train is real-valued. One of the following:
- String with the mode listed in cvnn.utils.transform_to_real to be used by all the real-valued models to
cast complex data to real.
- List or Tuple of strings: Same size of self.models. mode on how to cast complex data to real for each
model in self.model.
real_cast_modes[i] will indicate how to cast data for self.models[i] (ignored when model is complex).
:param validation_split: Float between 0 and 1.
Percentage of the input data to be used as test set (the rest will be use as train set)
Default: 0.0 (No validation set).
This input is ignored if validation_data is given.
:param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. This parameter takes precedence over validation_split.
It can be:
- tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).
- A tf.data dataset.
:param test_data: Data on which to evaluate the loss and any model metrics at the end of a model training.
The model will not be trained on this data.
If test data is not None (default) it will generate a file called `test_results.csv` with the
statistical results from the test data.
It can be:
- tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).
- A tf.data dataset.
:param iterations: Number of iterations to be done for each model
:param epochs: Number of epochs for each iteration
:param batch_size: Batch size at each iteration
:param display_freq: Integer (Default 1). Only relevant if validation data is provided.
Frequency on terms of epochs before running the validation.
:param shuffle: (Boolean) Whether to shuffle the training data before each epoch.
:param verbose: Different modes according to number:
- 0 or 'silent': No output at all
- 1 or False: Progress bar per iteration
- 2 or True or 'debug': Progress bar per epoch
:param early_stop: (Default: False) Wheather to implement early stop on training.
:param same_weights: (Default False) If True it will use the same weights at each iteration.
:return: (string) Full path to the run_data.csv generated file.
It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.
"""
if verbose:
self.verbose = self._parse_verbose(verbose)
test_data_cols = None
if test_data is not None:
test_data_cols = ['network'] + [n.get_config()['name'] for n in self.models[0].metrics]
real_cast_modes = self._check_real_cast_modes(real_cast_modes)
confusion_matrix, pbar, test_results = self._beginning_callback(iterations, epochs, batch_size,
shuffle, data_summary, test_data_cols)
w_save = [] # TODO: Find a better method
for model in self.models: # ATTENTION: This will make all models have the SAME weights, not ideal
w_save.append(model.get_weights()) # Save model weight
# np.save(self.monte_carlo_analyzer.path / "initial_debug_weights.npy", np.array(w_save)) # TODO
for it in range(iterations):
if self.verbose == 2:
logger.info("Iteration {}/{}".format(it + 1, iterations))
for i, model in enumerate(self.models):
x_fit, val_data_fit, test_data_fit = self._get_fit_dataset(model.inputs[0].dtype.is_complex, x,
validation_data, test_data,
real_cast_modes[i],
process_dataset=process_dataset)
clone_model = tf.keras.models.clone_model(model)
if isinstance(model.loss, tf.keras.losses.Loss):
loss = model.loss.__class__.from_config(config=model.loss.get_config())
else:
loss = model.loss
clone_model.compile(optimizer=model.optimizer.__class__.from_config(model.optimizer.get_config()),
loss=loss,
metrics=['accuracy']) # TODO: Until the issue is solved, I need to force metrics
# https://github.com/tensorflow/tensorflow/issues/40030
# https://stackoverflow.com/questions/62116136/tensorflow-keras-metrics-not-showing/69193373
if same_weights:
clone_model.set_weights(w_save[i])
temp_path = self.monte_carlo_analyzer.path / f"run/iteration{it}_model{i}_{model.name}"
os.makedirs(temp_path, exist_ok=True)
callbacks = []
if self.output_config['tensorboard']:
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=temp_path / 'tensorboard',
histogram_freq=1)
callbacks.append(tensorboard_callback)
if early_stop:
eas = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
callbacks.append(eas)
run_result = clone_model.fit(x_fit, y, validation_split=validation_split, validation_data=val_data_fit,
epochs=epochs, batch_size=batch_size,
verbose=self.verbose==2, validation_freq=display_freq,
callbacks=callbacks, shuffle=shuffle)
test_results = self._inner_callback(clone_model, validation_data, confusion_matrix, real_cast_modes[i],
i, run_result, test_results, test_data_fit, temp_path)
self._outer_callback(pbar)
return self._end_callback(x, y, iterations, data_summary, real_cast_modes, epochs, batch_size,
confusion_matrix, test_results, pbar, w_save)
# TODO: What was the idea of save_weights? Is it necessary or it was only debugging?
def _check_real_cast_modes(self, real_cast_modes):
# TODO: I can check the real models input size corresponds to the real_cast_mode. And change it with a warning?
if real_cast_modes is None:
real_cast_modes = "real_imag"
if isinstance(real_cast_modes, str):
real_cast_modes = [real_cast_modes for _ in self.models]
# I suppose then real_cast_modes is a list or tuple. Not checked TODO
assert len(real_cast_modes) == len(self.models), "Size of real_cast_modes should be equal to the total models"
return real_cast_modes
    @staticmethod
    def _transform_dataset(is_complex: bool, validation_data, polar):
        """Cast held-out data to real when the target model is real-valued.

        :param is_complex: True when the model consumes complex inputs.
        :param validation_data: None, a tf.data.Dataset, or an (x, y) tuple.
        :param polar: ``mode`` forwarded to transform_to_real(_map_function).
        :return: the (possibly transformed) data, or None when input is None.
        """
        val_data_fit = None
        if validation_data is not None:
            if isinstance(validation_data, tf.data.Dataset):
                # Datasets are transformed lazily via map().
                if not is_complex:
                    val_data_fit = validation_data.map(lambda x, y: transform_to_real_map_function(x, y, mode=polar))
                else:
                    val_data_fit = validation_data
            elif (is_complex and validation_data[0].dtype.is_complex) or \
                    (not is_complex and validation_data[0].dtype.is_floating):
                # dtype already matches the model: pass through unchanged.
                val_data_fit = validation_data
            elif is_complex and not validation_data[0].dtype.is_complex:
                # Real data for a complex model: up-casting is unsupported.
                raise NotImplementedError(f"The input dataset is expected to be complex")
            else:
                # Complex tuple data for a real model: cast x, keep y.
                val_data_fit = (transform_to_real(validation_data[0], mode=polar), validation_data[1])
        return val_data_fit
    def _get_fit_dataset(self, is_complex: bool, x, validation_data, test_data, polar, process_dataset):
        """Prepare train/validation/test data for one model's dtype.

        :param is_complex: True when the model consumes complex inputs.
        :param x: training data (np/tf tensor or tf.data.Dataset).
        :param validation_data: held-out data (same accepted forms) or None.
        :param test_data: end-of-training data (same accepted forms) or None.
        :param polar: cast mode forwarded to transform_to_real(_map_function).
        :param process_dataset: when False, everything is returned untouched.
        :return: tuple (x_fit, val_data_fit, test_data_fit).
        """
        if not process_dataset:
            return x, validation_data, test_data
        if isinstance(x, tf.data.Dataset):
            # Dataset input: cast lazily through map() for real models.
            if not is_complex:
                x_fit = x.map(lambda imag, label: transform_to_real_map_function(imag, label, mode=polar))
            else:
                x_fit = x
        elif (is_complex and tf.dtypes.as_dtype(x.dtype).is_complex) or \
                (not is_complex and tf.dtypes.as_dtype(x.dtype).is_floating):
            # dtype already matches the model: pass through unchanged.
            x_fit = x
        elif is_complex and not tf.dtypes.as_dtype(x.dtype).is_complex:
            raise NotImplementedError(f"Cast real dataset to complex not yet implemented, "
                                      f"please provide the dataset in complex form.")
        else:
            x_fit = transform_to_real(x, mode=polar)
        # Validation/test data follow the same casting rules.
        val_data_fit = self._transform_dataset(is_complex, validation_data, polar)
        test_data_fit = self._transform_dataset(is_complex, test_data, polar)
        return x_fit, val_data_fit, test_data_fit
# Callbacks
    def _beginning_callback(self, iterations, epochs, batch_size, shuffle, data_summary, test_data_cols):
        """Set up per-run state before the Monte Carlo loop starts.

        Resets the accumulated results dataframe, optionally creates the
        progress bar, per-model confusion-matrix holders, the run summary
        file and the (empty) test-results dataframe.

        :return: tuple (confusion_matrix, pbar, test_results), each possibly
            None depending on output_config / verbosity / test_data_cols.
        """
        confusion_matrix = None
        pbar = None
        # Reset data frame
        self.pandas_full_data = pd.DataFrame()
        if self.verbose == 1:
            # One progress-bar tick per Monte Carlo iteration.
            pbar = tqdm(total=iterations)
        if self.output_config['confusion_matrix']:
            confusion_matrix = []
            for mdl in self.models:
                confusion_matrix.append({"name": mdl.name, "matrix": pd.DataFrame()})
        if self.output_config['summary_of_run']:
            self._save_summary_of_run(self._run_summary(iterations, epochs, batch_size, shuffle), data_summary)
        test_results = None
        if test_data_cols is not None:
            # TODO: Consider making it a tuple so it is unmutable
            test_results = pd.DataFrame(columns=test_data_cols)
        return confusion_matrix, pbar, test_results
    def _end_callback(self, x, y, iterations, data_summary, polar, epochs, batch_size,
                      confusion_matrix, test_results, pbar, w_save):
        """
        Finish a Monte Carlo run: close the progress bar, hand the collected
        results to the analyzer, and persist the enabled outputs (initial
        weights, excel summary, averaged confusion matrices, test metrics).

        :return: Whatever ``MonteCarloAnalyzer.do_all()`` returns when the
            'plot_all' output option is enabled, otherwise None.
        """
        if self.verbose == 1:
            pbar.close()
        self.pandas_full_data = self.pandas_full_data.reset_index(drop=True)
        self.monte_carlo_analyzer.set_df(self.pandas_full_data)
        if self.output_config['save_weights']:
            np.save(self.monte_carlo_analyzer.path / "initial_weights.npy", np.array(w_save))
        if self.output_config['excel_summary']:
            try:  # TODO: Think this better
                num_classes = str(y.shape[1])
            except IndexError:
                # y is 1-D (sparse labels): fall back to the label value range.
                num_classes = max(y) - min(y)
            self._save_montecarlo_log(iterations=iterations,
                                      dataset_name=data_summary,
                                      num_classes=num_classes, polar_mode='Yes' if polar else 'No',
                                      dataset_size=str(x.shape[0]), features_size=str(x.shape[1:]),
                                      epochs=epochs, batch_size=batch_size
                                      )
        if self.output_config['confusion_matrix']:
            if confusion_matrix is not None:
                for model_cm in confusion_matrix:
                    # If the first prediction does not predict a given class, the order will be wrong, so I sort it.
                    cm = model_cm['matrix']
                    cols = cm.columns.tolist()
                    # NOTE: the lambdas below shadow the outer `x` parameter (harmless here).
                    strs = list(filter(lambda x: type(x) == str, cols))
                    ints = list(filter(lambda x: type(x) == int, cols))
                    ints.sort()
                    strs.sort()
                    cm_sorted = cm.fillna(0)[ints + strs]  # Sorted confusion matrix
                    # Average the matrices accumulated over all iterations of this model.
                    model_cm['matrix'] = cm_sorted.groupby(cm_sorted.index).mean()
                    model_cm['matrix'].to_csv(
                        self.monte_carlo_analyzer.path / (model_cm['name'] + "_confusion_matrix.csv"))
        if test_results is not None:
            test_results.groupby('network').describe().to_csv(self.monte_carlo_analyzer.path / ("test_results.csv"))
        if self.output_config['plot_all']:
            return self.monte_carlo_analyzer.do_all()
def _inner_callback(self, model, validation_data, confusion_matrix, polar, model_index,
run_result, test_results, test_data_fit, temp_path):
# TODO: Must have save_csv_history to do the montecarlo results latter
# Save all results
plotter = Plotter(path=temp_path, data_results_dict=run_result.history, model_name=model.name)
self.pandas_full_data = pd.concat([self.pandas_full_data, plotter.get_full_pandas_dataframe()], sort=False)
if self.output_config['confusion_matrix']:
if validation_data is not None: # TODO: Haven't yet done all cases here!
if model.inputs[0].dtype.is_complex:
x_test, y_test = validation_data
else:
x_test, y_test = (transform_to_real(validation_data[0], mode=polar), validation_data[1])
try:
confusion_matrix[model_index]["matrix"] = pd.concat((confusion_matrix[model_index]["matrix"],
get_confusion_matrix(model.predict(x_test),
y_test)))
except ValueError:
logger.warning("ValueError: Could not do confusion matrix. No objects to concatenate.")
# TODO: I think confusion matrix stopped working.
else:
print("Confusion matrix only available for validation_data")
if self.output_config['save_weights']:
# model.save_weights(temp_path / "final_weights")
np.save(temp_path / "final_weights.npy", model.get_weights())
if test_results is not None:
tmp_result = [model.name] + model.evaluate(x=test_data_fit[0], y=test_data_fit[1], verbose=0)
cols = ['network'] + [n.get_config()['name'] for n in self.models[0].metrics]
test_results = test_results.append(pd.DataFrame([tmp_result], columns=cols), ignore_index=True)
return test_results
def _outer_callback(self, pbar):
if self.verbose == 1:
pbar.update()
if self.output_config['safety_checkpoints']:
# Save checkpoint in case Monte Carlo stops in the middle
self.pandas_full_data.to_csv(self.monte_carlo_analyzer.path / "run_data.csv", index=False)
# Saver functions
def _save_montecarlo_log(self, iterations, dataset_name, num_classes, polar_mode, dataset_size,
features_size, epochs, batch_size):
fieldnames = [
'iterations',
'dataset', '# Classes', "Dataset Size", 'Feature Size', # Dataset information
'models', 'epochs', 'batch size', "Polar Mode", # Models information
'path', "cvnn version" # Library information
]
row_data = [
iterations,
dataset_name, num_classes, dataset_size, features_size,
'-'.join([str(model.name) for model in self.models]), epochs, batch_size, polar_mode,
str(self.monte_carlo_analyzer.path), cvnn.__version__
]
_create_excel_file(fieldnames, row_data, './log/monte_carlo_summary.xlsx')
@staticmethod
def _run_summary(iterations: int, epochs: int, batch_size: int, shuffle: bool) -> str:
ret_str = "Monte Carlo run\n"
ret_str += f"\tIterations: {iterations}\n"
ret_str += f"\tepochs: {epochs}\n"
ret_str += f"\tbatch_size: {batch_size}\n"
if shuffle:
ret_str += "\tShuffle data at each iteration\n"
else:
ret_str += "\tData is not shuffled at each iteration\n"
return ret_str
def _save_summary_of_run(self, run_summary, data_summary):
"""
Saves 2 files:
- run_summary.txt: A user-friendly resume of the monte carlo run.
- models_details.json: A full serialized version of the models.
Contains info that lacks in the txt file like the loss or optimizer.
"""
with open(str(self.monte_carlo_analyzer.path / "run_summary.txt"), "w") as file:
file.write(run_summary)
file.write(data_summary + "\n")
file.write("Models:\n")
for model in self.models:
model.summary(print_fn=lambda x: file.write(x + '\n'))
json_dict = {}
for i, model in enumerate(self.models):
json_dict[str(i)] = {
'name': model.name,
'loss': model.loss if isinstance(model.loss, str) else model.loss.get_config(), # Not yet support function loss
'optimizer': model.optimizer.get_config(),
'layers': [layer.get_config() for layer in model.layers]
}
with open(self.monte_carlo_analyzer.path / 'models_details.json', 'w') as fp:
json.dump(str(json_dict), fp)
class RealVsComplex(MonteCarlo):
    """
    Inherits from MonteCarlo. Compares a complex model with it's real equivalent.

    Example usage:
    ```
    # Assume you already have complex data 'x' with its labels 'y'... and a Cvnn model.
    montecarlo = RealVsComplex(complex_model)
    montecarlo.run(x, y)
    ```
    """

    def __init__(self, complex_model: Type[Model], capacity_equivalent: bool = True, equiv_technique: str = 'ratio'):
        """
        :param complex_model: Complex keras model (ex: sequential)
        :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or
            trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)
            - True, it creates a capacity-equivalent model in terms of trainable parameters
            - False, it will double all layer size (except the last one if classifier=True)
        :param equiv_technique: Used to define the strategy of the capacity equivalent model.
            This parameter is ignored if capacity_equivalent=False
            - 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'
            - 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between
                multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.
        """
        super().__init__()
        # Register the complex model and its automatically derived real equivalent.
        self.add_model(complex_model)
        self.add_model(get_real_equivalent(complex_model, capacity_equivalent=capacity_equivalent,
                                           equiv_technique=equiv_technique, name="real_network"))

    def _save_montecarlo_log(self, iterations, dataset_name, num_classes, polar_mode, dataset_size,
                             features_size, epochs, batch_size):
        """
        Append a CVNN-vs-RVNN comparison row (last-epoch medians, median errors
        and the winner) to ./log/rvnn_vs_cvnn_monte_carlo_summary.xlsx.
        """
        max_epoch = self.pandas_full_data['epoch'].max()
        epoch_filter = self.pandas_full_data['epoch'] == max_epoch
        complex_filter = self.pandas_full_data['network'] == self.models[0].name
        real_filter = self.pandas_full_data['network'] == self.models[1].name
        complex_last_epochs = self.pandas_full_data[epoch_filter & complex_filter]
        real_last_epochs = self.pandas_full_data[epoch_filter & real_filter]
        complex_median_train = complex_last_epochs['accuracy'].median()
        real_median_train = real_last_epochs['accuracy'].median()
        complex_median = complex_last_epochs['val_accuracy'].median()
        real_median = real_last_epochs['val_accuracy'].median()
        # Bugfix: both error bars are computed on the *validation* accuracy quantiles,
        # matching the reported medians (the complex one previously used train accuracy).
        complex_err = median_error(complex_last_epochs['val_accuracy'].quantile(.75),
                                   complex_last_epochs['val_accuracy'].quantile(.25), iterations)
        real_err = median_error(real_last_epochs['val_accuracy'].quantile(.75),
                                real_last_epochs['val_accuracy'].quantile(.25), iterations)
        fieldnames = ['iterations', 'dataset', '# Classes', "Dataset Size", 'Feature Size', "Polar Mode",
                      "Optimizer", "Loss",
                      'epochs', 'batch size',
                      "Winner", "CVNN val median", "RVNN val median", 'CVNN err', 'RVNN err',
                      "CVNN train median", "RVNN train median",
                      'path', "cvnn version"
                      ]
        # Bugfix: the optimizer is serialized before the loss so the values line up with
        # the "Optimizer" / "Loss" headers (they were previously swapped).
        row_data = [iterations, dataset_name, num_classes, dataset_size, features_size, polar_mode,
                    # Dataset information
                    str(tf.keras.optimizers.serialize(self.models[0].optimizer)),
                    str(tf.keras.losses.serialize(self.models[0].loss)),
                    epochs, batch_size,  # Model information
                    'CVNN' if complex_median > real_median else 'RVNN',
                    complex_median, real_median, complex_err, real_err,  # Preliminary results
                    complex_median_train, real_median_train,
                    str(self.monte_carlo_analyzer.path), cvnn.__version__
                    ]
        # Columns L/M (val medians) and P/Q (train medians) are written as percentages.
        percentage_cols = ['P', 'Q', 'L', 'M']
        _create_excel_file(fieldnames, row_data, './log/rvnn_vs_cvnn_monte_carlo_summary.xlsx',
                           percentage_cols=percentage_cols)
# ====================================
# Monte Carlo simulation methods
# ====================================
def run_gaussian_dataset_montecarlo(iterations: int = 30, m: int = 10000, n: int = 128, param_list=None,
                                    epochs: int = 300, batch_size: int = 100, display_freq: int = 1,
                                    optimizer='sgd', validation_split: float = 0.2,  # TODO: Add typing here
                                    shape_raw: List[int] = None, activation: t_activation = 'cart_relu',
                                    verbose: bool = False, do_all: bool = True, tensorboard: bool = False,
                                    polar: Optional[Union[str, List[Optional[str]], Tuple[Optional[str]]]] = None,
                                    capacity_equivalent: bool = True, equiv_technique: str = 'ratio',
                                    dropout: Optional[float] = None, models: Optional[List[Model]] = None,
                                    plot_data: bool = True, early_stop: bool = False, shuffle: bool = True) -> str:
    """
    This function is used to compare CVNN vs RVNN performance over statistical non-circular data.
    1. Generates a complex-valued gaussian correlated noise with the characteristics given by the inputs.
    2. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.
    3. Saves several files into ./log/montecarlo/date/of/run/
        3.1. run_summary.txt: Summary of the run models and data
        3.2. run_data.csv: Full information of performance of iteration of each model at each epoch
        3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch
        3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch
        3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()
    :param iterations: Number of iterations to be done for each model
    :param m: Total size of the dataset (number of examples)
    :param n: Number of features / input vector
    :param param_list: A list of len = number of classes.
        Each element of the list is another list of len = 3 with values: [correlation_coeff, sigma_x, sigma_y]
        Example for dataset type A of paper https://arxiv.org/abs/2009.08340:
            param_list = [
                [0.5, 1, 1],
                [-0.5, 1, 1]
            ]
        Default: None will default to the example.
    :param epochs: Number of epochs for each iteration
    :param batch_size: Batch size at each iteration
    :param display_freq: Frequency in terms of epochs of when to do a checkpoint.
    :param optimizer: Optimizer to be used. Keras optimizers are not allowed.
        Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.
    :param validation_split: float between 0 and 1. Fraction of the training data to be used as validation data.
        The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss
        and any model metrics on this data at the end of each epoch.
        The validation data is selected from the last samples in the x and y data provided, before shuffling.
        This argument is not supported when x is a dataset, generator or keras.utils.Sequence instance.
    :param shape_raw: List of sizes of each hidden layer.
        For example [64] will generate a CVNN with one hidden layer of size 64.
        Default None will default to example.
    :param activation: Activation function to be used at each hidden layer
    :param verbose: Different modes according to number:
        - 0 or 'silent': No output at all
        - 1 or False: Progress bar per iteration
        - 2 or True or 'debug': Progress bar per epoch
    :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()
    :param tensorboard: If True, it will generate tensorboard outputs to check training values.
    :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)
    :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or
        trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)
        - True, it creates a capacity-equivalent model in terms of trainable parameters
        - False, it will double all layer size (except the last one if classifier=True)
    :param equiv_technique: Used to define the strategy of the capacity equivalent model.
        This parameter is ignored if capacity_equivalent=False
    :param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.
    :param models: List of models to be compared. If given, they replace the auto-generated CVNN/RVNN pair.
    :param plot_data: If True, save plots of the generated dataset into the run folder.
    :param early_stop: Forwarded to run_montecarlo. Only honoured when `models` is given
        (mlp_run_real_comparison_montecarlo does not accept it).
    :param shuffle: Whether to shuffle the data at each iteration.
    :return: (string) Full path to the run_data.csv generated file.
        It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.
    """
    # Get parameters
    if param_list is None:
        param_list = [
            [0.3, 1, 1],
            [-0.3, 1, 1]
        ]
    dataset = dp.CorrelatedGaussianCoeffCorrel(m, n, param_list, debug=False)
    print("Database loaded...")
    if models is not None:
        return run_montecarlo(models=models, dataset=dataset, open_dataset=None,
                              iterations=iterations, epochs=epochs, batch_size=batch_size, display_freq=display_freq,
                              validation_split=validation_split, validation_data=None,
                              verbose=verbose, polar=polar, do_all=do_all, tensorboard=tensorboard, do_conf_mat=False,
                              plot_data=plot_data, early_stop=early_stop, shuffle=shuffle)
    else:
        # Bugfix: `shuffle` is now forwarded (it was silently dropped on this path).
        # NOTE: early_stop cannot be forwarded; mlp_run_real_comparison_montecarlo has no such parameter.
        return mlp_run_real_comparison_montecarlo(dataset=dataset, open_dataset=None, iterations=iterations,
                                                  epochs=epochs, batch_size=batch_size, display_freq=display_freq,
                                                  optimizer=optimizer, shape_raw=shape_raw, activation=activation,
                                                  verbose=verbose, polar=polar, do_all=do_all,
                                                  tensorboard=tensorboard,
                                                  capacity_equivalent=capacity_equivalent,
                                                  equiv_technique=equiv_technique,
                                                  dropout=dropout, validation_split=validation_split,
                                                  plot_data=plot_data, shuffle=shuffle)
def run_montecarlo(models: List[Model], dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path] = None,
                   iterations: int = 30,
                   epochs: int = 300, batch_size: int = 100, display_freq: int = 1,
                   validation_split: float = 0.2,
                   validation_data: Optional[Union[Tuple, data.Dataset]] = None,
                   # TODO: Add validation data tuple details
                   verbose: Union[bool, int] = False, do_conf_mat: bool = False, do_all: bool = True,
                   tensorboard: bool = False,
                   polar: Optional[Union[str, List[Optional[str]], Tuple[Optional[str]]]] = None,
                   plot_data: bool = False, early_stop: bool = False, shuffle: bool = True,
                   preprocess_data: bool = True) -> str:
    """
    This function is used to compare different neural networks performance.
    1. Runs simulation and compares them.
    2. Saves several files into ./log/montecarlo/date/of/run/
        2.1. run_summary.txt: Summary of the run models and data
        2.2. run_data.csv: Full information of performance of iteration of each model at each epoch
        2.3. <model_name>_statistical_result.csv: Statistical results of all iterations of each model per epoch
        2.4. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()
    :param models: List of cvnn.CvnnModel to be compared.
    :param dataset: cvnn.dataset.Dataset with the dataset to be used on the training
    :param open_dataset: (Default: None)
        If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)
    :param iterations: Number of iterations to be done for each model
    :param epochs: Number of epochs for each iteration
    :param batch_size: Batch size at each iteration
    :param display_freq: Frequency in terms of epochs of when to do a checkpoint.
    :param verbose: Different modes according to number:
        - 0 or 'silent': No output at all
        - 1 or False: Progress bar per iteration
        - 2 or True or 'debug': Progress bar per epoch
    :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)
    :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()
    :param tensorboard: If True, it will generate tensorboard outputs to check training values.
    :param validation_split: Float between 0 and 1.
        Percentage of the input data to be used as test set (the rest will be use as train set)
        Default: 0.0 (No validation set).
        This input is ignored if validation_data is given.
    :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.
        The model will not be trained on this data. This parameter takes precedence over validation_split.
        It can be:
            - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).
            - A tf.data dataset.
    :param do_conf_mat: Generate a confusion matrix based on results.
    :param plot_data: If True and `dataset` is a cvnn Dataset, save plots of the data into the run folder.
    :param early_stop: Forwarded to MonteCarlo.run (presumably enables early stopping -- confirm there).
    :param shuffle: Whether to shuffle the data at each iteration.
    :param preprocess_data: If False, the datasets are passed to the models without the
        complex/real dtype adaptation step.
    :return: (string) Full path to the run_data.csv generated file.
        It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.
    """
    if open_dataset:
        dataset = dp.OpenDataset(open_dataset)  # Warning, open_dataset overwrites dataset
    # Monte Carlo
    monte_carlo = MonteCarlo()
    for model in models:
        # model.training_param_summary()
        monte_carlo.add_model(model)
    if not open_dataset and isinstance(dataset, dp.Dataset):
        dataset.save_data(monte_carlo.monte_carlo_analyzer.path)
    monte_carlo.output_config['excel_summary'] = False
    monte_carlo.output_config['tensorboard'] = tensorboard
    monte_carlo.output_config['confusion_matrix'] = do_conf_mat
    monte_carlo.output_config['plot_all'] = do_all
    if plot_data and isinstance(dataset, dp.Dataset):
        dataset.plot_data(overlapped=True, showfig=False, save_path=monte_carlo.monte_carlo_analyzer.path,
                          library='matplotlib')
    # A raw (non-Dataset) input is passed straight through with no labels or summary.
    if isinstance(dataset, dp.Dataset):
        x = dataset.x
        y = dataset.y
        data_summary = dataset.summary()
    else:
        x = dataset
        y = None
        data_summary = ""
    monte_carlo.run(x, y, iterations=iterations,
                    validation_split=validation_split, validation_data=validation_data,
                    epochs=epochs, batch_size=batch_size, display_freq=display_freq, early_stop=early_stop,
                    shuffle=shuffle, verbose=verbose, data_summary=data_summary, real_cast_modes=polar,
                    process_dataset=preprocess_data)
    # Save data to remember later what I did.
    _save_montecarlo_log(iterations=iterations,
                         path=str(monte_carlo.monte_carlo_analyzer.path),
                         models_names=[str(model.name) for model in models],
                         dataset_name=data_summary,
                         num_classes=str(dataset.y.shape[1]) if isinstance(dataset, dp.Dataset) else "",  # TODO: GET THIS
                         polar_mode=str(polar),
                         dataset_size=str(dataset.x.shape[0]) if isinstance(dataset, dp.Dataset) else "",
                         features_size=str(dataset.x.shape[1]) if isinstance(dataset, dp.Dataset) else "",
                         epochs=epochs, batch_size=batch_size
                         # filename='./log/run_data.csv'
                         )
    return str("./log/run_data.csv")
def mlp_run_real_comparison_montecarlo(dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path] = None,
                                       iterations: int = 30,
                                       epochs: int = 300, batch_size: int = 100, display_freq: int = 1,
                                       optimizer='adam',  # TODO: Typing
                                       shape_raw=None, activation: t_activation = 'cart_relu',
                                       output_activation: t_activation = DEFAULT_OUTPUT_ACT,
                                       verbose: Union[bool, int] = False, do_all: bool = True,
                                       polar: Optional[Union[str, List[Optional[str]], Tuple[Optional[str]]]] = None,
                                       dropout: float = 0.5, validation_split: float = 0.2,
                                       validation_data: Optional[Union[Tuple, data.Dataset]] = None,
                                       # TODO: Add typing of tuple
                                       capacity_equivalent: bool = True, equiv_technique: str = 'ratio',
                                       shuffle: bool = True, tensorboard: bool = False, do_conf_mat: bool = False,
                                       plot_data: bool = True) -> str:
    """
    This function is used to compare CVNN vs RVNN performance over any dataset.
    1. Automatically creates two Multi-Layer Perceptrons (MLP), one complex and one real.
    2. Runs simulation and compares them.
    3. Saves several files into ./log/montecarlo/date/of/run/
        3.1. run_summary.txt: Summary of the run models and data
        3.2. run_data.csv: Full information of performance of iteration of each model at each epoch
        3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch
        3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch
        3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()
    :param dataset: cvnn.dataset.Dataset with the dataset to be used on the training
    :param open_dataset: (None)
        If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)
    :param iterations: Number of iterations to be done for each model
    :param epochs: Number of epochs for each iteration
    :param batch_size: Batch size at each iteration
    :param display_freq: Frequency in terms of epochs of when to do a checkpoint.
    :param optimizer: Optimizer to be used. Keras optimizers are not allowed.
        Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.
    :param shape_raw: List of sizes of each hidden layer.
        For example [64] will generate a CVNN with one hidden layer of size 64.
        Default None will default to example.
    :param activation: Activation function to be used at each hidden layer
    :param output_activation: Activation function to be used at the output layer.
    :param verbose: Different modes according to number:
        - 0 or 'silent': No output at all
        - 1 or False: Progress bar per iteration
        - 2 or True or 'debug': Progress bar per epoch
    :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)
    :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()
    :param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.
    :param validation_split: Float between 0 and 1.
        Percentage of the input data to be used as test set (the rest will be use as train set)
        Default: 0.0 (No validation set).
        This input is ignored if validation_data is given.
    :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.
        The model will not be trained on this data. This parameter takes precedence over validation_split.
        It can be:
            - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).
            - A tf.data dataset.
    :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or
        trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)
        - True, it creates a capacity-equivalent model in terms of trainable parameters
        - False, it will double all layer size (except the last one if classifier=True)
    :param equiv_technique: Used to define the strategy of the capacity equivalent model.
        This parameter is ignored if capacity_equivalent=False
        - 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'
        - 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between
            multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.
    :param shuffle: Whether to shuffle the data at each iteration (forwarded to MonteCarlo.run).
    :param tensorboard: If True, it will generate tensorboard outputs to check training values.
    :param do_conf_mat: Generate a confusion matrix based on results.
    :param plot_data: If True, save plots of the dataset into the run folder.
    :return: (string) Full path to the run_data.csv generated file.
        It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.
    """
    if shape_raw is None:
        shape_raw = [64]
    if open_dataset:
        dataset = dp.OpenDataset(open_dataset)  # Warning, open_dataset overwrites dataset
    input_size = dataset.x.shape[1]  # Size of input
    output_size = dataset.y.shape[1]  # Size of output
    complex_network = get_mlp(input_size=input_size, output_size=output_size,
                              shape_raw=shape_raw, activation=activation, dropout=dropout,
                              output_activation=output_activation, optimizer=optimizer)
    # Monte Carlo
    monte_carlo = RealVsComplex(complex_network,
                                capacity_equivalent=capacity_equivalent, equiv_technique=equiv_technique)
    monte_carlo.output_config['tensorboard'] = tensorboard
    # monte_carlo.output_config['confusion_matrix'] = do_conf_mat
    monte_carlo.output_config['plot_all'] = do_all
    monte_carlo.output_config['excel_summary'] = False
    monte_carlo.output_config['confusion_matrix'] = do_conf_mat
    if plot_data:
        dataset.plot_data(overlapped=True, showfig=False, save_path=monte_carlo.monte_carlo_analyzer.path,
                          library='matplotlib')
    sleep(1)  # I have error if not because not enough time passed since creation of models to be in diff folders
    monte_carlo.run(dataset.x, dataset.y, iterations=iterations,
                    epochs=epochs, batch_size=batch_size, display_freq=display_freq,
                    shuffle=shuffle, verbose=verbose, data_summary=dataset.summary(), real_cast_modes=polar,
                    validation_split=validation_split, validation_data=validation_data)
    # Save data to remember later what I did.
    # Keep only the last epoch of each network to compute the final statistics.
    max_epoch = monte_carlo.pandas_full_data['epoch'].max()
    epoch_filter = monte_carlo.pandas_full_data['epoch'] == max_epoch
    complex_filter = monte_carlo.pandas_full_data['network'] == "complex_network"
    real_filter = monte_carlo.pandas_full_data['network'] == "real_network"
    complex_last_epochs = monte_carlo.pandas_full_data[epoch_filter & complex_filter]
    real_last_epochs = monte_carlo.pandas_full_data[epoch_filter & real_filter]
    complex_median_train = complex_last_epochs['accuracy'].median()
    real_median_train = real_last_epochs['accuracy'].median()
    try:
        complex_median = complex_last_epochs['val_accuracy'].median()
        real_median = real_last_epochs['val_accuracy'].median()
        complex_err = median_error(complex_last_epochs['val_accuracy'].quantile(.75),
                                   complex_last_epochs['val_accuracy'].quantile(.25), iterations)
        real_err = median_error(real_last_epochs['val_accuracy'].quantile(.75),
                                real_last_epochs['val_accuracy'].quantile(.25), iterations)
        winner = 'CVNN' if complex_median > real_median else 'RVNN'
    except KeyError:
        # No 'val_accuracy' column (no validation data): fall back to train accuracy.
        complex_median = None
        real_median = None
        complex_err = median_error(complex_last_epochs['accuracy'].quantile(.75),
                                   complex_last_epochs['accuracy'].quantile(.25), iterations)
        real_err = median_error(real_last_epochs['accuracy'].quantile(.75),
                                real_last_epochs['accuracy'].quantile(.25), iterations)
        if complex_median_train > real_median_train:
            winner = 'CVNN'
        elif complex_median_train == real_median_train:
            winner = None
        else:
            winner = 'RVNN'
    _save_rvnn_vs_cvnn_montecarlo_log(
        iterations=iterations,
        path=str(monte_carlo.monte_carlo_analyzer.path),
        dataset_name=dataset.dataset_name,
        optimizer=str(complex_network.optimizer.__class__),
        loss=str(complex_network.loss.__class__),
        hl=str(len(shape_raw)), shape=str(shape_raw),
        dropout=str(dropout), num_classes=str(dataset.y.shape[1]),
        polar_mode=str(polar),
        activation=activation,
        dataset_size=str(dataset.x.shape[0]), feature_size=str(dataset.x.shape[1]),
        epochs=epochs, batch_size=batch_size,
        winner=winner,
        complex_median=complex_median, real_median=real_median,
        complex_median_train=complex_median_train, real_median_train=real_median_train,
        complex_err=complex_err, real_err=real_err,
        filename='./log/mlp_montecarlo_summary.xlsx'
    )
    return str(monte_carlo.monte_carlo_analyzer.path / "run_data.csv")
def get_mlp(input_size, output_size,
            shape_raw=None, activation="cart_relu", dropout=0.5,
            output_activation='softmax_real_with_abs', optimizer="sgd", name="complex_network"):
    """
    Build and compile a complex-valued MLP.

    :param input_size: Shape of the input vector.
    :param output_size: Number of output units (classes).
    :param shape_raw: Hidden-layer sizes; None defaults to [100, 50], [] means no hidden layer.
    :param activation: Activation of every hidden layer.
    :param dropout: Dropout rate after each hidden layer, or None to disable dropout.
    :param output_activation: Activation of the output layer.
    :param optimizer: Optimizer passed to compile().
    :param name: Name of the resulting keras model.
    :return: A compiled tf.keras.Sequential with categorical cross-entropy loss and accuracy metric.
    """
    if shape_raw is None:
        shape_raw = [100, 50]
    model_layers = [layers.ComplexInput(input_shape=input_size)]
    if not shape_raw:
        # Degenerate case: no hidden layers, a single complex dense output layer.
        model_layers.append(ComplexDense(units=output_size, activation=output_activation,
                                         input_dtype=np.complex64))
    else:
        for units in shape_raw:
            model_layers.append(ComplexDense(units=units, activation=activation))
            if dropout is not None:
                model_layers.append(ComplexDropout(rate=dropout))
        model_layers.append(ComplexDense(units=output_size, activation=output_activation))
    network = tf.keras.Sequential(model_layers, name=name)
    network.compile(optimizer=optimizer, loss=tf.keras.losses.CategoricalCrossentropy(),
                    metrics=['accuracy'])
    return network
# ====================================
# Excel logging
# ====================================
def _column_letter(index: int) -> str:
    """Return the spreadsheet column name for a 1-based index (1 -> 'A', 26 -> 'Z', 27 -> 'AA')."""
    letters = ''
    while index > 0:
        index, remainder = divmod(index - 1, 26)
        letters = chr(ord('A') + remainder) + letters
    return letters


def _create_excel_file(fieldnames: List[str], row_data: List, filename: Optional[t_path] = None,
                       percentage_cols: Optional[List[str]] = None):
    """
    Append `row_data` as a new row of the excel table stored in `filename`,
    creating the file (with `fieldnames` as header) if it does not exist yet.

    :param fieldnames: Column headers, used only when the file is first created.
    :param row_data: Values of the new row; must align with `fieldnames`.
    :param filename: Target .xlsx path; defaults to './log/montecarlo_summary.xlsx'.
    :param percentage_cols: Optional column letters whose new cell gets a percentage format.
    """
    if filename is None:
        filename = './log/montecarlo_summary.xlsx'
    file_exists = os.path.isfile(filename)
    if file_exists:
        wb = load_workbook(filename)
        ws = wb.worksheets[0]
        # The table is deleted and re-added below so its range covers the new row.
        del ws.tables["Table1"]
    else:
        wb = Workbook()
        ws = wb.worksheets[0]
        ws.append(fieldnames)
    ws.append(row_data)
    # Bugfix: chr(64 + n) only works for up to 26 columns ('Z'); _column_letter
    # produces 'AA', 'AB', ... for wider tables.
    tab = Table(displayName="Table1", ref="A1:" + _column_letter(len(row_data)) + str(ws.max_row))
    if percentage_cols is not None:
        for col in percentage_cols:
            ws[col + str(ws.max_row)].number_format = '0.00%'
    ws.add_table(tab)
    wb.save(filename)
def _save_rvnn_vs_cvnn_montecarlo_log(iterations, path, dataset_name, hl, shape, dropout, num_classes, polar_mode,
                                      activation, optimizer, loss,
                                      dataset_size, feature_size, epochs, batch_size, winner,
                                      complex_median, real_median, complex_err, real_err,
                                      complex_median_train, real_median_train,
                                      comments='', filename=None):
    """
    Append one CVNN-vs-RVNN comparison row (dataset, model, results and library
    information) to the given excel summary file.
    """
    # Keep header and value for each column together so they cannot drift apart.
    columns = [
        ('iterations', iterations),
        ('dataset', dataset_name),
        ('# Classes', num_classes),
        ("Dataset Size", dataset_size),
        ('Feature Size', feature_size),
        ("Polar Mode", polar_mode),
        ("Optimizer", optimizer),
        ("Loss", str(loss)),
        ('HL', hl),
        ('Shape', shape),
        ('Dropout', dropout),
        ("Activation Function", activation),
        ('epochs', epochs),
        ('batch size', batch_size),
        ("Winner", winner),
        ("CVNN median", complex_median),
        ("RVNN median", real_median),
        ('CVNN err', complex_err),
        ('RVNN err', real_err),
        ("CVNN train median", complex_median_train),
        ("RVNN train median", real_median_train),
        ('path', path),
        ("cvnn version", cvnn.__version__),
        ("Comments", comments),
    ]
    fieldnames = [header for header, _ in columns]
    row_data = [value for _, value in columns]
    # Columns P..U hold the median/err values; write them with a percentage format.
    _create_excel_file(fieldnames, row_data, filename, percentage_cols=['P', 'Q', 'R', 'S', 'T', 'U'])
def _save_montecarlo_log(iterations, path, dataset_name, models_names, num_classes, polar_mode, dataset_size,
                         features_size, epochs, batch_size, filename=None):
    """
    Append one generic Monte Carlo summary row (dataset, models and library
    information) to the given excel summary file.
    """
    # Header/value pairs kept together so the columns cannot drift apart.
    columns = [
        ('iterations', iterations),
        ('dataset', dataset_name),
        ('# Classes', num_classes),
        ("Dataset Size", dataset_size),
        ('Feature Size', features_size),
        ('models', '-'.join(models_names)),
        ('epochs', epochs),
        ('batch size', batch_size),
        ("Polar Mode", polar_mode),
        ('path', path),
        ("cvnn version", cvnn.__version__),
    ]
    fieldnames = [header for header, _ in columns]
    row_data = [value for _, value in columns]
    _create_excel_file(fieldnames, row_data, filename)
if __name__ == "__main__":
    # Demo entry point: CVNN-vs-RVNN comparison on the correlated gaussian dataset.
    # Base case with one hidden layer size 64 and dropout 0.5
    run_gaussian_dataset_montecarlo(iterations=10, dropout=0.5)
| 59.392534 | 128 | 0.629793 | [
"MIT"
] | NEGU93/cvnn | cvnn/montecarlo.py | 52,503 | Python |
# -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class DropboxHookTests(WebhookTestCase):
    """Tests for the Dropbox webhook integration."""
    STREAM_NAME = 'test'
    URL_TEMPLATE = "/api/v1/external/dropbox?&api_key={api_key}&stream={stream}"
    FIXTURE_DIR_NAME = 'dropbox'

    def test_file_updated(self) -> None:
        """A file-update payload produces the expected stream message."""
        self.send_and_test_stream_message('file_updated',
                                          u"Dropbox",
                                          u"File has been updated on Dropbox!",
                                          content_type="application/x-www-form-urlencoded")

    def test_verification_request(self) -> None:
        """Dropbox's endpoint-verification GET must echo the challenge back."""
        self.subscribe(self.test_user, self.STREAM_NAME)
        challenge = '9B2SVL4orbt5DxLMqJHI6pOTipTqingt2YFMIO0g06E'
        params = {
            'stream_name': self.STREAM_NAME,
            'challenge': challenge,
            'api_key': self.test_user.api_key,
        }
        result = self.client_get(self.url, params)
        self.assert_in_response(challenge, result)

    def get_body(self, fixture_name: Text) -> Text:
        """Load the named JSON fixture from the dropbox fixtures directory."""
        return self.fixture_data("dropbox", fixture_name, file_type="json")
| 40.896552 | 93 | 0.682968 | [
"Apache-2.0"
] | Romdeau/zulip | zerver/webhooks/dropbox/tests.py | 1,186 | Python |
from typing import Dict, Iterator, List, Optional, Tuple, Union
from ..constant.util import Amount, ItemPointer, Number
from .item_wrapper import item_type
from .other_wrapper import *
# Names exported by `from <module> import *`.
__all__ = [
    'ItemType', 'Item', 'Empty',
    'Accessory', 'EnchantedBook', 'ReforgeStone', 'TravelScroll',
    'Bow', 'Sword',
    'Axe', 'Pickaxe', 'Drill', 'Hoe', 'FishingRod', 'Armor', 'Pet', 'Minion',
    'Resource',
    'Crop', 'Mineral', 'Log', 'Mob',
    'Recipe', 'RecipeGroup', 'Collection',
    'load_item',
]
class ItemType:
    """Base class shared by every item/object type in this module."""
    pass
# Generic stackable item.
# NOTE(review): the mutable default `[]` for `abilities` is shared between instances
# unless the item_type decorator copies it -- confirm against the decorator.
@item_type
class Item(ItemType):
    name: str
    count: int = 1
    # common | uncommon | rare | epic | legendary |
    # mythic | supreme | special | very_special
    rarity: str = 'common'
    abilities: List[str] = []
@item_type
class Empty(ItemType):
    """An empty slot; its repr is the literal string '{}'."""
    def __repr__(self):
        return '{}'
@item_type
class Accessory(ItemType):
    """Talisman-style accessory; `modifier` holds an optional reforge name."""
    name: str
    rarity: str = 'common'
    modifier: Optional[str] = None
    abilities: List[str] = []
@item_type
class Armor(ItemType):
    """One armor piece with its stat bonuses, upgrades, and requirements."""
    name: str
    rarity: str
    # helmet | chestplate | leggings | boots
    part: str
    strength: int = 0
    crit_chance: int = 0
    crit_damage: int = 0
    health: int = 0
    defense: int = 0
    intelligence: int = 0
    speed: int = 0
    magic_find: int = 0
    mining_speed: int = 0
    mining_fortune: int = 0
    true_defense: int = 0
    ferocity: int = 0
    sea_creature_chance: int = 0
    modifier: Optional[str] = None        # reforge applied, if any
    enchantments: Dict[str, int] = {}     # enchantment name -> level
    hot_potato: int = 0                   # hot potato book upgrades applied
    stars: Optional[int] = None           # dungeon star upgrades
    combat_skill_req: Optional[int] = None
    dungeon_skill_req: Optional[int] = None
    dungeon_completion_req: Optional[int] = None
    abilities: List[str] = []
@item_type
class Axe(ItemType):
    """Foraging tool; `tool_speed` governs chopping rate."""
    name: str
    rarity: str
    tool_speed: int
    modifier: Optional[str] = None
    enchantments: Dict[str, int] = {}
    abilities: List[str] = []
@item_type
class Bow(ItemType):
    """Ranged weapon with combat stat bonuses and upgrade slots."""
    name: str
    rarity: str
    damage: int
    count: int = 1
    strength: int = 0
    crit_chance: int = 0
    crit_damage: int = 0
    attack_speed: int = 0
    intelligence: int = 0
    modifier: Optional[str] = None        # reforge applied, if any
    enchantments: Dict[str, int] = {}     # enchantment name -> level
    hot_potato: int = 0
    stars: Optional[int] = None
    combat_skill_req: Optional[int] = None
    dungeon_skill_req: Optional[int] = None
    dungeon_completion_req: Optional[int] = None
    abilities: List[str] = []
@item_type
class Drill(ItemType):
    """Powered mining tool; like Pickaxe but with mining fortune."""
    name: str
    rarity: str
    breaking_power: int
    mining_speed: int
    mining_fortune: int = 0
    damage: int = 0
    modifier: Optional[str] = None
    enchantments: Dict[str, int] = {}
    abilities: List[str] = []
@enchanted_book_type
@item_type
class EnchantedBook(ItemType):
    """Book carrying one or more enchantments to apply to other items."""
    enchantments: Dict[str, int] = {}     # enchantment name -> level
    name: str = 'enchanted_book'
    rarity: str = 'common'
@item_type
class FishingRod(ItemType):
    """Fishing tool with fishing-specific stats and upgrades."""
    name: str
    rarity: str
    damage: int = 0
    strength: int = 0
    ferocity: int = 0
    fishing_speed: int = 0
    sea_creature_chance: int = 0
    modifier: Optional[str] = None
    enchantments: Dict[str, int] = {}
    hot_potato: int = 0
    stars: Optional[int] = None
    fishing_skill_req: Optional[int] = None
    abilities: List[str] = []
@item_type
class Hoe(ItemType):
    """Farming tool; carries only a reforge and enchantments."""
    name: str
    rarity: str
    modifier: Optional[str] = None
    enchantments: Dict[str, int] = {}
@item_type
class Minion(ItemType):
    """Placeable minion; produces on a `cooldown` cycle into `slots` storage."""
    name: str
    tier: str
    cooldown: Number
    slots: int
@item_type
class Pet(ItemType):
    """Companion pet; `exp`/`candy_used` track progress, stats buff the player."""
    name: str
    rarity: str
    category: str = None
    exp: float = 0.0
    candy_used: int = 0
    active: bool = False          # whether this pet is currently equipped
    health: int = 0
    defense: int = 0
    speed: int = 0
    true_defense: int = 0
    intelligence: int = 0
    strength: int = 0
    crit_chance: int = 0
    crit_damage: int = 0
    damage: int = 0
    magic_find: int = 0
    attack_speed: int = 0
    ferocity: int = 0
    sea_creature_chance: int = 0
    abilities: List = []
@item_type
class Pickaxe(ItemType):
    """Mining tool; `breaking_power` gates which minerals it can break."""
    name: str
    rarity: str
    breaking_power: int
    mining_speed: int
    damage: int = 0
    modifier: Optional[str] = None
    enchantments: Dict[str, int] = {}
    abilities: List[str] = []
@item_type
class ReforgeStone(ItemType):
    """Stone that applies `modifier` to items of `category`; `cost` is per-rarity."""
    name: str
    modifier: Optional[str] = None
    category: Optional[str] = None
    rarity: str = 'common'
    cost: Tuple[int] = (0, 0, 0, 0, 0, 0)
    mining_skill_req: Optional[int] = None
@item_type
class Sword(ItemType):
    """Melee weapon with combat stat bonuses and upgrade slots."""
    name: str
    rarity: str
    count: int = 1
    damage: int = 0
    strength: int = 0
    crit_chance: int = 0
    crit_damage: int = 0
    attack_speed: int = 0
    defense: int = 0
    intelligence: int = 0
    true_defense: int = 0
    ferocity: int = 0
    speed: int = 0
    modifier: Optional[str] = None        # reforge applied, if any
    enchantments: Dict[str, int] = {}     # enchantment name -> level
    hot_potato: int = 0
    stars: Optional[int] = None
    combat_skill_req: Optional[int] = None
    dungeon_skill_req: Optional[int] = None
    dungeon_completion_req: Optional[int] = None
    abilities: List[str] = []
@item_type
class TravelScroll(ItemType):
    """Consumable that teleports to `island` (optionally a specific `zone`)."""
    name: str
    island: str
    zone: Optional[str] = None
    rarity: str = 'rare'
# Maps the serialized 'type' tag to its ItemType class; consumed by load_item.
OBJECT_NAMES = {
    'item': Item,
    'empty': Empty,
    'accessory': Accessory,
    'armor': Armor,
    'axe': Axe,
    'bow': Bow,
    'drill': Drill,
    'enchanted_book': EnchantedBook,
    'fishing_rod': FishingRod,
    'hoe': Hoe,
    'minion': Minion,
    'pet': Pet,
    'pickaxe': Pickaxe,
    'reforge_stone': ReforgeStone,
    'sword': Sword,
    'travel_scroll': TravelScroll,
}
class Resource:
    """Base class for gatherable world resources (crops, logs, minerals)."""
    def type(self):
        # Intentionally shadows the builtin name `type` as a method name;
        # returns the concrete subclass name as a string.
        return type(self).__name__
@resource_type
class Crop(Resource):
    """Harvestable crop yielding `amount` drops and farming XP."""
    name: str
    amount: int = 1
    farming_exp: Number = 1
@resource_type
class Log(Resource):
    """Choppable log; `hardness` affects break time."""
    name: str
    hardness: int = 2
    foraging_exp: Number = 1
@resource_type
class Mineral(Resource):
    """Minable block dropping `drop` x `amount`; gated by `breaking_power`."""
    name: str
    drop: str
    amount: int = 1
    breaking_power: int = 0
    hardness: Number = 2
    exp: Amount = 1
    mining_exp: Number = 1
    mithril_powder: Amount = 0
@mob_type
class Mob:
    """Combat mob with stats, rewards, and a weighted drop table."""
    name: str
    level: int
    health: int
    defense: int = 0
    damage: int = 0
    true_damage: int = 0
    coins: int = 0
    exp: int = 0
    farming_exp: int = 0
    combat_exp: int = 0
    fishing_exp: int = 0
    # (item pointer, amount, drop rarity tier, drop chance) tuples
    drops: List[Tuple[ItemPointer, Amount, str, Number]] = []
@recipe_type
class Recipe:
    """Crafting recipe: `ingredients` -> `result`, optionally collection-gated."""
    name: str
    category: str
    ingredients: List[ItemPointer]
    result: ItemPointer
    collection_req: Optional[Tuple[str, int]] = None
    # slayer_req: Optional[Tuple[str, int]] = None
@recipe_group_type
class RecipeGroup:
    """Named group of recipe names shown together, optionally collection-gated."""
    name: str
    category: str
    recipes: List[str]
    collection_req: Optional[Tuple[str, int]] = None
    # slayer_req: Optional[Tuple[str, int]] = None
@collection_type
class Collection:
    """Collection progression; `levels` holds (threshold, reward) tuples.

    Iterating the collection iterates its levels.
    """
    name: str
    category: str
    levels: List[Tuple[int, Union[str, Tuple[str], Number]]]
    def __iter__(self, /) -> Iterator:
        return iter(self.levels)
def load_item(obj, /):
    """Deserialize *obj* into a concrete ItemType instance.

    Already-constructed ItemType instances pass through unchanged; a dict
    without a 'type' tag yields an Empty slot; otherwise the tag is looked
    up in OBJECT_NAMES and the matching class deserializes the dict.

    Raises:
        ValueError: if the 'type' tag is not a known object name.
    """
    if isinstance(obj, ItemType):
        return obj
    if 'type' not in obj:
        return Empty()
    kind = obj['type']
    cls = OBJECT_NAMES.get(kind)
    if cls is None:
        raise ValueError(f"invalid item obj type: {kind!r}")
    return cls.from_obj(obj)
| 19.423684 | 77 | 0.618886 | [
"MIT"
] | peter-hunt/skyblock | skyblock/object/object.py | 7,381 | Python |
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia collection learner view."""
import functools

from core.controllers import base
from core.domain import collection_services
from core.domain import config_domain
from core.domain import rights_manager
from core.platform import models

import utils
(user_models,) = models.Registry.import_models([models.NAMES.user])
def require_collection_playable(handler):
    """Decorator that checks if the user can play the given collection.

    The wrapped handler is invoked only when the current user may both
    play and view the collection; otherwise a 404 is raised.
    """
    # functools.wraps preserves the handler's __name__/__doc__ so logging
    # and introspection report the real handler instead of 'test_can_play'.
    @functools.wraps(handler)
    def test_can_play(self, collection_id, **kwargs):
        """Check if the current user can play the collection."""
        actor = rights_manager.Actor(self.user_id)
        can_play = actor.can_play(
            rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
        can_view = actor.can_view(
            rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id)
        if can_play and can_view:
            return handler(self, collection_id, **kwargs)
        else:
            raise self.PageNotFoundException
    return test_can_play
class CollectionPage(base.BaseHandler):
    """Page describing a single collection."""

    PAGE_NAME_FOR_CSRF = 'collection'

    @require_collection_playable
    def get(self, collection_id):
        """Handles GET requests."""
        try:
            collection = collection_services.get_collection_by_id(
                collection_id)
        except Exception as e:
            raise self.PageNotFoundException(e)

        editor_whitelist = (
            config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value)
        banned_usernames = config_domain.BANNED_USERNAMES.value
        # Editable only by whitelisted, non-banned users with edit rights.
        can_edit = (
            bool(self.username) and
            self.username in editor_whitelist and
            self.username not in banned_usernames and
            rights_manager.Actor(self.user_id).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id))

        self.values.update({
            'can_edit': can_edit,
            'is_logged_in': bool(self.user_id),
            'collection_id': collection_id,
            'collection_title': collection.title,
            'collection_skills': collection.skills,
            'is_private': rights_manager.is_collection_private(collection_id),
            'meta_name': collection.title,
            'meta_description': utils.capitalize_string(collection.objective)
        })

        self.render_template('collection_player/collection_player.html')
class CollectionDataHandler(base.BaseHandler):
    """Provides the data for a single collection."""

    def get(self, collection_id):
        """Populates the data on the individual collection page."""
        allow_invalid_explorations = bool(
            self.request.get('allow_invalid_explorations'))

        try:
            collection_dict = (
                collection_services.get_learner_collection_dict_by_id(
                    collection_id, self.user_id,
                    allow_invalid_explorations=allow_invalid_explorations))
        except Exception as e:
            raise self.PageNotFoundException(e)

        can_edit = (
            self.user_id and rights_manager.Actor(self.user_id).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id))
        self.values.update({
            'can_edit': can_edit,
            'collection': collection_dict,
            'info_card_image_url': utils.get_info_card_url_for_category(
                collection_dict['category']),
            'is_logged_in': bool(self.user_id),
            'session_id': utils.generate_new_session_id(),
        })
        self.render_json(self.values)
| 38.850467 | 78 | 0.675487 | [
"Apache-2.0"
] | Himanshu1495/oppia | core/controllers/collection_viewer.py | 4,157 | Python |
"""Tools for converting to other outputs."""
def to_getdist(nested_samples):
    """Convert from anesthetic to getdist samples.

    Parameters
    ----------
    nested_samples: MCMCSamples or NestedSamples
        anesthetic samples to be converted

    Returns
    -------
    getdist_samples: getdist.mcsamples.MCSamples
        getdist equivalent samples
    """
    # getdist is an optional dependency, so it is imported lazily here.
    import getdist
    names = nested_samples.columns
    return getdist.mcsamples.MCSamples(
        samples=nested_samples.to_numpy(),
        weights=nested_samples.weights,
        # getdist expects -2*logL (chi-squared convention).
        loglikes=-2*nested_samples.logL.to_numpy(),
        ranges={name: nested_samples._limits(name) for name in names},
        names=names)
| 31.928571 | 67 | 0.58613 | [
"MIT"
] | Stefan-Heimersheim/anesthetic | anesthetic/convert.py | 894 | Python |
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
def main():
    """Minimal Ansible module entry point that returns a static result."""
    # Accepted module options.
    argument_spec = dict(
        name=dict(type="str", required=True),
        new=dict(type="bool", required=False, default=False),
    )
    # AnsibleModule parses the incoming arguments against the spec.
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    # Standard result payload; exit_json serializes it and exits.
    outcome = dict(changed=False, original_message="Something", message="It worked!!!")
    module.exit_json(**outcome)


if __name__ == "__main__":
    main()
| 23.5 | 86 | 0.690671 | [
"Apache-2.0"
] | kinther/ansible_course | bonus2/collateral/modules/library/test_module.py | 611 | Python |
# -*- coding: utf-8 -*-
# @Time : 2019/7/18 上午9:54
# @Author : Lart Pang
# @FileName: metric.py
# @Project : MINet
# @GitHub : https://github.com/lartpang
import numpy as np
def cal_pr_mae_meanf(prediction, gt):
    """Compute the PR curve, MAE, and adaptive mean F-measure for one image.

    Parameters
    ----------
    prediction, gt : np.ndarray of uint8, same shape
        Saliency prediction and ground-truth mask.

    Returns
    -------
    precision, recall : (256, 1) arrays over the 256 thresholds k/255
    mae : float, mean absolute error between normalized prediction and
        the binarized ground truth
    meanf : F-measure (beta^2 = 0.3) at the adaptive threshold 2*mean
    """
    assert prediction.dtype == np.uint8
    assert gt.dtype == np.uint8
    assert prediction.shape == gt.shape

    # Normalize the prediction into [0, 1]; a constant map is scaled by 255.
    lo, hi = prediction.min(), prediction.max()
    if hi == lo:
        pred = prediction / 255
    else:
        pred = (prediction - lo) / (hi - lo)

    # Binarize ground truth at the conventional 128 cut-off.
    hard_gt = np.zeros_like(gt)
    hard_gt[gt > 128] = 1

    # MAE ##################################################################
    mae = np.mean(np.abs(pred - hard_gt))

    # MeanF: adaptive threshold at twice the mean, capped at 1 #############
    adaptive_thr = 2 * pred.mean()
    if adaptive_thr > 1:
        adaptive_thr = 1
    binary = np.zeros_like(pred)
    binary[pred >= adaptive_thr] = 1
    tp = (binary * hard_gt).sum()
    if tp == 0:
        meanf = 0
    else:
        pre = tp / binary.sum()
        rec = tp / hard_gt.sum()
        meanf = 1.3 * pre * rec / (0.3 * pre + rec)

    # PR curve over the 256 uint8 thresholds ###############################
    t = np.sum(hard_gt)
    precision, recall = [], []
    for k in range(256):
        thr = k / 255.0
        hard_pred = np.zeros_like(pred)
        hard_pred[pred >= thr] = 1
        tp_k = np.sum(hard_pred * hard_gt)
        p_k = np.sum(hard_pred)
        if tp_k == 0:
            precision.append(0)
            recall.append(0)
        else:
            precision.append(tp_k / p_k)
            recall.append(tp_k / t)
    precision = np.reshape(precision, (256, 1))
    recall = np.reshape(recall, (256, 1))
    return precision, recall, mae, meanf
# MaxF #############################################################
def cal_maxf(ps, rs):
    """Return the maximum F-measure (beta^2 = 0.3) over a 256-point PR curve."""
    assert len(ps) == 256
    assert len(rs) == 256
    fmeasures = [
        0 if p == 0 or r == 0 else 1.3 * p * r / (0.3 * p + r)
        for p, r in zip(ps, rs)
    ]
    return max(fmeasures)
| 30.807229 | 92 | 0.492374 | [
"MIT"
] | Farzanehkaji/MINet | code/utils/metric.py | 2,605 | Python |
import os
import logging
from typing import Dict, Union
from datetime import timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import mlflow
import torch
import pytorch_lightning as pl
import pprint
pp = pprint.PrettyPrinter(indent=4)
from arnet import utils
from arnet.modeling.models import build_model
logger = logging.getLogger(__name__)
def build_test_logger(logged_learner):
    """Build a TensorBoard logger for testing, reusing the learner's logging
    location but suffixing the version with '_test' to keep runs separate."""
    test_version = logged_learner.logger_version + '_test'
    return pl.loggers.TensorBoardLogger(
        logged_learner.logger_save_dir,
        name=logged_learner.logger_name,
        version=test_version,
    )
class Learner(pl.LightningModule):
    """Lightning wrapper around a model built from ``cfg``.

    Implements the train/val/test/predict steps and mirrors metrics,
    figures and artifacts to both TensorBoard and MLflow.
    """
    def __init__(self, cfg):
        """
        cfg: model-agnostic experiment configs; also used to build the model.
        """
        #super(Learner, self).__init__()
        super().__init__()
        self.cfg = cfg
        # Whether magnetogram images are among the inputs; gates the
        # image/video logging in training_step.
        self.image = 'MAGNETOGRAM' in cfg.DATA.FEATURES
        self.model = build_model(cfg)
        # Writes cfg into self.hparams (saved with checkpoints; also feeds
        # the tensorboard hp_metric).
        self.save_hyperparameters()
    def forward(self, *args, **kwargs):
        """Delegate directly to the wrapped model."""
        return self.model(*args, **kwargs)
    def on_load_checkpoint(self, checkpoint) -> None:
        # Intentionally a no-op. Directory layout reminder:
        # log_dev / lightning_logs / version_0 / checkpoints / epoch=0-step=4.ckpt
        # =======================================
        # save_dir / (name)          (version)
        # ------- root_dir ---------/
        # ------------ log_dir ----------------/
        # ckpt_list = checkpoint['hyper_parameters']['cfg']['LEARNER']['CHECKPOINT'].split('/')
        # self.logger_save_dir, self.logger_name, self.logger_version = (
        #     ckpt_list[-5], ckpt_list[-4], ckpt_list[-3])
        # I gave up modifying test log dir because it requires checkpoint['callbacks']["ModelCheckpoint{'monitor': 'validation0/tss', 'mode': 'max', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': True}"]['best_model_path']
        pass
    def grad_norm(self, norm_type: Union[float, int, str]) -> Dict[str, float]:
        """Compute each parameter's gradient's norm and their overall norm.

        The overall norm is computed over all gradients together, as if they
        were concatenated into a single vector.

        Args:
            norm_type: The type of the used p-norm, cast to float if necessary.
                Can be ``'inf'`` for infinity norm.

        Return:
            norms: The dictionary of p-norms of each parameter's gradient and
                a special entry for the total p-norm of the gradients viewed
                as a single vector.
        """
        #norm_type = float(norm_type)
        norms, all_norms = {}, []
        for name, p in self.named_parameters():
            # Strip the leading 'model.' prefix for cleaner metric keys.
            if name.split('.')[0] == 'model':
                name = name[6:]
            if p.grad is None:
                continue
            param_norm = float(p.data.norm(norm_type))
            grad_norm = float(p.grad.data.norm(norm_type))
            norms[f'grad_{norm_type}_norm/{name}'] = {
                'param': param_norm,
                'grad': grad_norm,
            }
            # NOTE(review): the total below is over *param* norms although the
            # docstring speaks of gradients -- confirm which is intended.
            all_norms.append(param_norm)
        total_norm = float(torch.tensor(all_norms).norm(norm_type))
        norms[f'grad_{norm_type}_norm/total'] = round(total_norm, 3)
        return norms
    def _check_nan_loss(self, loss):
        """On a NaN loss, dump per-parameter norms to help locate the blow-up."""
        if torch.isnan(loss):
            norms = self.grad_norm(1)
            import json
            print(json.dumps(norms, indent=2))
    def training_step(self, batch, batch_idx):
        """One optimization step; logs loss and (optionally) visual diagnostics."""
        loss = self.model.get_loss(batch)
        self._check_nan_loss(loss)
        # Scalar(s)
        self.log('train/loss', loss)
        mlflow.log_metric('train/loss', loss.item(), step=self.global_step)
        mlflow.log_metric('train/epoch', self.trainer.current_epoch, step=self.global_step)
        if self.image:
            # Text: per-batch metadata table, once per epoch
            if self.global_step in [0] or batch_idx == 0:
                self.log_meta(self.model.result)
            # Input videos (padded) -- disabled
            if False: #self.global_step in [0] or batch_idx == 0:
                self.log_video('train/inputs', x)
            # Layer weight -- disabled (not changing fast enough within first epoch)
            if False: #self.current_epoch == 0 and batch_idx in [0, 1, 2, 5, 10, 20, 50, 100]:
                self.log_layer_weights('weight', ['convs.conv1'])
            # Middle layer features -- disabled
            if False: #self.global_step in [0] or batch_idx == 0:
                self.log_layer_activations('train features', self.model.result['video'], self.cfg.LEARNER.VIS.ACTIVATIONS)
            # Weight histograms
            if True: #self.global_step in [0] or batch_idx == 0:
                for layer_name in self.cfg.LEARNER.VIS.HISTOGRAM:
                    self.logger.experiment.add_histogram("weights/{} kernel".format(layer_name),
                                                         utils.get_layer(self.model, layer_name).weight, self.global_step)
            self.logger.experiment.flush()
        return {'loss': loss}
    def validation_step(self, batch, batch_idx, dataloader_idx):
        """Compute the loss and pass the model outputs through for epoch-end aggregation."""
        loss = self.model.get_loss(batch)
        result = self.model.result
        result.update({'val_loss': loss})
        return result
    def validation_epoch_end(self, outputs):
        """Aggregate per-dataloader outputs; log scores, confusion matrix and plots."""
        for dataloader_idx, dataloader_outputs in enumerate(outputs):
            tag = f'validation{dataloader_idx}'
            avg_val_loss = torch.stack([out['val_loss'] for out in dataloader_outputs]).mean()
            self.log(tag + '/loss', avg_val_loss)
            mlflow.log_metric(tag + '/loss', avg_val_loss.item(), step=self.global_step)
            if True:
                #step = -1 if self.global_step == 0 else None # before training
                step = None # use global_step
                self.log_layer_weights('weight', ['convs.conv1'], step=step)
            y_true = torch.cat([out['y_true'] for out in dataloader_outputs])
            y_prob = torch.cat([out['y_prob'] for out in dataloader_outputs])
            # Record predicted probabilities in the datamodule's history table.
            self.trainer.datamodule.fill_prob(tag, self.global_step, y_prob.detach().cpu().numpy())
            scores, cm2, _ = utils.get_metrics_probabilistic(y_true, y_prob, criterion=None)
            self.log_scores(tag, scores, step=self.global_step) # pp.pprint(scores)
            self.log_cm(tag + '/cm2', cm2, step=self.global_step)
            self.log_eval_plots(tag, y_true, y_prob, step=self.global_step)
        mlflow.log_artifacts(self.logger.log_dir, 'tensorboard/train_val')
    def test_step(self, batch, batch_idx):
        """Like validation_step, keyed as 'test_loss'."""
        loss = self.model.get_loss(batch)
        result = self.model.result
        result.update({'test_loss': loss})
        return result
    def test_epoch_end(self, outputs):
        """Aggregate test outputs; log scores, confusion matrix and plots."""
        avg_test_loss = torch.stack([out['test_loss'] for out in outputs]).mean()
        self.log('test/loss', avg_test_loss)
        y_true = torch.cat([out['y_true'] for out in outputs])
        y_prob = torch.cat([out['y_prob'] for out in outputs])
        self.trainer.datamodule.fill_prob('test', self.global_step, y_prob.detach().cpu().numpy())
        scores, cm2, thresh = utils.get_metrics_probabilistic(y_true, y_prob, criterion=None)
        #self.thresh = thresh
        logger.info(scores)
        logger.info(cm2)
        self.log_scores('test', scores)
        self.log_cm('test/cm2', cm2)
        self.log_eval_plots('test', y_true, y_prob)
        mlflow.log_artifacts(self.logger.log_dir, 'tensorboard/test')
    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = None):
        """Return predicted probabilities (thresholding left to the caller)."""
        _ = self.model.get_loss(batch)
        y_prob = self.model.result['y_prob']
        ###
        #self.thresh = 0.5
        ###
        return y_prob #y_prob >= 0.5 #self.thresh
    def configure_optimizers(self):
        """Plain Adam over all parameters with the configured learning rate."""
        return torch.optim.Adam(self.parameters(), lr=self.cfg.LEARNER.LEARNING_RATE)
    def on_train_end(self):
        """Export each validation split's prediction history to MLflow."""
        for tag, df in self.trainer.datamodule.val_history.items():
            if tag == 'test':
                continue # val_history['test'] does not update every epoch.
            tmp_path = 'outputs/val_predictions.csv'
            df.to_csv(tmp_path)
            mlflow.log_artifact(tmp_path, tag) # tag in ['validation0', ..., 'test']
    def on_test_end(self):
        """Export the test prediction history to MLflow."""
        tmp_path = 'outputs/test_predictions.csv'
        self.trainer.datamodule.val_history['test'].to_csv(tmp_path)
        mlflow.log_artifact(tmp_path, 'test')
    def log_meta(self, outputs, step=None):
        """Log a markdown table describing the batch (truth vs. probability)."""
        video = outputs['video']
        meta = outputs['meta']
        video = video.detach().cpu().numpy()
        y_true = outputs['y_true'].detach().cpu().numpy()
        y_prob = outputs['y_prob'].detach().cpu().numpy()
        info = utils.generate_batch_info_classification(video, meta, y_true=y_true, y_prob=y_prob)
        step = step or self.global_step
        self.logger.experiment.add_text("batch info", info.to_markdown(), step)
        return info
    def log_video(self, tag, video, size=None, normalized=False, step=None):
        """Log up to 8 input videos to TensorBoard and their frames to MLflow.

        NOTE(review): the default size=None would raise on `.detach()` below,
        yet `if size is not None` is checked later -- confirm callers always
        pass a tensor.
        """
        from skimage.transform import resize
        size = np.round(size.detach().cpu().numpy() * [38, 78] + [78, 157]).astype(int)
        # video: [N, C, T, H, W]
        if video.shape[0] > 8:
            video = video[:8]
        video = video.detach().permute(0, 2, 1, 3, 4).to('cpu', non_blocking=True) # convert to numpy may not be efficient in production
        # (N,C,D,H,W) -> (N,T,C,H,W)
        step = step or self.global_step
        if not normalized:
            video = utils.array_to_float_video(video * 50, low=-200, high=200, perc=False)
        self.logger.experiment.add_video(tag, video, step, fps=10)
        vs = video.detach().cpu().numpy()
        for i, v in enumerate(vs):
            for j, image in enumerate(v):
                image = image.transpose(1,2,0)
                if size is not None:
                    image = resize(image, size[i])
                mlflow.log_image(image, tag+f'/{i}_{j}.png')
    def log_layer_weights(self, tag, layer_names, step=None):
        """Render first-layer Conv3d kernels as images (unscaled + fixed scale)."""
        step = step or self.global_step
        from arnet.modeling.models import MODEL_REGISTRY
        if (isinstance(self.model, MODEL_REGISTRY.get('CNN_Li2020')) or
                isinstance(self.model, MODEL_REGISTRY.get('SimpleC3D'))):
            for layer_name in layer_names:
                layer = utils.get_layer(self.model, layer_name)
                if isinstance(layer, torch.nn.Conv3d):
                    # Unscaled: per-figure color range
                    fig = utils.draw_conv2d_weight(layer.weight)
                    image_tensor = utils.fig2rgb(fig)
                    save_name = tag + f'/unscaled/{layer_name}'
                    self.logger.experiment.add_image(save_name, image_tensor, step)
                    save_name += f'/{step}.png'
                    mlflow.log_figure(fig, save_name)
                    # Fixed vmin/vmax so frames are comparable across steps
                    fig = utils.draw_conv2d_weight(layer.weight, vmin=-0.3, vmax=0.3) # -1/+1 for lr 1e-2
                    image_tensor = utils.fig2rgb(fig)
                    save_name = tag + f'/uniform_scaled/{layer_name}'
                    self.logger.experiment.add_image(save_name, image_tensor, step)
                    save_name += f'/{step}.png'
                    mlflow.log_figure(fig, save_name)
    def log_layer_activations(self, tag, x, layer_names, step=None):
        """Log per-channel activation videos of the requested layers."""
        step = step or self.global_step
        import copy
        # Shallow copy so the original keeps training mode; submodules are
        # shared, so hooks registered on the copy still observe the forward
        # pass of self.model below -- NOTE(review): confirm this is intended.
        model = copy.copy(self.model)
        activations = utils.register_activations(model, layer_names)
        model.eval()
        _ = self.model(x)
        for layer_name in activations:
            features = activations[layer_name].detach().cpu()
            if features.shape[0] > 8:
                features = features[:8]
            for c in range(features.shape[1]):
                features_c = features[:,[c],:,:,:].permute(0,2,1,3,4)
                features_c = utils.array_to_float_video(features_c, 0.1, 99.9)
                self.logger.experiment.add_video(
                    '{}/{}/ch{}'.format(tag, layer_name, c),
                    features_c,
                    step)
    def log_scores(self, tag, scores: dict, step=None):
        """Log a metrics dict to both Lightning/TensorBoard and MLflow."""
        step = step or self.global_step
        for k, v in scores.items():
            #self.logger.experiment.add_scalar(tag + '/' + k, v, step)
            self.log(tag + '/' + k, v)  # NOTE(review): self.log used instead of add_scalar -- confirm intended
        mlflow.log_metrics({tag + '/' + k: v.item() for k, v in scores.items()},
                           step=step)
    def log_cm(self, tag, cm, labels=None, step=None):
        """Draw the confusion matrix and log it as an image/figure."""
        step = step or self.global_step
        fig = utils.draw_confusion_matrix(cm.cpu())
        image_tensor = utils.fig2rgb(fig)
        self.logger.experiment.add_image(tag, image_tensor, step)
        mlflow.log_figure(fig, tag + f'/{step}.png')
    def log_eval_plots(self, tag, y_true, y_prob, step=None):
        """Log reliability, ROC, and SSP plots for the given predictions."""
        y_true = y_true.detach().cpu()
        y_prob = y_prob.detach().cpu()
        step = step or self.global_step
        reliability = utils.draw_reliability_plot(y_true, y_prob, n_bins=10)
        mlflow.log_figure(reliability, tag + f'/reliability/{step}.png')
        reliability = utils.fig2rgb(reliability)
        self.logger.experiment.add_image(tag + '/reliability', reliability, step)
        roc = utils.draw_roc(y_true, y_prob)
        mlflow.log_figure(roc, tag + f'/roc/{step}.png')
        roc = utils.fig2rgb(roc)
        self.logger.experiment.add_image(tag + '/roc', roc, step)
        ssp = utils.draw_ssp(y_true, y_prob)
        mlflow.log_figure(ssp, tag + f'/ssp/{step}.png')
        ssp = utils.fig2rgb(ssp)
        self.logger.experiment.add_image(tag + '/ssp', ssp, step)
| 42.93808 | 278 | 0.602855 | [
"MIT"
] | ZeyuSun/flare-prediction-smarp | arnet/modeling/learner.py | 13,869 | Python |
# Summarize a web page with TextRank and emit the result as JSON on stdout.
# Usage: python result_mashara.py <url>
import sys
import json
import scrapapps
import scrapping
from textrank import TextRankSentences
import preprocessing
import summ
import textrankkeyword
import bss4

url = sys.argv[1]
# url = request.POST.get('web_link', None)
#web_link = scrapapps.scrap_data(url)
web_link = scrapping.scrap_data(url)
# Get the page title
judul = scrapping.get_title(url)
raw_text = str(web_link)
# Preprocessing pipeline: lowercase -> strip numbers -> strip whitespace ->
# remove stopwords -> sentence tagging -> extended stopword removal
lower = preprocessing.text_lowercase(str(web_link))
rnumber = preprocessing.remove_numbers(lower)
white_space = preprocessing.remove_whitespace(rnumber)
stopword_list = preprocessing.remove_stopwords(white_space)
new_sentence = ' '.join(stopword_list)
stagging = preprocessing.stagging_text(new_sentence)
stop_plus = preprocessing.stopword_plus(new_sentence)
kalimat = ' '.join(stop_plus)
# Scenario 1 (disabled): length-adaptive summary size
# n = 10;
# if len(stagging) < 10:
#     n = 5
# if len(stagging) == 10:
#     n = len(stagging) - 2
# if len(stagging) > 30:
#     n = 15
# if len(stagging) < 5:
#     n = len(stagging) - 1
# if len(stagging) == 1:
#     n = len(stagging)
# Scenario 2 (active): summarize into at most 7 sentences
n = 7
if len(stagging) < 7:
    n = len(stagging) - 1
if len(stagging) == 1:
    n = len(stagging)
textrank = TextRankSentences()
text = textrank.analyze(str(new_sentence))
text = textrank.get_top_sentences(n)
# Sentence similarity matrix (for inspection)
sim_mat = textrank._build_similarity_matrix(stagging)
# TextRank (PageRank) scores
top_rank = textrank._run_page_rank(sim_mat)
result = textrank._run_page_rank(sim_mat)
# Clean the summary text
ringkasan = preprocessing.remove_punctuation(text)
# Length of the raw plaintext
len_raw = len(str(web_link))
# Length of the summary text
len_text = len(str(text))
# Number of sentences
len_kalimat = len(stagging)
# Reduction percentage (summary length relative to original)
presentase = round(((len_text/len_raw)*100))
# keyphrases = textrankkeyword.extract_key_phrases(raw_text)
data = {
    'raw_text' : raw_text,
    'url' : url,
    'judul' : judul,
    'ringkasan':ringkasan,
    'text':text,
    'len_raw':len_raw,
    'len_text':len_text,
    'len_kalimat':len_kalimat,
    'stagging':stagging,
    'new_sentence':new_sentence,
    # 'sim_mat':sim_mat,
    # 'result':result,
    'presentase':presentase,
    'keyword':'-',
}
print(json.dumps(data))
"MIT"
] | piscalpratama/KMSV2 | file/py/result_mashara.py | 2,171 | Python |
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--infile", default=None, type=str, required=True, help="path of input file to preprocess")
parser.add_argument("--outfile", default=None, type=str, required=True, help="output file path")
args = parser.parse_args()

# Convert "token ... tag" lines into two-column "token\ttag" CoNLL format,
# preserving blank lines as sentence separators.
# BUG FIX: str.split(' ') always returns at least one element, so the old
# `len(line.split(' ')) == 0` check could never fire and blank separator
# lines were emitted as "\n\t\n"; detect blank lines via strip() instead.
# `with` also guarantees both files are closed/flushed.
with open(args.infile, 'r') as infile, open(args.outfile, 'w') as outfile:
    for line in infile:
        if not line.strip():
            outfile.write('\n')
        else:
            tokens = line.split(' ')
            outfile.write(tokens[0] + '\t' + tokens[-1])
"Apache-2.0"
] | Vaidehi99/OBPE | Fine-tuning/Utility Files/wikiann_preprocessor.py | 502 | Python |
import os
import logging, glob
from pathlib import Path
from functools import reduce, partial
from operator import getitem
from datetime import datetime
# from .logger import setup_logging
# import logger.setup_logging
# from . import logger
from .logger import setup_logging
from .util import read_json, write_json
# print(dir('logger'))
class ConfigParser:
def __init__(self, config, resume=None, modification=None, run_id=None):
"""
class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving
and logging module.
:param config: Dict containing configurations, hyperparameters for training. contents of `config.json` file for example.
:param resume: String, path to the checkpoint being loaded.
:param modification: Dict keychain:value, specifying position values to be replaced from config dict.
:param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is being used as default
"""
# load config file and apply modification
self._config = _update_config(config, modification)
self.resume = resume
# set save_dir where trained model and log will be saved.
save_dir = Path(self.config['trainer']['save_dir'])
exper_name = self.config['name']
if 'fold' in self.config['data_loader']['args']:
fold = self.config['data_loader']['args']['fold']
else:
fold = 0 # if no cross validation, use fold = 0
if self.resume:
if os.path.isdir(self.resume ):
self.root_dir = self.resume
elif os.path.isfile(self.resume):
self.root_dir = Path(self.resume).parent
else:
if run_id is None: # use timestamp as default run-id
# run_id = datetime.now().strftime(r'%m%d_%H%M%S') + config['fold']
run_id = "{}_fold_{}".format(datetime.now().strftime(r'%m%d_%H%M%S'), fold)
self.root_dir = save_dir / exper_name / run_id
# self._save_dir = save_dir / exper_name / run_id/ 'models'
# self._log_dir = save_dir/ exper_name / run_id / 'log'
# make directory for saving checkpoints and log.
exist_ok = (self.resume )#run_id == ''
# print(exist_ok)
self.root_dir.mkdir(parents=True, exist_ok=exist_ok)
# self.log_dir.mkdir(parents=True, exist_ok=exist_ok)
# save updated config file to the checkpoint dir
write_json(self.config, self.save_dir / 'config_{}_fold_{}.json'.format(exper_name, fold))
# configure logging module
setup_logging(self.log_dir)
self.log_levels = {
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG
}
def get_root_dir(self):
return self.root_dir
@classmethod
def from_args(cls, args, options='', updates=dict()):
"""
Initialize this class from some cli arguments. Used in train, test.
"""
for opt in options:
args.add_argument(*opt.flags, default=None, type=opt.type)
if not isinstance(args, tuple):
args = args.parse_args()
if args.device is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
if args.resume is not None:
resume = Path(args.resume)
if args.config is None:
cfg_fname = glob.glob(os.path.join(resume, 'config*.json'))[0]
# cfg_fname = resume / 'config*.json'
else:
cfg_fname = Path(args.config)
else:
msg_no_cfg = "Configuration file need to be specified. Add '-c config.json', for example."
assert args.config is not None, msg_no_cfg
resume = None
cfg_fname = Path(args.config)
config = read_json(cfg_fname)
if args.config and resume:
# update new config for fine-tuning
config.update(read_json(args.config))
# parse custom cli options into dictionary
modification = {opt.target : getattr(args, _get_opt_name(opt.flags)) for opt in options}
modification.update(updates)
return cls(config, resume, modification)
def init_obj(self, name, module, *args, **kwargs):
"""
Finds a function handle with the name given as 'type' in config, and returns the
instance initialized with corresponding arguments given.
`object = config.init_obj('name', module, a, b=1)`
is equivalent to
`object = module.name(a, b=1)`
"""
module_name = self[name]['type']
module_args = dict(self[name]['args'])
# assert all([k not in module_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return getattr(module, module_name)(*args, **module_args)
def init_ftn(self, name, module, *args, **kwargs):
"""
Finds a function handle with the name given as 'type' in config, and returns the
function with given arguments fixed with functools.partial.
`function = config.init_ftn('name', module, a, b=1)`
is equivalent to
`function = lambda *args, **kwargs: module.name(a, *args, b=1, **kwargs)`.
"""
module_name = self[name]['type']
module_args = dict(self[name]['args'])
assert all([k not in module_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return partial(getattr(module, module_name), *args, **module_args)
def __getitem__(self, name):
"""Access items like ordinary dict."""
return self.config[name]
def get_logger(self, name, verbosity=2):
msg_verbosity = 'verbosity option {} is invalid. Valid options are {}.'.format(verbosity, self.log_levels.keys())
assert verbosity in self.log_levels, msg_verbosity
logger = logging.getLogger(name)
logger.setLevel(self.log_levels[verbosity])
return logger
    # setting read-only attributes
    @property
    def config(self):
        """The parsed configuration dict (read-only)."""
        return self._config

    @property
    def save_dir(self):
        """Directory for saving checkpoints.

        NOTE(review): currently returns root_dir; the dedicated _save_dir
        behaviour is commented out -- confirm intent.
        """
        return self.root_dir# _save_dir

    @property
    def log_dir(self):
        """Directory for logs (currently shares root_dir with save_dir)."""
        return self.root_dir#_log_dir
# helper functions to update config dict with custom cli options
def _update_config(config, modification):
if modification is None:
return config
for k, v in modification.items():
if v is not None:
_set_by_path(config, k, v)
return config
def _get_opt_name(flags):
for flg in flags:
if flg.startswith('--'):
return flg.replace('--', '')
return flags[0].replace('--', '')
def _set_by_path(tree, keys, value):
    """Set a value in a nested object in tree by a ';'-separated key path."""
    *parents, last = keys.split(';')
    _get_by_path(tree, parents)[last] = value
def _get_by_path(tree, keys):
"""Access a nested object in tree by sequence of keys."""
return reduce(getitem, keys, tree)
| 39.016304 | 142 | 0.630868 | [
"MIT"
] | weinajin/evaluate_multimodal_medical_image_heatmap_explanation | code/utils/parse_config.py | 7,179 | Python |
'''
Scenario discovery utilities used by both :mod:`cart` and :mod:`prim`
'''
from __future__ import (absolute_import, print_function, division,
unicode_literals)
import abc
import enum
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1 import host_subplot # @UnresolvedImports
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
from .plotting_util import COLOR_LIST, make_legend
# Created on May 24, 2015
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
__all__ = ["RuleInductionType"]
class RuleInductionType(enum.Enum):
    """Enumeration of the modes in which rule induction (scenario
    discovery) can be performed."""
    REGRESSION = 'regression'
    '''constant indicating regression mode'''
    BINARY = 'binary'
    '''constant indicating binary classification mode. This is the most
    common used mode in scenario discovery'''
    CLASSIFICATION = 'classification'
    '''constant indicating classification mode'''
def _get_sorted_box_lims(boxes, box_init):
    '''Sort the uncertainties for each box in boxes based on a
    normalization given box_init. Unrestricted dimensions are dropped.
    The sorting is based on the normalization of the first box in boxes.

    Parameters
    ----------
    boxes : list of numpy structured arrays
    box_init : numpy structured array

    Returns
    -------
    tuple
        with the sorted boxes, and the list of restricted uncertainties
    '''
    # determine the uncertainties that are being restricted
    # in one or more boxes
    uncs = set()
    for box in boxes:
        us = _determine_restricted_dims(box, box_init)
        uncs = uncs.union(us)
    uncs = np.asarray(list(uncs))

    # normalize the range for the first box
    box_lim = boxes[0]
    nbl = _normalize(box_lim, box_init, uncs)
    box_size = nbl[:, 1] - nbl[:, 0]

    # sort the uncertainties based on the normalized size of the
    # restricted dimensions; a smaller normalized width means a more
    # restricted dimension, so argsort puts the most restricted first
    uncs = uncs[np.argsort(box_size)]
    box_lims = [box for box in boxes]

    return box_lims, uncs.tolist()
def _make_box(x):
'''
Make a box that encompasses all the data
Parameters
----------
x : DataFrame
Returns
-------
DataFrame
'''
def limits(x):
if (pd.api.types.is_integer_dtype(x.dtype)) or\
(pd.api.types.is_float_dtype(x.dtype)): # @UndefinedVariable
return pd.Series([x.min(), x.max()])
else:
return pd.Series([set(x), set(x)])
return x.apply(limits)
def _normalize(box_lim, box_init, uncertainties):
'''Normalize the given box lim to the unit interval derived
from box init for the specified uncertainties.
Categorical uncertainties are normalized based on fractionated. So
value specifies the fraction of categories in the box_lim.
Parameters
----------
box_lim : DataFrame
box_init : DataFrame
uncertainties : list of strings
valid names of columns that exist in both structured
arrays.
Returns
-------
ndarray
a numpy array of the shape (2, len(uncertainties) with the
normalized box limits.
'''
# normalize the range for the first box
norm_box_lim = np.zeros((len(uncertainties), box_lim.shape[0]))
for i, u in enumerate(uncertainties):
dtype = box_lim[u].dtype
if dtype == np.dtype(object):
nu = len(box_lim.loc[0, u]) / len(box_init.loc[0, u])
nl = 0
else:
lower, upper = box_lim.loc[:, u]
dif = (box_init.loc[1, u] - box_init.loc[0, u])
a = 1 / dif
b = -1 * box_init.loc[0, u] / dif
nl = a * lower + b
nu = a * upper + b
norm_box_lim[i, :] = nl, nu
return norm_box_lim
def _determine_restricted_dims(box_limits, box_init):
'''returns a list of dimensions that is restricted
Parameters
----------
box_limits : pd.DataFrame
box_init : pd.DataFrame
Returns
-------
list of str
'''
cols = box_init.columns.values
restricted_dims = cols[np.all(
box_init.values == box_limits.values, axis=0) == False]
# restricted_dims = [column for column in box_init.columns if not
# np.all(box_init[column].values == box_limits[column].values)]
return restricted_dims
def _determine_nr_restricted_dims(box_lims, box_init):
    '''
    determine the number of restricted dimensions of a box
    compared to the initial box that contains all the data

    Parameters
    ----------
    box_lims : structured numpy array
        a specific box limit
    box_init : structured numpy array
        the initial box containing all data points

    Returns
    -------
    int
        number of dimensions whose limits differ from box_init
    '''
    # thin wrapper: count the restricted dimension names
    return _determine_restricted_dims(box_lims, box_init).shape[0]
def _compare(a, b):
'''compare two boxes, for each dimension return True if the
same and false otherwise'''
dtypesDesc = a.dtype.descr
logical = np.ones((len(dtypesDesc,)), dtype=np.bool)
for i, entry in enumerate(dtypesDesc):
name = entry[0]
logical[i] = logical[i] &\
(a[name][0] == b[name][0]) &\
(a[name][1] == b[name][1])
return logical
def _in_box(x, boxlim):
'''
returns the a boolean index indicated which data points are inside
and which are outside of the given box_lims
Parameters
----------
x : pd.DataFrame
boxlim : pd.DataFrame
Returns
-------
ndarray
boolean 1D array
Raises
------
Attribute error if not numbered columns are not pandas
category dtype
'''
x_numbered = x.select_dtypes(np.number)
boxlim_numbered = boxlim.select_dtypes(np.number)
logical = (boxlim_numbered.loc[0, :].values <= x_numbered.values) &\
(x_numbered.values <= boxlim_numbered.loc[1, :].values)
logical = logical.all(axis=1)
# TODO:: how to speed this up
for column, values in x.select_dtypes(exclude=np.number).iteritems():
entries = boxlim.loc[0, column]
not_present = set(values.cat.categories.values) - entries
if not_present:
# what other options do we have here....
l = pd.isnull(x[column].cat.remove_categories(list(entries)))
logical = l & logical
return logical
def _setup(results, classify, incl_unc=None):
    """helper function for setting up CART or PRIM

    Parameters
    ----------
    results : tuple of DataFrame and dict with numpy arrays
        the return from :meth:`perform_experiments`.
    classify : string, function or callable
        either a string denoting the outcome of interest to
        use or a function.
    incl_unc : list of strings, optional
        subset of uncertainty columns to keep; all columns when omitted.
        (The original used a mutable default argument ``[]``, a known
        Python pitfall; ``None`` is the safe sentinel.)

    Notes
    -----
    CART, PRIM, and feature scoring only work for a 1D numpy array
    for the dependent variable

    Raises
    ------
    TypeError
        if classify is not a string or a callable.
    """
    x, outcomes = results

    if incl_unc:
        drop_names = set(x.columns.values.tolist()) - set(incl_unc)
        x = x.drop(drop_names, axis=1)

    if isinstance(classify, str):
        y = outcomes[classify]
        mode = RuleInductionType.REGRESSION
    elif callable(classify):
        y = classify(outcomes)
        mode = RuleInductionType.BINARY
    else:
        raise TypeError("unknown type for classify")

    assert y.ndim == 1
    return x, y, mode
def _calculate_quasip(x, y, box, Hbox, Tbox):
    '''Calculate the quasi p-value for removing one restriction.

    Parameters
    ----------
    x : DataFrame
    y : np.array
    box : DataFrame
        box limits with one restriction removed
    Hbox : int
        nr. of cases of interest in the restricted box
    Tbox : int
        total nr. of cases in the restricted box

    Returns
    -------
    float
        one-sided binomial-test p-value
    '''
    logical = _in_box(x, box)
    yi = y[logical]

    # total nr. of cases in box with one restriction removed
    Tj = yi.shape[0]

    # total nr. of cases of interest in box with one restriction
    # removed
    Hj = np.sum(yi)

    p = Hj / Tj

    Hbox = int(Hbox)
    Tbox = int(Tbox)

    # force one sided; scipy.stats.binom_test was removed in scipy 1.12,
    # so prefer the binomtest API and fall back for old scipy versions
    try:
        qp = sp.stats.binomtest(
            Hbox, Tbox, p, alternative='greater').pvalue
    except AttributeError:
        qp = sp.stats.binom_test(
            Hbox, Tbox, p, alternative='greater')  # @UndefinedVariable

    return qp
def plot_pair_wise_scatter(x, y, boxlim, box_init, restricted_dims):
    ''' helper function for pair wise scatter plotting

    The box limits are drawn as red rectangles on every off-diagonal
    panel of a seaborn pairplot colored by the outcome of interest.

    Parameters
    ----------
    x : DataFrame
        the experiments
    y : numpy array
        the outcome of interest
    boxlim : DataFrame
        a boxlim
    box_init : DataFrame
    restricted_dims : collection of strings
        list of uncertainties that define the boxlims

    Returns
    -------
    seaborn PairGrid
    '''
    x = x[restricted_dims]
    data = x.copy()

    # TODO:: have option to change
    # diag to CDF, gives you effectively the
    # regional sensitivity analysis results
    categorical_columns = data.select_dtypes('category').columns.values
    categorical_mappings = {}
    for column in categorical_columns:
        # reorder categorical data so the categories retained in the
        # box come first and can be covered by a single rectangle
        categories_inbox = boxlim.at[0, column]
        categories_all = box_init.at[0, column]
        missing = categories_all - categories_inbox
        categories = list(categories_inbox) + list(missing)
        # (removed a leftover debug print of column/categories here)
        data[column] = data[column].cat.set_categories(categories)

        # keep the mapping for updating ticklabels
        categorical_mappings[column] = dict(
            enumerate(data[column].cat.categories))

        # replace column with codes
        data[column] = data[column].cat.codes

    data['y'] = y  # hue column for the scatter grid
    grid = sns.pairplot(data=data, hue='y', vars=x.columns.values)

    cats = set(categorical_columns)
    for row, ylabel in zip(grid.axes, grid.y_vars):
        ylim = boxlim[ylabel]

        if ylabel in cats:
            # categorical axis: rectangle spans the retained codes
            y = -0.2
            height = len(ylim[0]) - 0.6  # 2 * 0.2
        else:
            y = ylim[0]
            height = ylim[1] - ylim[0]

        for ax, xlabel in zip(row, grid.x_vars):
            if ylabel == xlabel:
                continue

            if xlabel in cats:
                xlim = boxlim.at[0, xlabel]
                x = -0.2
                width = len(xlim) - 0.6  # 2 * 0.2
            else:
                xlim = boxlim[xlabel]
                x = xlim[0]
                width = xlim[1] - xlim[0]

            xy = x, y
            box = patches.Rectangle(xy, width, height, edgecolor='red',
                                    facecolor='none', lw=3)
            ax.add_patch(box)

    # do the yticklabeling for categorical rows
    for row, ylabel in zip(grid.axes, grid.y_vars):
        if ylabel in cats:
            ax = row[0]
            labels = []
            for entry in ax.get_yticklabels():
                _, value = entry.get_position()
                try:
                    label = categorical_mappings[ylabel][value]
                except KeyError:
                    label = ''
                labels.append(label)
            ax.set_yticklabels(labels)

    # do the xticklabeling for categorical columns
    for ax, xlabel in zip(grid.axes[-1], grid.x_vars):
        if xlabel in cats:
            labels = []
            locs = []
            mapping = categorical_mappings[xlabel]
            for i in range(-1, len(mapping) + 1):
                locs.append(i)
                try:
                    label = categorical_mappings[xlabel][i]
                except KeyError:
                    label = ''
                labels.append(label)
            ax.set_xticks(locs)
            ax.set_xticklabels(labels, rotation=90)
    return grid
def _setup_figure(uncs):
    '''
    helper function for creating the basic layout for the figures that
    show the box lims.

    Parameters
    ----------
    uncs : list of str
        names of the restricted uncertainties, one y-tick per entry

    Returns
    -------
    (Figure, Axes)
    '''
    nr_unc = len(uncs)
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # create the shaded grey background covering the unit interval
    rect = mpl.patches.Rectangle((0, -0.5), 1, nr_unc + 1.5,
                                 alpha=0.25,
                                 facecolor="#C0C0C0",
                                 edgecolor="#C0C0C0")
    ax.add_patch(rect)
    ax.set_xlim(left=-0.2, right=1.2)
    # inverted y-limits so the first uncertainty ends up at the top
    ax.set_ylim(top=-0.5, bottom=nr_unc - 0.5)
    ax.yaxis.set_ticks([y for y in range(nr_unc)])
    ax.xaxis.set_ticks([0, 0.25, 0.5, 0.75, 1])
    ax.set_yticklabels(uncs[::-1])
    return fig, ax
def plot_box(boxlim, qp_values, box_init, uncs,
             coverage, density,
             ticklabel_formatter="{} ({})",
             boxlim_formatter="{: .2g}",
             table_formatter="{:.3g}"):
    '''Helper function for parallel coordinate style visualization
    of a box

    Parameters
    ----------
    boxlim : DataFrame
    qp_values : dict
        maps uncertainty name to its quasi p-value(s); -1 entries are
        treated as "not available" and dropped from the tick labels
    box_init : DataFrame
    uncs : list
    coverage : float
    density : float
    ticklabel_formatter : str
    boxlim_formatter : str
    table_formatter : str

    Returns
    -------
    a Figure instance
    '''
    norm_box_lim = _normalize(boxlim, box_init, uncs)

    fig, ax = _setup_figure(uncs)
    for j, u in enumerate(uncs):
        # we want to have the most restricted dimension
        # at the top of the figure
        xj = len(uncs) - j - 1

        plot_unc(box_init, xj, j, 0, norm_box_lim,
                 boxlim, u, ax)

        # new part
        dtype = box_init[u].dtype

        props = {'facecolor': 'white',
                 'edgecolor': 'white',
                 'alpha': 0.25}
        y = xj

        if dtype == object:
            # categorical: label every retained category at its
            # normalized position
            elements = sorted(list(box_init[u][0]))
            max_value = (len(elements) - 1)
            values = boxlim.loc[0, u]
            x = [elements.index(entry) for entry in
                 values]
            x = [entry / max_value for entry in x]

            for xi, label in zip(x, values):
                ax.text(xi, y - 0.2, label, ha='center', va='center',
                        bbox=props, color='blue', fontweight='normal')
        else:
            props = {'facecolor': 'white',
                     'edgecolor': 'white',
                     'alpha': 0.25}

            # plot limit text labels (skipped when the limit coincides
            # with the edge of the unit interval)
            x = norm_box_lim[j, 0]

            if not np.allclose(x, 0):
                label = boxlim_formatter.format(boxlim.loc[0, u])
                ax.text(x, y - 0.2, label, ha='center', va='center',
                        bbox=props, color='blue', fontweight='normal')

            x = norm_box_lim[j][1]
            if not np.allclose(x, 1):
                label = boxlim_formatter.format(boxlim.loc[1, u])
                ax.text(x, y - 0.2, label, ha='center', va='center',
                        bbox=props, color='blue', fontweight='normal')

            # plot uncertainty space text labels
            x = 0
            label = boxlim_formatter.format(box_init.loc[0, u])
            ax.text(x - 0.01, y, label, ha='right', va='center',
                    bbox=props, color='black', fontweight='normal')

            x = 1
            label = boxlim_formatter.format(box_init.loc[1, u])
            ax.text(x + 0.01, y, label, ha='left', va='center',
                    bbox=props, color='black', fontweight='normal')

    # set y labels: append the quasi p-values to the uncertainty names
    qp_formatted = {}
    for key, values in qp_values.items():
        values = [vi for vi in values if vi != -1]

        if len(values) == 1:
            value = '{:.2g}'.format(values[0])
        else:
            value = '{:.2g}, {:.2g}'.format(*values)
        qp_formatted[key] = value

    labels = [ticklabel_formatter.format(u, qp_formatted[u]) for u in
              uncs]

    labels = labels[::-1]
    ax.set_yticklabels(labels)

    # remove x tick labels
    ax.set_xticklabels([])

    coverage = table_formatter.format(coverage)
    density = table_formatter.format(density)

    # add coverage/density table to the right of the axes
    ax.table(cellText=[[coverage], [density]],
             colWidths=[0.1] * 2,
             rowLabels=['coverage', 'density'],
             colLabels=None,
             loc='right',
             bbox=[1.2, 0.9, 0.1, 0.1],)
    plt.subplots_adjust(left=0.1, right=0.75)

    return fig
def plot_ppt(peeling_trajectory):
    '''show the peeling and pasting trajectory in a figure

    Parameters
    ----------
    peeling_trajectory : DataFrame
        with columns mean, mass, coverage, density, and res_dim

    Returns
    -------
    a Figure instance
    '''
    ax = host_subplot(111)
    ax.set_xlabel("peeling and pasting trajectory")

    par = ax.twinx()
    par.set_ylabel("nr. restricted dimensions")

    # fraction-valued metrics on the primary axis
    ax.plot(peeling_trajectory['mean'], label="mean")
    ax.plot(peeling_trajectory['mass'], label="mass")
    ax.plot(peeling_trajectory['coverage'], label="coverage")
    ax.plot(peeling_trajectory['density'], label="density")
    # integer-valued metric on the secondary axis
    par.plot(peeling_trajectory['res_dim'], label="restricted dims")
    ax.grid(True, which='both')
    ax.set_ylim(bottom=0, top=1)

    fig = plt.gcf()

    make_legend(['mean', 'mass', 'coverage', 'density',
                 'restricted_dim'],
                ax, ncol=5, alpha=1)
    return fig
def plot_tradeoff(peeling_trajectory, cmap=mpl.cm.viridis):  # @UndefinedVariable
    '''Visualize the trade off between coverage and density. Color
    is used to denote the number of restricted dimensions.

    Parameters
    ----------
    peeling_trajectory : DataFrame
        with columns coverage, density, and res_dim
    cmap : valid matplotlib colormap

    Returns
    -------
    a Figure instance
    '''
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')

    # one color bin per possible number of restricted dimensions
    boundaries = np.arange(-0.5,
                           max(peeling_trajectory['res_dim']) + 1.5,
                           step=1)
    ncolors = cmap.N
    norm = mpl.colors.BoundaryNorm(boundaries, ncolors)

    p = ax.scatter(peeling_trajectory['coverage'],
                   peeling_trajectory['density'],
                   c=peeling_trajectory['res_dim'],
                   norm=norm,
                   cmap=cmap)
    ax.set_ylabel('density')
    ax.set_xlabel('coverage')
    ax.set_ylim(bottom=0, top=1.2)
    ax.set_xlim(left=0, right=1.2)

    ticklocs = np.arange(0,
                         max(peeling_trajectory['res_dim']) + 1,
                         step=1)
    cb = fig.colorbar(p, spacing='uniform', ticks=ticklocs,
                      drawedges=True)
    cb.set_label("nr. of restricted dimensions")
    return fig
def plot_unc(box_init, xi, i, j, norm_box_lim, box_lim, u, ax,
             color=sns.color_palette()[0]):
    '''Plot a single uncertainty of a single box on a parallel
    coordinate style axis.

    Parameters:
    ----------
    box_init : DataFrame
        the initial box containing all data points
    xi : int
        the row at which to plot
    i : int
        the index of the uncertainty being plotted
    j : int
        the index of the box being plotted (used as a vertical offset
        so multiple boxes do not overlap)
    norm_box_lim : ndarray
        normalized limits as returned by _normalize
    box_lim : DataFrame
        the box limits being plotted
    u : string
        the uncertainty being plotted:
    ax : axes instance
        the ax on which to plot
    color : matplotlib color
    '''

    dtype = box_init[u].dtype

    y = xi - j * 0.1

    if dtype == object:
        # categorical: draw one marker per retained category at its
        # normalized position
        elements = sorted(list(box_init[u][0]))
        max_value = (len(elements) - 1)
        box_lim = box_lim[u][0]
        x = [elements.index(entry) for entry in
             box_lim]
        x = [entry / max_value for entry in x]
        y = [y] * len(x)
        ax.scatter(x, y, edgecolor=color,
                   facecolor=color)

    else:
        # numeric: draw the normalized interval as a horizontal line
        ax.plot(norm_box_lim[i], (y, y),
                c=color)
def plot_boxes(x, boxes, together):
    '''Helper function for plotting multiple boxlims

    Parameters
    ----------
    x : pd.DataFrame
        the experiments
    boxes : list of pd.DataFrame
        the box limits to visualize
    together : bool
        when True all boxes share one figure; otherwise one figure
        per box is returned

    Returns
    -------
    Figure or list of Figures
    '''
    box_init = _make_box(x)
    box_lims, uncs = _get_sorted_box_lims(boxes, box_init)

    # normalize the box lims
    # we don't need to show the last box, for this is the
    # box_init, which is visualized by a grey area in this
    # plot.
    norm_box_lims = [_normalize(box_lim, box_init, uncs) for
                     box_lim in boxes]

    if together:
        fig, ax = _setup_figure(uncs)

        for i, u in enumerate(uncs):
            colors = itertools.cycle(COLOR_LIST)
            # we want to have the most restricted dimension
            # at the top of the figure

            xi = len(uncs) - i - 1

            for j, norm_box_lim in enumerate(norm_box_lims):
                color = next(colors)
                plot_unc(box_init, xi, i, j, norm_box_lim,
                         box_lims[j], u, ax, color)

        plt.tight_layout()
        return fig
    else:
        figs = []
        colors = itertools.cycle(COLOR_LIST)

        for j, norm_box_lim in enumerate(norm_box_lims):
            fig, ax = _setup_figure(uncs)
            ax.set_title('box {}'.format(j))
            color = next(colors)

            figs.append(fig)
            for i, u in enumerate(uncs):
                xi = len(uncs) - i - 1
                plot_unc(box_init, xi, i, 0, norm_box_lim,
                         box_lims[j], u, ax, color)
            plt.tight_layout()
        return figs
class OutputFormatterMixin(abc.ABC):
    """Mixin that adds tabular and graphical reporting to box-based
    scenario discovery algorithms (e.g. PRIM, CART).

    Subclasses must provide the ``boxes`` and ``stats`` properties and
    an ``x`` attribute holding the experiments DataFrame.
    """
    # NOTE: the original declared ``__metaclass__ = abc.ABCMeta``, the
    # Python 2 idiom, which has no effect on Python 3 -- the abstract
    # methods were never enforced. Inheriting from abc.ABC restores the
    # intended behaviour. ``abc.abstractproperty`` is deprecated; the
    # @property + @abstractmethod stack is the modern equivalent.

    @property
    @abc.abstractmethod
    def boxes(self):
        '''Property for getting a list of box limits'''
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def stats(self):
        '''property for getting a list of dicts containing the statistics
        for each box'''
        raise NotImplementedError

    def boxes_to_dataframe(self):
        '''convert boxes to pandas dataframe'''

        boxes = self.boxes

        # determine the restricted dimensions
        # print only the restricted dimension
        box_lims, uncs = _get_sorted_box_lims(boxes, _make_box(self.x))
        nr_boxes = len(boxes)
        dtype = float
        index = ["box {}".format(i + 1) for i in range(nr_boxes)]
        for value in box_lims[0].dtypes:
            if value == object:
                dtype = object
                break

        columns = pd.MultiIndex.from_product([index,
                                              ['min', 'max', ]])
        df_boxes = pd.DataFrame(np.zeros((len(uncs), nr_boxes * 2)),
                                index=uncs,
                                dtype=dtype,
                                columns=columns)

        # TODO should be possible to make more efficient
        for i, box in enumerate(box_lims):
            for unc in uncs:
                values = box.loc[:, unc]
                values = values.rename({0: 'min', 1: 'max'})
                # single .loc assignment; the original chained
                # df_boxes.loc[unc][index[i]] = ... can silently assign
                # to a copy in modern pandas
                df_boxes.loc[unc, index[i]] = values.values
        return df_boxes

    def stats_to_dataframe(self):
        '''convert stats to pandas dataframe'''
        stats = self.stats
        index = pd.Index(['box {}'.format(i + 1) for i in range(len(stats))])
        return pd.DataFrame(stats, index=index)

    def show_boxes(self, together=False):
        '''display boxes

        Parameters
        ----------
        together : bool, optional
        '''
        plot_boxes(self.x, self.boxes, together=together)
| 28.130597 | 81 | 0.570588 | [
"BSD-3-Clause"
] | brodderickrodriguez/EMA_lite | ema_workbench/analysis/scenario_discovery_util.py | 22,617 | Python |
import sys
import os
from cx_Freeze import setup, Executable

# because of how namespace packages work, cx-freeze isn't finding
# zope.interface. The following imports zope, finds its path, and creates
# an empty __init__.py at <site-packages>/zope so cx_Freeze can treat it
# as a regular package. This workaround was found here:
# https://bitbucket.org/anthony_tuininga/cx_freeze/issues/47/cannot-import-zopeinterface
import zope

path = zope.__path__[0]
# bugfix: the original left the file handle open; a context manager
# closes it deterministically (an empty file is all cx_Freeze needs)
with open(os.path.join(path, '__init__.py'), 'wb'):
    pass

# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"packages": ["zope.interface"]}

# GUI applications on Windows need the Win32GUI base to suppress the console
base = None
if sys.platform == "win32":
    base = "Win32GUI"

setup(name="tw-cs",
      version="0.1",
      description="My twisted application!",
      options={"build_exe": build_exe_options},
      executables=[Executable("echoserv.py", base=base)])
| 33.703704 | 88 | 0.715385 | [
"MIT"
] | derwolfe/twisted-cxfreeze-example | setup.py | 910 | Python |
# -*- coding: utf-8 -*
'''问卷数据分析工具包
Created on Tue Nov 8 20:05:36 2016
@author: JSong
1、针对问卷星数据,编写并封装了很多常用算法
2、利用report工具包,能将数据直接导出为PPTX
该工具包支持一下功能:
1、编码问卷星、问卷网等数据
2、封装描述统计和交叉分析函数
3、支持生成一份整体的报告和相关数据
'''
import os
import re
import sys
import math
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .. import report as rpt
from .. import associate
__all__=['read_code',
'save_code',
'spec_rcode',
'dataText_to_code',
'dataCode_to_text',
'var_combine',
'wenjuanwang',
'wenjuanxing',
'load_data',
'read_data',
'save_data',
'data_merge',
'clean_ftime',
'data_auto_code',
'qdata_flatten',
'sample_size_cal',
'confidence_interval',
'gof_test',
'chi2_test',
'fisher_exact',
'anova',
'mca',
'cluster',
'scatter',
'sankey',
'qtable',
'association_rules',
'contingency',
'cross_chart',
'summary_chart',
'onekey_gen',
'scorpion']
#=================================================================
#
#
#   [Questionnaire data processing]
#
#
#==================================================================
def read_code(filename):
    '''Read a code book file and return it as a dict.

    Two formats are supported:
    1. json
    2. the xlsx layout produced by save_code

    see also save_code
    '''
    file_type = os.path.splitext(filename)[1][1:]
    if file_type == 'json':
        import json
        # bugfix: json.load() expects a file object, not a path string
        with open(filename, encoding='utf-8') as f:
            code = json.load(f)
        return code
    d = pd.read_excel(filename, header=None)
    d = d[d.any(axis=1)]  # drop empty rows
    d.fillna('NULL', inplace=True)
    # bugfix: DataFrame.as_matrix() was removed in pandas 1.0
    d = d.values
    code = {}
    for i in range(len(d)):
        tmp = d[i, 0].strip()
        if tmp == 'key':
            # a 'key' row opens a new question entry
            code[d[i, 1]] = {}
            key = d[i, 1]
        elif tmp in ['qlist', 'code_order']:
            # fields whose value is a list: collect rows until the next
            # labelled row (or end of sheet)
            ind = np.argwhere(d[i + 1:, 0] != 'NULL')
            if len(ind) > 0:
                j = i + 1 + ind[0][0]
            else:
                j = len(d)
            tmp2 = list(d[i:j, 1])
            # strip surrounding whitespace from string entries
            # (inner index renamed from i to k to stop shadowing the
            # outer loop variable)
            for k in range(len(tmp2)):
                if isinstance(tmp2[k], str):
                    tmp2[k] = tmp2[k].strip()
            code[key][tmp] = tmp2
        elif tmp in ['code', 'code_r']:
            # fields whose value is a dict (option code -> label)
            ind = np.argwhere(d[i + 1:, 0] != 'NULL')
            if len(ind) > 0:
                j = i + 1 + ind[0][0]
            else:
                j = len(d)
            tmp1 = list(d[i:j, 1])
            tmp2 = list(d[i:j, 2])
            for k in range(len(tmp2)):
                if isinstance(tmp2[k], str):
                    tmp2[k] = tmp2[k].strip()
            code[key][tmp] = dict(zip(tmp1, tmp2))
        # any other field whose value is a list
        elif (tmp != 'NULL') and (d[i, 2] == 'NULL') and ((i == len(d) - 1) or (d[i + 1, 0] == 'NULL')):
            ind = np.argwhere(d[i + 1:, 0] != 'NULL')
            if len(ind) > 0:
                j = i + 1 + ind[0][0]
            else:
                j = len(d)
            if i == len(d) - 1:
                code[key][tmp] = d[i, 1]
            else:
                tmp2 = list(d[i:j, 1])
                for k in range(len(tmp2)):
                    if isinstance(tmp2[k], str):
                        tmp2[k] = tmp2[k].strip()
                code[key][tmp] = tmp2
        # any other field whose value is a dict
        elif (tmp != 'NULL') and (d[i, 2] != 'NULL') and ((i == len(d) - 1) or (d[i + 1, 0] == 'NULL')):
            ind = np.argwhere(d[i + 1:, 0] != 'NULL')
            if len(ind) > 0:
                j = i + 1 + ind[0][0]
            else:
                j = len(d)
            tmp1 = list(d[i:j, 1])
            tmp2 = list(d[i:j, 2])
            for k in range(len(tmp2)):
                if isinstance(tmp2[k], str):
                    tmp2[k] = tmp2[k].strip()
            code[key][tmp] = dict(zip(tmp1, tmp2))
        elif tmp == 'NULL':
            continue
        else:
            code[key][tmp] = d[i, 1]
    return code
def save_code(code,filename='code.xlsx'):
    '''Write the code book dict to disk.

    1. json output, chosen automatically from the file extension
    2. Excel (xlsx) output otherwise

    see also read_code
    '''
    save_type=os.path.splitext(filename)[1][1:]
    if save_type == 'json':
        code=pd.DataFrame(code)
        code.to_json(filename,force_ascii=False)
        return
    # flatten the nested dict into a three-column sheet:
    # field name / first value / second value (for dict fields)
    tmp=pd.DataFrame(columns=['name','value1','value2'])
    i=0
    # when every key looks like 'Q<n>', sort questions numerically
    if all(['Q' in c[0] for c in code.keys()]):
        key_qlist=sorted(code,key=lambda c:int(re.findall('\d+',c)[0]))
    else:
        key_qlist=code.keys()
    for key in key_qlist:
        code0=code[key]
        tmp.loc[i]=['key',key,'']
        i+=1
        #print(key)
        for key0 in code0:
            tmp2=code0[key0]
            if (type(tmp2) == list) and tmp2:
                # list field: first entry on the labelled row,
                # remaining entries on unlabelled rows below
                tmp.loc[i]=[key0,tmp2[0],'']
                i+=1
                for ll in tmp2[1:]:
                    tmp.loc[i]=['',ll,'']
                    i+=1
            elif (type(tmp2) == dict) and tmp2:
                # dict field: sort keys by the numeric part when possible
                try:
                    tmp2_key=sorted(tmp2,key=lambda c:float(re.findall('[\d\.]+','%s'%c)[-1]))
                except:
                    tmp2_key=list(tmp2.keys())
                j=0
                for key1 in tmp2_key:
                    if j==0:
                        tmp.loc[i]=[key0,key1,tmp2[key1]]
                    else:
                        tmp.loc[i]=['',key1,tmp2[key1]]
                    i+=1
                    j+=1
            else:
                # scalar field
                if tmp2:
                    tmp.loc[i]=[key0,tmp2,'']
                    i+=1
    # NOTE(review): string comparison sys.version>'3' distinguishes
    # py2 from py3 but is fragile; sys.version_info would be safer
    if sys.version>'3':
        tmp.to_excel(filename,index=False,header=False)
    else:
        tmp.to_csv(filename,index=False,header=False,encoding='utf-8')
'''问卷数据导入和编码
对每一个题目的情形进行编码:题目默认按照Q1、Q2等给出
Qn.content: 题目内容
Qn.qtype: 题目类型,包含:单选题、多选题、填空题、排序题、矩阵单选题等
Qn.qlist: 题目列表,例如多选题对应着很多小题目
Qn.code: dict,题目选项编码
Qn.code_r: 题目对应的编码(矩阵题目专有)
Qn.code_order: 题目类别的顺序,用于PPT报告的生成[一般后期添加]
Qn.name: 特殊类型,包含:城市题、NPS题等
Qn.weight:dict,每个选项的权重
'''
def dataText_to_code(df, sep, qqlist=None):
    '''Encode delimiter-separated answer text into numbered columns.

    Multi-choice answers (sep ';' or '┋') become 0/1 dummy columns,
    ranking answers (sep '-->' or '→') become rank-number columns.
    The original text columns are dropped in place.

    Returns the modified DataFrame and the generated code dict.
    '''
    if sep in [';', '┋']:
        qtype = '多选题'
    elif sep in ['-->', '→']:
        qtype = '排序题'
    if not qqlist:
        qqlist = df.columns
    code = {}
    for qq in qqlist:
        split_answers = df[qq].map(
            lambda x: x.split(sep) if isinstance(x, str) else [])
        item_list = sorted(set(split_answers.sum()))
        if qtype == '多选题':
            # 1 when the option was selected, 0 otherwise
            encoded = split_answers.map(
                lambda x: [int(t in x) for t in item_list])
        elif qtype == '排序题':
            # 1-based rank of each option, NaN when not ranked
            encoded = split_answers.map(
                lambda x: [x.index(t) + 1 if t in x else np.nan
                           for t in item_list])
        code_tmp = {'code': {}, 'qtype': qtype, 'qlist': [], 'content': qq}
        for i, item in enumerate(item_list):
            column_name = '{}_A{:.0f}'.format(qq, i + 1)
            df[column_name] = encoded.map(lambda x: x[i])
            code_tmp['code'][column_name] = item
            code_tmp['qlist'].append(column_name)
        code[qq] = code_tmp
        df.drop(qq, axis=1, inplace=True)
    return df, code
def dataCode_to_text(df, code=None):
    '''Convert encoded answers back into delimiter-joined text.

    Parameters
    ----------
    df : DataFrame
        0/1 multi-choice flags, or 1..n rank numbers per column
    code : dict, optional
        mapping of column names to human-readable answer labels

    Returns
    -------
    DataFrame with a single 'text' column (answer columns are dropped)
    '''
    # rank data contains values > 1; multi-choice is purely 0/1
    if df.max().max() > 1:
        sep = '→'
    else:
        sep = '┋'
    if code:
        # bugfix: the mapping applies to column names; plain rename()
        # would rename the index instead of the columns
        df = df.rename(columns=code)
    qlist = list(df.columns)
    df['text'] = np.nan
    if sep in ['┋']:
        # join the labels of all selected options
        for i in df.index:
            w = df.loc[i, :] == 1
            df.loc[i, 'text'] = sep.join(list(w.index[w]))
    elif sep in ['→']:
        # join labels in rank order
        for i in df.index:
            w = df.loc[i, :]
            w = w[w >= 1].sort_values()
            df.loc[i, 'text'] = sep.join(list(w.index))
    df.drop(qlist, axis=1, inplace=True)
    return df
def var_combine(data, code, qq1, qq2, sep=',', qnum_new=None, qname_new=None):
    '''Combine two single-choice questions into one new question.

    e.g. Q1 (gender) x Q2 (age) yields a new question whose options are
    1. male_16-19  2. male_20-40  3. female_16-19  4. female_20-40

    Parameters
    ----------
    data : DataFrame of coded answers
    code : the code book dict
    qq1, qq2 : question numbers of the two single-choice questions
    sep : separator used when joining the option labels
    qnum_new, qname_new : optional number/name for the new question

    Returns
    -------
    (data, code) with the combined question added in place

    Raises
    ------
    ValueError
        when either question is not single-choice or the option codes
        are not numeric
    '''
    if qnum_new is None:
        if 'Q' == qq2[0]:
            qnum_new = qq1 + '_' + qq2[1:]
        else:
            qnum_new = qq1 + '_' + qq2
    if qname_new is None:
        qname_new = code[qq1]['content'] + '_' + code[qq2]['content']
    if code[qq1]['qtype'] != '单选题' or code[qq2]['qtype'] != '单选题':
        # bugfix: a bare `raise` outside an except block only produces
        # RuntimeError('No active exception to re-raise')
        print('只支持组合两个单选题,请检查.')
        raise ValueError('var_combine only supports two single-choice questions')

    d1 = data[code[qq1]['qlist'][0]]
    d2 = data[code[qq2]['qlist'][0]]
    sm = max(code[qq1]['code'].keys())  # radix of the first question
    sn = max(code[qq2]['code'].keys())  # radix of the second question
    if isinstance(sm, str) or isinstance(sn, str):
        print('所选择的两个变量不符合函数要求.')
        raise ValueError('option codes of both questions must be numeric')
    # mixed-radix encoding: each (c1, c2) pair maps to a unique code
    data[qnum_new] = (d1 - 1) * sn + d2
    code[qnum_new] = {'qtype': '单选题', 'qlist': [qnum_new], 'content': qname_new}
    code_tmp = {}
    for c1 in code[qq1]['code']:
        for c2 in code[qq2]['code']:
            cc = (c1 - 1) * sn + c2
            value = '{}{}{}'.format(code[qq1]['code'][c1], sep,
                                    code[qq2]['code'][c2])
            code_tmp[cc] = value
    code[qnum_new]['code'] = code_tmp
    print('变量已合并,新变量题号为:{}'.format(qnum_new))
    return data, code
def wenjuanwang(filepath='.\\data', encoding='gbk'):
    '''Import and encode survey data exported from wenjuan.com.

    Parameters
    ----------
    filepath : list or str
        when a list: [readable-text csv, original-coded csv, code csv];
        when a directory: the three files are looked up under it using
        the default export names.
    encoding : str
        text encoding of the csv files

    Returns
    -------
    (data, code)
        data: coded answers with question titles replaced by Q_n
        code: the code book dict; persist it with save_code()

    Raises
    ------
    ValueError
        when filepath is neither a list nor an existing directory
    '''
    if isinstance(filepath, list):
        filename1 = filepath[0]
        filename2 = filepath[1]
        filename3 = filepath[2]
    elif os.path.isdir(filepath):
        filename1 = os.path.join(filepath, 'All_Data_Readable.csv')
        filename2 = os.path.join(filepath, 'All_Data_Original.csv')
        filename3 = os.path.join(filepath, 'code.csv')
    else:
        # bugfix: the original only printed a warning and then crashed
        # with a NameError on filename1; fail fast instead
        raise ValueError('can not detect the filepath!')
    d1 = pd.read_csv(filename1, encoding=encoding)
    d1.drop([u'答题时长'], axis=1, inplace=True)
    d2 = pd.read_csv(filename2, encoding=encoding)
    d3 = pd.read_csv(filename3, encoding=encoding, header=None, na_filter=False)
    # bugfix: DataFrame.as_matrix() was removed in pandas 1.0
    d3 = d3.values
    # first pass over code.csv: rough code book; qlist and the code_r of
    # matrix questions are completed in the second pass
    code = {}
    for i in range(len(d3)):
        if d3[i, 0]:
            key = d3[i, 0]
            code[key] = {}
            code[key]['content'] = d3[i, 1]
            code[key]['qtype'] = d3[i, 2]
            code[key]['code'] = {}
            code[key]['qlist'] = []
        elif d3[i, 2]:
            tmp = d3[i, 1]
            if code[key]['qtype'] in [u'多选题', u'排序题']:
                tmp = key + '_A' + '%s' % (tmp)
                code[key]['code'][tmp] = '%s' % (d3[i, 2])
                code[key]['qlist'].append(tmp)
            elif code[key]['qtype'] in [u'单选题']:
                try:
                    tmp = int(tmp)
                except (ValueError, TypeError):
                    tmp = '%s' % (tmp)
                code[key]['code'][tmp] = '%s' % (d3[i, 2])
                code[key]['qlist'] = [key]
            elif code[key]['qtype'] in [u'填空题']:
                code[key]['qlist'] = [key]
            else:
                try:
                    tmp = int(tmp)
                except (ValueError, TypeError):
                    tmp = '%s' % (tmp)
                code[key]['code'][tmp] = '%s' % (d3[i, 2])
    # second pass: fill in qlist and the code_r of matrix single-choice
    # questions from the readable/original column headers
    qnames_Readable = list(d1.columns)
    qnames = list(d2.columns)
    for key in code.keys():
        qlist = []
        for name in qnames:
            if re.match(key + '_', name) or key == name:
                qlist.append(name)
        if ('qlist' not in code[key]) or (not code[key]['qlist']):
            code[key]['qlist'] = qlist
        if code[key]['qtype'] in [u'矩阵单选题']:
            tmp = [qnames_Readable[qnames.index(q)] for q in code[key]['qlist']]
            code_r = [re.findall('_([^_]*?)$', t)[0] for t in tmp]
            code[key]['code_r'] = dict(zip(code[key]['qlist'], code_r))
    # derive the answering duration (seconds) from start/finish timestamps
    d2['start'] = pd.to_datetime(d2['start'])
    d2['finish'] = pd.to_datetime(d2['finish'])
    tmp = d2['finish'] - d2['start']
    tmp = tmp.astype(str).map(lambda x: 60 * int(re.findall(':(\d+):', x)[0]) + int(re.findall(':(\d+)\.', x)[0]))
    ind = np.where(d2.columns == 'finish')[0][0]
    d2.insert(int(ind) + 1, u'答题时长(秒)', tmp)
    return (d2, code)
def wenjuanxing(filepath='.\\data',headlen=6):
'''问卷星数据导入和编码
输入:
filepath:
列表, filepath[0]: (23_22_0.xls)为按文本数据路径,filepath[1]: (23_22_2.xls)为按序号文本
文件夹路径,函数会自动在文件夹下搜寻相关数据,优先为\d+_\d+_0.xls和\d+_\d+_2.xls
headlen: 问卷星数据基础信息的列数
输出:
(data,code):
data为按序号的数据,题目都替换成了Q_n
code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据
'''
#filepath='.\\data'
#headlen=6# 问卷从开始到第一道正式题的数目(一般包含序号,提交答卷时间的等等)
if isinstance(filepath,list):
filename1=filepath[0]
filename2=filepath[1]
elif os.path.isdir(filepath):
filelist=os.listdir(filepath)
n1=n2=0
for f in filelist:
s1=re.findall('\d+_\d+_0.xls',f)
s2=re.findall('\d+_\d+_2.xls',f)
if s1:
filename1=s1[0]
n1+=1
if s2:
filename2=s2[0]
n2+=1
if n1+n2==0:
print(u'在文件夹下没有找到问卷星按序号和按文本数据,请检查目录或者工作目录.')
return
elif n1+n2>2:
print(u'存在多组问卷星数据,请检查.')
return
filename1=os.path.join(filepath,filename1)
filename2=os.path.join(filepath,filename2)
else:
print('can not dection the filepath!')
d1=pd.read_excel(filename1)
d2=pd.read_excel(filename2)
d2.replace({-2:np.nan,-3:np.nan},inplace=True)
#d1.replace({u'(跳过)':np.nan},inplace=True)
code={}
'''
遍历一遍按文本数据,获取题号和每个题目的类型
'''
for name in d1.columns[headlen:]:
tmp=re.findall(u'^(\d{1,3})[、::]',name)
# 识别多选题、排序题
if tmp:
new_name='Q'+tmp[0]
current_name='Q'+tmp[0]
code[new_name]={}
content=re.findall(u'\d{1,3}[、::](.*)',name)
code[new_name]['content']=content[0]
d1.rename(columns={name:new_name},inplace=True)
code[new_name]['qlist']=[]
code[new_name]['code']={}
code[new_name]['qtype']=''
code[new_name]['name']=''
qcontent=str(list(d1[new_name]))
# 单选题和多选题每个选项都可能有开放题,得识别出来
if ('〖' in qcontent) and ('〗' in qcontent):
code[new_name]['qlist_open']=[]
if '┋' in qcontent:
code[new_name]['qtype']=u'多选题'
elif '→' in qcontent:
code[new_name]['qtype']=u'排序题'
# 识别矩阵单选题
else:
tmp2=re.findall(u'^第(\d{1,3})题\(.*?\)',name)
if tmp2:
new_name='Q'+tmp2[0]
else:
pass
if new_name not in code.keys():
j=1
current_name=new_name
new_name=new_name+'_R%s'%j
code[current_name]={}
code[current_name]['content']=current_name+'(问卷星数据中未找到题目具体内容)'
code[current_name]['qlist']=[]
code[current_name]['code']={}
code[current_name]['code_r']={}
code[current_name]['qtype']=u'矩阵单选题'
code[current_name]['name']=''
#code[current_name]['sample_len']=0
d1.rename(columns={name:new_name},inplace=True)
else:
j+=1
new_name=new_name+'_R%s'%j
d1.rename(columns={name:new_name},inplace=True)
#raise Exception(u"can not dection the NO. of question.")
#print('can not dection the NO. of question')
#print(name)
#pass
# 遍历按序号数据,完整编码
d2qlist=d2.columns[6:].tolist()
for name in d2qlist:
tmp1=re.findall(u'^(\d{1,3})[、::]',name)# 单选题和填空题
tmp2=re.findall(u'^第(.*?)题',name)# 多选题、排序题和矩阵单选题
if tmp1:
current_name='Q'+tmp1[0]# 当前题目的题号
d2.rename(columns={name:current_name},inplace=True)
code[current_name]['qlist'].append(current_name)
#code[current_name]['sample_len']=d2[current_name].count()
ind=d2[current_name].copy()
ind=ind.notnull()
c1=d1.loc[ind,current_name].unique()
c2=d2.loc[ind,current_name].unique()
#print('========= %s========'%current_name)
if (c2.dtype == object) or ((list(c1)==list(c2)) and len(c2)>=min(15,len(d2[ind]))) or (len(c2)>50):
code[current_name]['qtype']=u'填空题'
else:
code[current_name]['qtype']=u'单选题'
#code[current_name]['code']=dict(zip(c2,c1))
if 'qlist_open' in code[current_name].keys():
tmp=d1[current_name].map(lambda x: re.findall('〖(.*?)〗',x)[0] if re.findall('〖(.*?)〗',x) else '')
ind_open=np.argwhere(d2.columns.values==current_name).tolist()[0][0]
d2.insert(ind_open+1,current_name+'_open',tmp)
d1[current_name]=d1[current_name].map(lambda x: re.sub('〖.*?〗','',x))
#c1=d1.loc[ind,current_name].map(lambda x: re.sub('〖.*?〗','',x)).unique()
code[current_name]['qlist_open']=[current_name+'_open']
#c2_tmp=d2.loc[ind,current_name].map(lambda x: int(x) if (('%s'%x!='nan') and not(isinstance(x,str)) and (int(x)==x)) else x)
code[current_name]['code']=dict(zip(d2.loc[ind,current_name],d1.loc[ind,current_name]))
#code[current_name]['code']=dict(zip(c2,c1))
elif tmp2:
name0='Q'+tmp2[0]
# 新题第一个选项
if name0 != current_name:
j=1#记录多选题的小题号
current_name=name0
c2=list(d2[name].unique())
if code[current_name]['qtype'] == u'矩阵单选题':
name1='Q'+tmp2[0]+'_R%s'%j
c1=list(d1[name1].unique())
code[current_name]['code']=dict(zip(c2,c1))
#print(dict(zip(c2,c1)))
else:
name1='Q'+tmp2[0]+'_A%s'%j
#code[current_name]['sample_len']=d2[name].notnull().sum()
else:
j+=1#记录多选题的小题号
c2=list(d2[name].unique())
if code[current_name]['qtype'] == u'矩阵单选题':
name1='Q'+tmp2[0]+'_R%s'%j
c1=list(d1[name1].unique())
old_dict=code[current_name]['code'].copy()
new_dict=dict(zip(c2,c1))
old_dict.update(new_dict)
code[current_name]['code']=old_dict.copy()
else:
name1='Q'+tmp2[0]+'_A%s'%j
code[current_name]['qlist'].append(name1)
d2.rename(columns={name:name1},inplace=True)
tmp3=re.findall(u'第.*?题\((.*)\)',name)[0]
if code[current_name]['qtype'] == u'矩阵单选题':
code[current_name]['code_r'][name1]=tmp3
else:
code[current_name]['code'][name1]=tmp3
# 识别开放题
if (code[current_name]['qtype'] == u'多选题'):
openq=tmp3+'〖.*?〗'
openq=re.sub('\)','\)',openq)
openq=re.sub('\(','\(',openq)
openq=re.compile(openq)
qcontent=str(list(d1[current_name]))
if re.findall(openq,qcontent):
tmp=d1[current_name].map(lambda x: re.findall(openq,x)[0] if re.findall(openq,x) else '')
ind=np.argwhere(d2.columns.values==name1).tolist()[0][0]
d2.insert(ind+1,name1+'_open',tmp)
code[current_name]['qlist_open'].append(name1+'_open')
# 删除字典中的nan
keys=list(code[current_name]['code'].keys())
for key in keys:
if '%s'%key == 'nan':
del code[current_name]['code'][key]
# 处理一些特殊题目,给它们的选项固定顺序,例如年龄、收入等
for k in code.keys():
content=code[k]['content']
qtype=code[k]['qtype']
if ('code' in code[k]) and (code[k]['code']!={}):
tmp1=code[k]['code'].keys()
tmp2=code[k]['code'].values()
# 识别选项是否是有序变量
tmp3=[len(re.findall('\d+','%s'%v))>0 for v in tmp2]#是否有数字
tmp4=[len(re.findall('-|~','%s'%v))>0 for v in tmp2]#是否有"-"或者"~"
if (np.array(tmp3).sum()>=len(tmp2)-2) or (np.array(tmp4).sum()>=len(tmp2)*0.8-(1e-17)):
try:
tmp_key=sorted(code[k]['code'],key=lambda c:float(re.findall('[\d\.]+','%s'%c)[-1]))
except:
tmp_key=list(tmp1)
code_order=[code[k]['code'][v] for v in tmp_key]
code[k]['code_order']=code_order
# 识别矩阵量表题
if qtype=='矩阵单选题':
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if (set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10])) and (len(tmp3)==len(tmp2)):
code[k]['weight']=dict(zip(tmp1,tmp3))
continue
# 识别特殊题型
if ('性别' in content) and ('男' in tmp2) and ('女' in tmp2):
code[k]['name']='性别'
if ('gender' in content.lower()) and ('Male' in tmp2) and ('Female' in tmp2):
code[k]['name']='性别'
if (('年龄' in content) or ('age' in content.lower())) and (np.array(tmp3).sum()>=len(tmp2)-1):
code[k]['name']='年龄'
if ('满意度' in content) and ('整体' in content):
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):
code[k]['name']='满意度'
if len(tmp3)==len(tmp2):
code[k]['weight']=dict(zip(tmp1,tmp3))
if ('意愿' in content) and ('推荐' in content):
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):
code[k]['name']='NPS'
if len(tmp3)==len(tmp2):
weight=pd.Series(dict(zip(tmp1,tmp3)))
weight=weight.replace(dict(zip([0,1,2,3,4,5,6,7,8,9,10],[-100,-100,-100,-100,-100,-100,-100,0,0,100,100])))
code[k]['weight']=weight.to_dict()
try:
d2[u'所用时间']=d2[u'所用时间'].map(lambda s: int(s[:-1]))
except:
pass
return (d2,code)
def load_data(method='filedialog',**kwargs):
    '''Load questionnaire data (pre-coded files and wenjuanxing exports only).

    method:
        - 'filedialog': open a file-picker dialog for the user to choose files
        - 'pathsearch': scan a directory given via the ``filepath`` keyword
    kwargs:
        initialdir: initial directory for the file dialog
        filepath:   directory to scan when method == 'pathsearch'

    Returns
    -------
    (data, code): the response DataFrame and the codebook dict.
    '''
    if method=='filedialog':
        import tkinter as tk
        from tkinter.filedialog import askopenfilenames
        tk.Tk().withdraw();
        #print(u'请选择编码所需要的数据文件(支持问卷星和已编码好的数据)')
        if 'initialdir' in kwargs:
            initialdir=kwargs['initialdir']
        elif os.path.isdir('.\\data'):
            initialdir = ".\\data"
        else:
            initialdir = "."
        title =u"请选择编码所需要的数据文件(支持问卷星和已编码好的数据)"
        filetypes = (("Excel files","*.xls;*.xlsx"),("CSV files","*.csv"),("all files","*.*"))
        filenames=[]
        # keep asking until the user picks at least one file
        while len(filenames)<1:
            filenames=askopenfilenames(initialdir=initialdir,title=title,filetypes=filetypes)
            if len(filenames)<1:
                print('请至少选择一个文件.')
        filenames=list(filenames)
    elif method == 'pathsearch':
        if 'filepath' in kwargs:
            filepath=kwargs['filepath']
        else :
            filepath='.\\data\\'
        if os.path.isdir(filepath):
            filenames=os.listdir(filepath)
            filenames=[os.path.join(filepath,s) for s in filenames]
        else:
            print('搜索路径错误')
            # NOTE(review): bare `raise` with no active exception raises a
            # RuntimeError rather than a meaningful error type.
            raise
    # classify each candidate file as coded data / codebook / wenjuanxing export
    info=[]
    for filename in filenames:
        filename_nopath=os.path.split(filename)[1]
        data=read_data(filename)
        # distinct values found in the first column
        field_c1=set(data.iloc[:,0].dropna().unique())
        field_r1=set(data.columns)
        # share of column names that look like question numbers (Q12, q3, ...)
        hqlen=[len(re.findall('^[qQ]\d+',c))>0 for c in field_r1]
        hqrate=hqlen.count(True)/len(field_r1) if len(field_r1)>0 else 0
        rowlens,collens=data.shape
        # share of int/float cells in the table
        rate_real=data.applymap(lambda x:isinstance(x,(int,float))).sum().sum()/rowlens/collens
        tmp={'filename':filename_nopath,'filenametype':'','rowlens':rowlens,'collens':collens,\
        'field_c1':field_c1,'field_r1':field_r1,'type':'','rate_real':rate_real}
        if len(re.findall('^data.*\.xls',filename_nopath))>0:
            tmp['filenametype']='data'
        elif len(re.findall('^code.*\.xls',filename_nopath))>0:
            tmp['filenametype']='code'
        elif len(re.findall('\d+_\d+_\d.xls',filename_nopath))>0:
            tmp['filenametype']='wenjuanxing'
        if tmp['filenametype']=='code' or set(['key','code','qlist','qtype']) < field_c1:
            tmp['type']='code'
        if tmp['filenametype']=='wenjuanxing' or len(set(['序号','提交答卷时间','所用时间','来自IP','来源','来源详情','总分'])&field_r1)>=5:
            tmp['type']='wenjuanxing'
        if tmp['filenametype']=='data' or hqrate>=0.5:
            tmp['type']='data'
        info.append(tmp)
    questype=[k['type'] for k in info]
    # precedence: use a pre-coded data+code pair first, wenjuanxing exports second
    if questype.count('data')*questype.count('code')==1:
        data=read_data(filenames[questype.index('data')])
        code=read_code(filenames[questype.index('code')])
    elif questype.count('wenjuanxing')>=2:
        # pair the wenjuanxing files (label export + numeric export) by comparing
        # their first columns; the tag t encodes group*10 + kind, where kind 0 is
        # the label export (rate_real < 0.5) and kind 2 the numeric export
        filenames=[(f,info[i]['rate_real']) for i,f in enumerate(filenames) if questype[i]=='wenjuanxing']
        tmp=[]
        for f,rate_real in filenames:
            t2=0 if rate_real<0.5 else 2
            d=pd.read_excel(f)
            d=d.iloc[:,0]
            tmp.append((t2,d))
            #print('添加{}'.format(t2))
            tmp_equal=0
            for t,d0 in tmp[:-1]:
                if len(d)==len(d0) and all(d==d0):
                    tmp_equal+=1
            # NOTE(review): `t` here is whatever the comparison loop saw last and
            # is undefined on the very first file -- looks fragile; verify intent.
            tmp[-1]=(t2+int(t/10)*10,tmp[-1][1])
            max_quesnum=max([int(t/10) for t,d in tmp])
            if tmp_equal==0:
                tmp[-1]=(tmp[-1][0]+max_quesnum*10+10,tmp[-1][1])
                #print('修改为{}'.format(tmp[-1][0]))
        # regroup the questionnaire files into (label, numeric) pairs
        questype=[t for t,d in tmp]
        filenames=[f for f,r in filenames]
        quesnums=max([int(t/10) for t in questype])# number of candidate data groups
        filename_wjx=[]
        for i in range(1,quesnums+1):
            if questype.count(i*10)==1 and questype.count(i*10+2)==1:
                filename_wjx.append([filenames[questype.index(i*10)],filenames[questype.index(i*10+2)]])
        if len(filename_wjx)==1:
            data,code=wenjuanxing(filename_wjx[0])
        elif len(filename_wjx)>1:
            # several complete pairs found: let the user pick one
            print('脚本识别出多组问卷星数据,请选择需要编码的数据:')
            for i,f in enumerate(filename_wjx):
                print('{}: {}'.format(i+1,'/'.join([os.path.split(f[0])[1],os.path.split(f[1])[1]])))
            ii=input('您选择的数据是(数据前的编码,如:1):')
            ii=re.sub('\s','',ii)
            if ii.isnumeric():
                data,code=wenjuanxing(filename_wjx[int(ii)-1])
            else:
                print('您输入正确的编码.')
        else:
            print('没有找到任何问卷数据..')
            raise
    else:
        print('没有找到任何数据')
        raise
    return data,code
def spec_rcode(data,code):
    '''Post-process special questions (currently wenjuanxing "city" questions).

    Detects a city question whose answers look like "province-city", splits it
    into a province column (Qxa), a city column (Qxb) and a city-tier column
    (Qxc, 0 = Beijing/Shanghai/Guangzhou/Shenzhen ... 6 = below fifth tier),
    inserting the new columns right after the original and extending ``code``.

    Parameters
    ----------
    data : DataFrame of responses (mutated in place and returned)
    code : codebook dict (mutated in place and returned)

    Returns
    -------
    (data, code) with the extra columns / codebook entries added.
    '''
    # city -> tier lookup table (tier 0 is first-tier; unknown cities become 6)
    city={'北京':0,'上海':0,'广州':0,'深圳':0,'成都':1,'杭州':1,'武汉':1,'天津':1,'南京':1,'重庆':1,'西安':1,'长沙':1,'青岛':1,'沈阳':1,'大连':1,'厦门':1,'苏州':1,'宁波':1,'无锡':1,\
    '福州':2,'合肥':2,'郑州':2,'哈尔滨':2,'佛山':2,'济南':2,'东莞':2,'昆明':2,'太原':2,'南昌':2,'南宁':2,'温州':2,'石家庄':2,'长春':2,'泉州':2,'贵阳':2,'常州':2,'珠海':2,'金华':2,\
    '烟台':2,'海口':2,'惠州':2,'乌鲁木齐':2,'徐州':2,'嘉兴':2,'潍坊':2,'洛阳':2,'南通':2,'扬州':2,'汕头':2,'兰州':3,'桂林':3,'三亚':3,'呼和浩特':3,'绍兴':3,'泰州':3,'银川':3,'中山':3,\
    '保定':3,'西宁':3,'芜湖':3,'赣州':3,'绵阳':3,'漳州':3,'莆田':3,'威海':3,'邯郸':3,'临沂':3,'唐山':3,'台州':3,'宜昌':3,'湖州':3,'包头':3,'济宁':3,'盐城':3,'鞍山':3,'廊坊':3,'衡阳':3,\
    '秦皇岛':3,'吉林':3,'大庆':3,'淮安':3,'丽江':3,'揭阳':3,'荆州':3,'连云港':3,'张家口':3,'遵义':3,'上饶':3,'龙岩':3,'衢州':3,'赤峰':3,'湛江':3,'运城':3,'鄂尔多斯':3,'岳阳':3,'安阳':3,\
    '株洲':3,'镇江':3,'淄博':3,'郴州':3,'南平':3,'齐齐哈尔':3,'常德':3,'柳州':3,'咸阳':3,'南充':3,'泸州':3,'蚌埠':3,'邢台':3,'舟山':3,'宝鸡':3,'德阳':3,'抚顺':3,'宜宾':3,'宜春':3,'怀化':3,\
    '榆林':3,'梅州':3,'呼伦贝尔':3,'临汾':4,'南阳':4,'新乡':4,'肇庆':4,'丹东':4,'德州':4,'菏泽':4,'九江':4,'江门市':4,'黄山':4,'渭南':4,'营口':4,'娄底':4,'永州市':4,'邵阳':4,'清远':4,\
    '大同':4,'枣庄':4,'北海':4,'丽水':4,'孝感':4,'沧州':4,'马鞍山':4,'聊城':4,'三明':4,'开封':4,'锦州':4,'汉中':4,'商丘':4,'泰安':4,'通辽':4,'牡丹江':4,'曲靖':4,'东营':4,'韶关':4,'拉萨':4,\
    '襄阳':4,'湘潭':4,'盘锦':4,'驻马店':4,'酒泉':4,'安庆':4,'宁德':4,'四平':4,'晋中':4,'滁州':4,'衡水':4,'佳木斯':4,'茂名':4,'十堰':4,'宿迁':4,'潮州':4,'承德':4,'葫芦岛':4,'黄冈':4,'本溪':4,\
    '绥化':4,'萍乡':4,'许昌':4,'日照':4,'铁岭':4,'大理州':4,'淮南':4,'延边州':4,'咸宁':4,'信阳':4,'吕梁':4,'辽阳':4,'朝阳':4,'恩施州':4,'达州市':4,'益阳市':4,'平顶山':4,'六安':4,'延安':4,\
    '梧州':4,'白山':4,'阜阳':4,'铜陵市':4,'河源':4,'玉溪市':4,'黄石':4,'通化':4,'百色':4,'乐山市':4,'抚州市':4,'钦州':4,'阳江':4,'池州市':4,'广元':4,'滨州':5,'阳泉':5,'周口市':5,'遂宁':5,\
    '吉安':5,'长治':5,'铜仁':5,'鹤岗':5,'攀枝花':5,'昭通':5,'云浮':5,'伊犁州':5,'焦作':5,'凉山州':5,'黔西南州':5,'广安':5,'新余':5,'锡林郭勒':5,'宣城':5,'兴安盟':5,'红河州':5,'眉山':5,\
    '巴彦淖尔':5,'双鸭山市':5,'景德镇市':5,'鸡西':5,'三门峡':5,'宿州':5,'汕尾':5,'阜新':5,'张掖':5,'玉林':5,'乌兰察布':5,'鹰潭':5,'黑河':5,'伊春':5,'贵港市':5,'漯河':5,'晋城':5,'克拉玛依':5,\
    '随州':5,'保山':5,'濮阳':5,'文山州':5,'嘉峪关':5,'六盘水':5,'乌海':5,'自贡':5,'松原':5,'内江':5,'黔东南州':5,'鹤壁':5,'德宏州':5,'安顺':5,'资阳':5,'鄂州':5,'忻州':5,'荆门':5,'淮北':5,\
    '毕节':5,'巴音郭楞':5,'防城港':5,'天水':5,'黔南州':5,'阿坝州':5,'石嘴山':5,'安康':5,'亳州市':5,'昌吉州':5,'普洱':5,'楚雄州':5,'白城':5,'贺州':5,'哈密':5,'来宾':5,'庆阳':5,'河池':5,\
    '张家界 雅安':5,'辽源':5,'湘西州':5,'朔州':5,'临沧':5,'白银':5,'塔城地区':5,'莱芜':5,'迪庆州':5,'喀什地区':5,'甘孜州':5,'阿克苏':5,'武威':5,'巴中':5,'平凉':5,'商洛':5,'七台河':5,'金昌':5,\
    '中卫':5,'阿勒泰':5,'铜川':5,'海西州':5,'吴忠':5,'固原':5,'吐鲁番':5,'阿拉善盟':5,'博尔塔拉州':5,'定西':5,'西双版纳':5,'陇南':5,'大兴安岭':5,'崇左':5,'日喀则':5,'临夏州':5,'林芝':5,\
    '海东':5,'怒江州':5,'和田地区':5,'昌都':5,'儋州':5,'甘南州':5,'山南':5,'海南州':5,'海北州':5,'玉树州':5,'阿里地区':5,'那曲地区':5,'黄南州':5,'克孜勒苏州':5,'果洛州':5,'三沙':5}
    code_keys=list(code.keys())
    for qq in code_keys:
        qlist=code[qq]['qlist']
        #qtype=code[qq]['qtype']
        content=code[qq]['content']
        ind=list(data.columns).index(qlist[-1])
        data1=data[qlist]
        '''
        识别问卷星中的城市题
        '''
        # heuristics for a wenjuanxing city question:
        # tf1: the question text mentions "城市" (city)
        # tf2: every non-null answer contains the '-' separator
        # tf3: the derived columns have not been added yet
        tf1=u'城市' in content
        tf2=data1[data1.notnull()].applymap(lambda x:'-' in '%s'%x).all().all()
        tf3=(qq+'a' not in data.columns) and (qq+'b' not in data.columns)
        if tf1 and tf2 and tf3:
            # split "province-city"; municipalities get the province name as city
            tmp1=data[qq].map(lambda x:x.split('-')[0])
            tmp2=data[qq].map(lambda x:x.split('-')[1])
            tmp2[tmp1==u'上海']=u'上海'
            tmp2[tmp1==u'北京']=u'北京'
            tmp2[tmp1==u'天津']=u'天津'
            tmp2[tmp1==u'重庆']=u'重庆'
            tmp2[tmp1==u'香港']=u'香港'
            tmp2[tmp1==u'澳门']=u'澳门'
            data.insert(ind+1,qq+'a',tmp1)
            data.insert(ind+2,qq+'b',tmp2)
            code[qq+'a']={'content':'省份','qtype':'填空题','qlist':[qq+'a']}
            code[qq+'b']={'content':'城市','qtype':'填空题','qlist':[qq+'b']}
            # map the city to its tier; anything left as a string becomes tier 6
            tmp3=data[qq+'b'].map(lambda x: city[x] if x in city.keys() else x)
            tmp3=tmp3.map(lambda x: 6 if isinstance(x,str) else x)
            data.insert(ind+3,qq+'c',tmp3)
            code[qq+'c']={'content':'城市分级','qtype':'单选题','qlist':[qq+'c'],\
            'code':{0:'北上广深',1:'新一线',2:'二线',3:'三线',4:'四线',5:'五线',6:'五线以下'}}
    return data,code
def levenshtein(s, t):
    '''Return the Levenshtein (edit) distance between two sequences.

    Iterative dynamic-programming variant that keeps only one row of the
    distance matrix in memory (the classic "two matrix rows" scheme).
    '''
    if s == t:
        return 0
    if not s:
        return len(t)
    if not t:
        return len(s)
    # previous[j] holds the distance between s[:i] and t[:j]
    previous = list(range(len(t) + 1))
    for i, cs in enumerate(s):
        current = [i + 1]
        for j, ct in enumerate(t):
            substitution = previous[j] + (cs != ct)
            current.append(min(current[j] + 1, previous[j + 1] + 1, substitution))
        previous = current
    return previous[len(t)]
def code_similar(code1,code2):
    '''Match every question in code1 with its most similar question in code2.

    Question-content similarity is measured by minimum edit distance
    (``levenshtein``); option similarity falls into a few classes:
      1: options fully identical
      2: single-choice, same key->label pairs on the shared keys, and the
         shared keys cover at least half of the union
      3: multi-choice / ranking / matrix single-choice with at least half of
         the option labels in common, order ignored
      0: anything else
    Matrix single-choice questions are compared on their row labels (code_r).

    Returns a DataFrame indexed by code1's question numbers with columns
    ['qnum', 'similar_content', 'similar_code']; when several questions map
    to the same target only the best candidate is kept, the rest become NaN.
    '''
    code_distance_min=pd.DataFrame(index=code1.keys(),columns=['qnum','similar_content','similar_code'])
    for c1 in code1:
        # content similarity: edit distance against every same-type question of code2
        disstance_str=pd.Series(index=code2.keys())
        for c2 in code2:
            if code1[c1]['qtype']==code2[c2]['qtype']:
                disstance_str[c2]=levenshtein(code1[c1]['content'], code2[c2]['content'])
        c2=disstance_str.idxmin()
        if '%s'%c2 == 'nan':
            continue
        min_len=(len(code1[c1]['content'])+len(code2[c2]['content']))/2
        similar_content=100-100*disstance_str[c2]/min_len if min_len>0 else 0
        # option similarity
        qtype=code2[c2]['qtype']
        if qtype == '单选题':
            t1=code1[c1]['code']
            t2=code2[c2]['code']
            inner_key=list(set(t1.keys())&set(t2.keys()))
            tmp=all([t1[c]==t2[c] for c in inner_key])
            if t1==t2:
                similar_code=1
            elif len(inner_key)>=0.5*len(set(t1.keys())|set(t2.keys())) and tmp:
                similar_code=2
            else:
                similar_code=0
        elif qtype in ['多选题','排序题']:
            t1=code1[c1]['code']
            t2=code2[c2]['code']
            t1=[t1[c] for c in code1[c1]['qlist']]
            t2=[t2[c] for c in code2[c2]['qlist']]
            inner_key=set(t1)&set(t2)
            if t1==t2:
                similar_code=1
            elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):
                similar_code=3
            else:
                similar_code=0
        elif qtype in ['矩阵单选题']:
            # BUGFIX: this branch previously tested for '矩阵多选题', a type
            # that is never produced elsewhere in this module, so matrix
            # single-choice questions could never be matched by row labels.
            t1=code1[c1]['code_r']
            t2=code2[c2]['code_r']
            t1=[t1[c] for c in code1[c1]['qlist']]
            t2=[t2[c] for c in code2[c2]['qlist']]
            inner_key=set(t1)&set(t2)
            if t1==t2:
                similar_code=1
            elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):
                similar_code=3
            else:
                similar_code=0
        elif qtype in ['填空题']:
            similar_code=1
        else:
            similar_code=0
        code_distance_min.loc[c1,'qnum']=c2
        code_distance_min.loc[c1,'similar_content']=similar_content
        code_distance_min.loc[c1,'similar_code']=similar_code
    # when several c1 map to the same c2, keep only the best candidate
    code_distance_min=code_distance_min.sort_values(['qnum','similar_content','similar_code'],ascending=[False,False,True])
    code_distance_min.loc[code_distance_min.duplicated(['qnum']),:]=np.nan
    code_distance_min=pd.DataFrame(code_distance_min,index=code1.keys())
    return code_distance_min
def data_merge(ques1,ques2,qlist1=None,qlist2=None,name1='ques1',name2='ques2',\
mergeqnum='Q0',similar_threshold=70):
    '''Merge two questionnaire datasets into one.

    ques1: list [data1, code1]
    ques2: list [data2, code2]
    qlist1/qlist2: question numbers to merge from each side; when omitted the
        pairing is proposed by code_similar() and confirmed interactively
    name1/name2: labels written into the added "source" question
    mergeqnum: question number used for the added "source" question
    similar_threshold: content similarity (%) required for automatic merging

    Returns (data12, code12): the stacked data and the merged codebook.
    '''
    data1,code1=ques1
    data2,code2=ques2
    if (qlist1 is None) or (qlist2 is None):
        qlist1=[]
        qlist2=[]
        qqlist1=[]
        qqlist2=[]
        code_distance_min=code_similar(code1,code2)
        code1_key=sorted(code1,key=lambda x:int(re.findall('\d+',x)[0]))
        for c1 in code1_key:
            qtype1=code1[c1]['qtype']
            #print('{}:{}'.format(c1,code1[c1]['content']))
            rs_qq=code_distance_min.loc[c1,'qnum']
            similar_content=code_distance_min.loc[c1,'similar_content']
            similar_code=code_distance_min.loc[c1,'similar_code']
            if (similar_content>=similar_threshold) and (similar_code in [1,2]):
                #print('推荐合并第二份数据中的{}({}), 两个题目相似度为为{:.0f}%'.format(rs_qq,code2[rs_qq]['content'],similar))
                print('将自动合并: {} 和 {}'.format(c1,rs_qq))
                user_qq=rs_qq
                qqlist1+=code1[c1]['qlist']
                qqlist2+=code2[user_qq]['qlist']
                qlist1.append(c1)
                qlist2.append(rs_qq)
            elif (similar_content>=similar_threshold) and (similar_code==3):
                # non-single-choice pair: keep only the shared options and
                # re-order them so both sides line up
                t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']
                t1_qlist=code1[c1]['qlist']
                t1_value=[t1[k] for k in t1_qlist]
                t2=code2[rs_qq]['code_r'] if qtype1 =='矩阵单选题' else code2[rs_qq]['code']
                t2_qlist=code2[rs_qq]['qlist']
                t2_value=[t2[k] for k in t2_qlist]
                # keep only the options that exist on both sides
                t1_qlist_new=[q for q in t1_qlist if t1[q] in list(set(t1_value)&set(t2_value))]
                t2_r=dict(zip([s[1] for s in t2.items()],[s[0] for s in t2.items()]))
                t2_qlist_new=[t2_r[s] for s in [t1[q] for q in t1_qlist_new]]
                code1[c1]['qlist']=t1_qlist_new
                # NOTE(review): for matrix questions t1 is code_r, yet the result
                # is written back to 'code' -- verify this is intended.
                code1[c1]['code']={k:t1[k] for k in t1_qlist_new}
                qqlist1+=t1_qlist_new
                qqlist2+=t2_qlist_new
                qlist1.append(c1)
                qlist2.append(rs_qq)
                print('将自动合并: {} 和 {} (只保留了相同的选项)'.format(c1,rs_qq))
            elif similar_code in [1,2]:
                # options match but content similarity is low: ask the user
                print('-'*40)
                print('为【 {}:{} 】自动匹配到: '.format(c1,code1[c1]['content']))
                print('  【 {}:{} 】,其相似度为{:.0f}%.'.format(rs_qq,code2[rs_qq]['content'],similar_content))
                tmp=input('是否合并该组题目,请输入 yes/no (也可以输入第二份数据中其他您需要匹配的题目): ')
                tmp=re.sub('\s','',tmp)
                tmp=tmp.lower()
                if tmp in ['yes','y']:
                    user_qq=rs_qq
                elif tmp in ['no','n']:
                    user_qq=None
                else:
                    tmp=re.sub('^q','Q',tmp)
                    if tmp not in code2:
                        user_qq=None
                    elif tmp==rs_qq:
                        # BUGFIX: when the user typed the recommended question
                        # number itself, no branch assigned user_qq and the
                        # comparison below raised a NameError.
                        user_qq=rs_qq
                    else:
                        print('您输入的是{}:{}'.format(tmp,code2[tmp]['content']))
                        user_qq=tmp
                if user_qq==rs_qq:
                    qqlist1+=code1[c1]['qlist']
                    qqlist2+=code2[user_qq]['qlist']
                    qlist1.append(c1)
                    qlist2.append(user_qq)
                    print('将自动合并: {} 和 {}'.format(c1,rs_qq))
                elif user_qq is not None:
                    # the user picked another question: compare the option codes
                    if 'code' in code1[c1] and len(code1[c1]['code'])>0:
                        t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']
                        t2=code2[user_qq]['code_r'] if code2[user_qq]['qtype'] =='矩阵单选题' else code2[user_qq]['code']
                        if set(t1.values())==set(t2.values()):
                            qqlist1+=code1[c1]['qlist']
                            qqlist2+=code2[user_qq]['qlist']
                            qlist1.append(c1)
                            qlist2.append(user_qq)
                            print('将自动合并: {} 和 {}'.format(c1,user_qq))
                        else:
                            print('两个题目的选项不匹配,将自动跳过.')
                    else:
                        qqlist1+=[code1[c1]['qlist'][0]]
                        qqlist2+=[code2[user_qq]['qlist'][0]]
                        qlist1.append(c1)
                        qlist2.append(user_qq)
                        print('将自动合并: {} 和 {}'.format(c1,user_qq))
                else:
                    print('将自动跳过: {}'.format(c1))
                print('-'*40)
            else:
                print('将自动跳过: {}'.format(c1))
        tmp=input('请问您需要的题目是否都已经合并? 请输入(yes / no): ')
        tmp=re.sub('\s','',tmp)
        tmp=tmp.lower()
        if tmp in ['no','n']:
            print('请确保接下来您要合并的题目类型和选项完全一样.')
            while 1:
                tmp=input('请输入您想合并的题目对,直接回车则终止输入(如: Q1,Q1 ): ')
                tmp=re.sub('\s','',tmp)# strip whitespace
                tmp=re.sub(',',',',tmp)# normalize full-width commas
                tmp=tmp.split(',')
                tmp=[re.sub('^q','Q',qq) for qq in tmp]
                if len(tmp)<2:
                    break
                if tmp[0] in qlist1 or tmp[1] in qlist2:
                    print('该题已经被合并,请重新输入')
                    continue
                if tmp[0] not in code1 or tmp[1] not in code2:
                    print('输入错误, 请重新输入')
                    continue
                c1=tmp[0]
                c2=tmp[1]
                print('您输入的是:')
                print('第一份数据中的【 {}:{} 】'.format(c1,code1[c1]['content']))
                print('第二份数据中的【 {}:{} 】'.format(c2,code2[c2]['content']))
                w=code_similar({c1:code1[c1]},{c2:code2[c2]})
                similar_code=w.loc[c1,'similar_code']
                if similar_code in [1,2] and len(code1[c1]['qlist'])==len(code2[c2]['qlist']):
                    qqlist1+=code1[c1]['qlist']
                    qqlist2+=code2[c2]['qlist']
                    qlist1.append(c1)
                    qlist2.append(c2)
                    print('将自动合并: {} 和 {}'.format(c1,c2))
                else:
                    print('选项不匹配,请重新输入')
    else:
        qqlist1=[]
        for qq in qlist1:
            qqlist1=qqlist1+code1[qq]['qlist']
        qqlist2=[]
        for qq in qlist2:
            qqlist2=qqlist2+code2[qq]['qlist']
    # map question numbers to data column names and stack the two frames
    if mergeqnum in qqlist1:
        mergeqnum=mergeqnum+'merge'
    data1=data1.loc[:,qqlist1]
    data1.loc[:,mergeqnum]=1
    data2=data2.loc[:,qqlist2]
    data2.loc[:,mergeqnum]=2
    if len(qqlist1)!=len(qqlist2):
        print('两份数据选项不完全匹配,请检查....')
        raise
    data2=data2.rename(columns=dict(zip(qqlist2,qqlist1)))
    # BUGFIX: DataFrame.append() was removed in pandas 2.0; pd.concat is the
    # equivalent call and exists in every pandas version this module supports.
    data12=pd.concat([data1,data2],ignore_index=True)
    code12={}
    for i,cc in enumerate(qlist1):
        code12[cc]=code1[cc]
        if 'code' in code1[cc] and 'code' in code2[qlist2[i]]:
            code12[cc]['code'].update(code2[qlist2[i]]['code'])
    code12[mergeqnum]={'content':u'来源','code':{1:name1,2:name2},'qtype':u'单选题','qlist':[mergeqnum]}
    return data12,code12
## ===========================================================
#
#
# 数据清洗 #
#
#
## ==========================================================
def clean_ftime(ftime,cut_percent=0.25):
    '''Suggest a completion-time cutoff for discarding rushed questionnaires.

    ftime: Series of completion times in seconds.
    Idea:
    1. look only at the fastest ``cut_percent`` of the sample;
    2. fit a quartic to the cumulative count curve and locate the bend
       (zero of the third derivative) -- that time is the cutoff.

    Returns r; samples with ftime < r are recommended for removal.
    '''
    t_min=int(ftime.min())
    t_cut=int(ftime.quantile(cut_percent))
    x=np.array(range(t_min,t_cut))
    y=np.array([len(ftime[ftime<=i]) for i in range(t_min,t_cut)])
    # BUGFIX: a degree-4 polyfit needs at least 5 sample points; with fewer
    # (e.g. a very narrow time range) the fit crashed -- fall back to t_min,
    # which recommends cutting nothing extra.
    if len(x)<5:
        return t_min
    z1 = np.polyfit(x, y, 4) # fitted quartic coefficients
    z2=np.polyder(z1,2) # second derivative
    r=np.roots(np.polyder(z2,1)) # zero of the third derivative (single root)
    r=int(r[0])
    return r
## ===========================================================
#
#
# 数据分析和输出 #
#
#
## ==========================================================
def data_auto_code(data):
    '''Infer a codebook from already-coded questionnaire data.

    Input
        data: DataFrame whose column names follow the Qi / Qi_xx pattern
    Output
        code: codebook dict with per-question 'content', 'qtype', 'qlist'
        and (except for open questions) 'code' mappings.
    '''
    data=pd.DataFrame(data)
    columns=data.columns
    columns=[c for c in columns if re.match('Q\d+',c)]
    code={}
    for cc in columns:
        # question number: everything before the first underscore
        if '_' not in cc:
            key=cc
        else:
            key=cc.split('_')[0]
        # first time we meet this question: start a fresh entry
        if key not in code:
            code[key]={}
            code[key]['qlist']=[]
            code[key]['code']={}
            code[key]['content']=key
            code[key]['qtype']=''
        # attach the column to the question's item list (or open-answer list)
        if key == cc:
            code[key]['qlist']=[key]
        elif re.findall('^'+key+'_[a-zA-Z]{0,}\d+$',cc):
            code[key]['qlist'].append(cc)
        else:
            if 'qlist_open' in code[key]:
                code[key]['qlist_open'].append(cc)
            else:
                code[key]['qlist_open']=[cc]
    for kk in code.keys():
        dd=data[code[kk]['qlist']]
        # a single column is either a single-choice or an open-text question
        if len(dd.columns)==1:
            tmp=dd[dd.notnull()].iloc[:,0].unique()
            if dd.iloc[:,0].value_counts().mean() >=2:
                # each distinct value occurs at least twice on average: categorical
                code[kk]['qtype']=u'单选题'
                code[kk]['code']=dict(zip(tmp,tmp))
            else:
                code[kk]['qtype']=u'填空题'
                del code[kk]['code']
        else:
            # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0;
            # .values is the long-standing equivalent.
            tmp=set(dd[dd.notnull()].values.flatten())
            if set(tmp)==set([0,1]):
                code[kk]['qtype']=u'多选题'
                code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
            elif 'R' in code[kk]['qlist'][0]:
                code[kk]['qtype']=u'矩阵单选题'
                code[kk]['code_r']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
                code[kk]['code']=dict(zip(list(tmp),list(tmp)))
            else:
                code[kk]['qtype']=u'排序题'
                code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
    return code
def save_data(data,filename=u'data.xlsx',code=None):
    '''Save questionnaire data to disk.

    The output format is chosen from the filename extension (.xlsx/.xls or
    .csv). When a codebook ``code`` is given, coded values are written back
    as their text labels and column names are expanded with the question
    content.
    '''
    savetype=os.path.splitext(filename)[1][1:]
    data1=data.copy()
    if code:
        for qq in code.keys():
            qtype=code[qq]['qtype']
            qlist=code[qq]['qlist']
            if qtype == u'单选题':
                # map codes to labels, append the question content to the name
                data1[qlist[0]].replace(code[qq]['code'],inplace=True)
                data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)
            elif qtype == u'矩阵单选题':
                # same as single-choice, applied to every row of the matrix.
                # BUGFIX: replace(..., inplace=True) used to run on the temporary
                # copy returned by data1[qlist] (chained indexing) and the
                # relabelling was silently lost; assign the result back instead.
                data1[qlist]=data1[qlist].replace(code[qq]['code'])
                tmp1=code[qq]['qlist']
                tmp2=['{}({})'.format(q,code[qq]['code_r'][q]) for q in tmp1]
                data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)
            elif qtype in [u'排序题']:
                # collapse into one text column inserted before the items,
                # then relabel the item columns
                tmp=data[qlist]
                tmp=tmp.rename(columns=code[qq]['code'])
                tmp=dataCode_to_text(tmp)
                ind=list(data1.columns).index(qlist[0])
                qqname='{}({})'.format(qq,code[qq]['content'])
                data1.insert(ind,qqname,tmp)
                tmp1=code[qq]['qlist']
                tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in tmp1]
                data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)
            elif qtype in [u'多选题']:
                # collapse into one text column, then turn every 0/1 item
                # column into '' / its label
                tmp=data[qlist]
                tmp=tmp.rename(columns=code[qq]['code'])
                tmp=dataCode_to_text(tmp)
                ind=list(data1.columns).index(qlist[0])
                qqname='{}({})'.format(qq,code[qq]['content'])
                data1.insert(ind,qqname,tmp)
                for q in qlist:
                    data1[q].replace({0:'',1:code[qq]['code'][q]},inplace=True)
                tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in qlist]
                data1.rename(columns=dict(zip(qlist,tmp2)),inplace=True)
            else:
                data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)
    if (savetype == u'xlsx') or (savetype == u'xls'):
        data1.to_excel(filename,index=False)
    elif savetype == u'csv':
        data1.to_csv(filename,index=False)
def read_data(filename):
    '''Read a data file into a DataFrame based on its extension.

    Supported: .xlsx/.xls (read_excel) and .csv (read_csv).
    Raises ValueError for any other extension.
    '''
    savetype=os.path.splitext(filename)[1][1:]
    if (savetype==u'xlsx') or (savetype==u'xls'):
        data=pd.read_excel(filename)
    elif savetype==u'csv':
        data=pd.read_csv(filename)
    else:
        # BUGFIX: the original printed a message and fell through to
        # `return data`, raising a confusing NameError; fail explicitly.
        raise ValueError('can not read file: unsupported extension %r' % savetype)
    return data
def sa_to_ma(data):
    '''Expand single-choice data into a 0/1 multi-choice matrix.

    One output column per distinct answer; rows whose source value is
    missing become all-NaN, which is why ``pd.get_dummies`` (which would
    emit zeros there) is not used.
    '''
    if isinstance(data, pd.core.frame.DataFrame):
        data = data[data.columns[0]]
    #categorys=sorted(data[data.notnull()].unique())
    answers = data[data.notnull()].unique()
    try:
        answers = sorted(answers)
    except Exception:
        # mixed, unorderable answer types: keep the original order
        pass
    expanded = pd.DataFrame(index=data.index, columns=answers)
    for ans in answers:
        expanded[ans] = data.map(lambda x: int(x == ans))
    expanded.loc[data.isnull(), :] = np.nan
    return expanded
def to_dummpy(data,code,qqlist=None,qtype_new='多选题',ignore_open=True):
    '''Convert questionnaire data to dummy (0/1) variables.

    Every single-choice question is expanded into one 0/1 column per
    option; multi-choice, ranking and matrix questions are passed through;
    open/free-text questions are dropped. Returns (bdata, bcode): a large
    0/1 DataFrame and the matching codebook.
    '''
    if qqlist is None:
        qqlist=sorted(code,key=lambda x:int(re.findall('\d+',x)[0]))
    bdata=pd.DataFrame()
    bcode={}
    for qq in qqlist:
        qtype=code[qq]['qtype']
        data0=data[code[qq]['qlist']]
        if qtype=='单选题':
            data0=data0.iloc[:,0]
            categorys=data0[data0.notnull()].unique()
            try:
                categorys=sorted(categorys)
            except :
                pass
            # ignore answers that are not in the codebook (e.g. open text)
            categorys=[t for t in categorys if t in code[qq]['code']]
            cname=[code[qq]['code'][k] for k in categorys]
            columns_name=['{}_A{}'.format(qq,i+1) for i in range(len(categorys))]
            tmp=pd.DataFrame(index=data0.index,columns=columns_name)
            for i,c in enumerate(categorys):
                tmp[columns_name[i]]=data0.map(lambda x : int(x==c))
            #tmp.loc[data0.isnull(),:]=0
            code_tmp={'content':code[qq]['content'],'qtype':qtype_new}
            code_tmp['code']=dict(zip(columns_name,cname))
            code_tmp['qlist']=columns_name
            bcode.update({qq:code_tmp})
            bdata=pd.concat([bdata,tmp],axis=1)
        elif qtype in ['多选题','排序题','矩阵单选题']:
            bdata=pd.concat([bdata,data0],axis=1)
            bcode.update({qq:code[qq]})
    bdata=bdata.fillna(0)
    try:
        # BUGFIX: the old call passed raise_on_error=False, a keyword removed
        # from pandas long ago; the resulting TypeError was swallowed below and
        # the int cast silently never happened.
        bdata=bdata.astype(np.int64)
    except :
        pass
    return bdata,bcode
def qdata_flatten(data,code,quesid=None,userid_begin=None):
    '''Flatten questionnaire data into one row per (user, item).

    Fields of the outputs:
    userid: respondent id (generated from userid_begin, default 1000000+)
    quesid: questionnaire id (only when given)
    qnum: question number
    qname: question content
    qtype: question type
    samplelen: sample size of the question
    itemnum: option number
    itemname: option content
    code: the respondent's choice
    codename: label of that choice
    count: frequency
    percent(%): share of the sample (percentage)

    NOTE(review): this reassigns ``data.index`` on the caller's frame before
    dummy-coding -- side effect to be aware of.

    Returns (qdata, quesinfo): the flat per-user table and the aggregated
    per-option statistics.
    '''
    if not userid_begin:
        userid_begin=1000000
    data.index=[userid_begin+i+1 for i in range(len(data))]
    # survey period, if the wenjuanxing submit-time column is present
    if '提交答卷时间' in data.columns:
        begin_date=pd.to_datetime(data['提交答卷时间']).min().strftime('%Y-%m-%d')
        end_date=pd.to_datetime(data['提交答卷时间']).max().strftime('%Y-%m-%d')
    else:
        begin_date=''
        end_date=''
    data,code=to_dummpy(data,code,qtype_new='单选题')
    # flat lookup: item column name -> item label (rows for matrix questions)
    code_item={}
    for qq in code:
        if code[qq]['qtype']=='矩阵单选题':
            code_item.update(code[qq]['code_r'])
        else :
            code_item.update(code[qq]['code'])
    qdata=data.stack().reset_index()
    qdata.columns=['userid','qn_an','code']
    qdata['qnum']=qdata['qn_an'].map(lambda x:x.split('_')[0])
    qdata['itemnum']=qdata['qn_an'].map(lambda x:'_'.join(x.split('_')[1:]))
    if quesid:
        qdata['quesid']=quesid
        qdata=qdata[['userid','quesid','qnum','itemnum','code']]
    else:
        qdata=qdata[['userid','qnum','itemnum','code']]
    # descriptive statistics per question/option
    samplelen=qdata.groupby(['userid','qnum'])['code'].sum().map(lambda x:int(x>0)).unstack().sum()
    quesinfo=qdata.groupby(['qnum','itemnum','code'])['code'].count()
    quesinfo.name='count'
    quesinfo=quesinfo.reset_index()
    quesinfo=quesinfo[quesinfo['code']!=0]
    #quesinfo=qdata.groupby(['quesid','qnum','itemnum'])['code'].sum()
    quesinfo['samplelen']=quesinfo['qnum'].replace(samplelen.to_dict())
    quesinfo['percent(%)']=0
    quesinfo.loc[quesinfo['samplelen']>0,'percent(%)']=100*quesinfo.loc[quesinfo['samplelen']>0,'count']/quesinfo.loc[quesinfo['samplelen']>0,'samplelen']
    quesinfo['qname']=quesinfo['qnum'].map(lambda x: code[x]['content'])
    quesinfo['qtype']=quesinfo['qnum'].map(lambda x: code[x]['qtype'])
    quesinfo['itemname']=quesinfo['qnum']+quesinfo['itemnum'].map(lambda x:'_%s'%x)
    quesinfo['itemname']=quesinfo['itemname'].replace(code_item)
    #quesinfo['itemname']=quesinfo['qn_an'].map(lambda x: code[x.split('_')[0]]['code_r'][x] if \
    #code[x.split('_')[0]]['qtype']=='矩阵单选题' else code[x.split('_')[0]]['code'][x])
    # meaning of each choice value (否/是 for 0/1; row label or TopN otherwise)
    quesinfo['codename']=''
    quesinfo.loc[quesinfo['code']==0,'codename']='否'
    quesinfo.loc[quesinfo['code']==1,'codename']='是'
    quesinfo['tmp']=quesinfo['qnum']+quesinfo['code'].map(lambda x:'_%s'%int(x))
    quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='矩阵单选题'),'tmp']\
    .map(lambda x: code[x.split('_')[0]]['code'][int(x.split('_')[1])]))
    quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='排序题'),'tmp'].map(lambda x: 'Top{}'.format(x.split('_')[1])))
    quesinfo['begin_date']=begin_date
    quesinfo['end_date']=end_date
    if quesid:
        quesinfo['quesid']=quesid
        quesinfo=quesinfo[['quesid','begin_date','end_date','qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]
    else:
        quesinfo=quesinfo[['qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]
    # sort by question number then item number (natural numeric order)
    # NOTE(review): cat.set_categories(..., inplace=True) is deprecated/removed
    # in newer pandas -- confirm the target pandas version.
    quesinfo['qnum']=quesinfo['qnum'].astype('category')
    quesinfo['qnum'].cat.set_categories(sorted(list(quesinfo['qnum'].unique()),key=lambda x:int(re.findall('\d+',x)[0])), inplace=True)
    quesinfo['itemnum']=quesinfo['itemnum'].astype('category')
    quesinfo['itemnum'].cat.set_categories(sorted(list(quesinfo['itemnum'].unique()),key=lambda x:int(re.findall('\d+',x)[0])), inplace=True)
    quesinfo=quesinfo.sort_values(['qnum','itemnum','code'])
    return qdata,quesinfo
def confidence_interval(p,n,alpha=0.05):
    '''Half-width of the normal-approximation confidence interval for a proportion.

    p: observed proportion
    n: sample size
    alpha: significance level (default 0.05 -> 95% confidence)

    Returns the margin of error z * sqrt(p(1-p)/n).
    '''
    import scipy.stats as stats
    z = stats.norm.ppf(1 - alpha / 2)
    return z * (p * (1 - p) / n) ** 0.5
def sample_size_cal(interval,N,alpha=0.05):
    '''Required survey sample size for a given margin of error.

    Reference: https://www.surveysystem.com/sscalc.htm
    interval: margin of error, e.g. 0.03 (values > 1 are read as percentages)
    N: population size (beyond ~10k it barely changes the result)
    alpha: significance level, default 0.05 (95% confidence)
    '''
    import scipy.stats as stats
    z = stats.norm.ppf(1 - alpha / 2)
    # allow the margin to be given as a percentage (e.g. 3 meaning 3%)
    margin = interval / 100 if interval > 1 else interval
    # worst case p = 0.5 gives z^2 * 0.25 / margin^2
    size = z ** 2 / 4 / margin ** 2
    if N:
        # finite-population correction
        size = size * N / (size + N)
    return int(round(size))
def gof_test(fo,fe=None,alpha=0.05):
    '''Chi-square goodness-of-fit test.

    fo: observed frequencies
    fe: expected frequencies; default is the uniform expectation (total
        spread evenly over the cells)
    Returns:
        1: the sample differs from the population
        0: no significant difference
    Example:
        gof_test(np.array([0.3,0.4,0.3])*222)
    '''
    import scipy.stats as stats
    fo=np.array(fo).flatten()
    C=len(fo)
    # BUGFIX: `if not fe:` raised an ambiguous-truth ValueError whenever fe
    # was passed as a numpy array; test for None explicitly instead.
    if fe is None:
        N=fo.sum()
        fe=np.array([N/C]*C)
    else:
        fe=np.array(fe).flatten()
    chi_value=(fo-fe)**2/fe
    chi_value=chi_value.sum()
    # critical value at the 1-alpha quantile with C-1 degrees of freedom
    chi_value_fit=stats.chi2.ppf(q=1-alpha,df=C-1)
    #CV=np.sqrt((fo-fe)**2/fe**2/(C-1))*100
    if chi_value>chi_value_fit:
        result=1
    else:
        result=0
    return result
def chi2_test(fo,alpha=0.05):
    '''Pearson chi-square independence test on a contingency table.

    fo: observed frequency table (anything DataFrame-convertible)
    Returns (significant, p_value), where significant is True when
    p <= alpha (equivalent to the observed statistic exceeding the
    critical value).
    '''
    import scipy.stats as stats
    table = pd.DataFrame(fo)
    chi_result = stats.chi2_contingency(observed=table)
    p_value = chi_result[1]
    return (p_value <= alpha, p_value)
def fisher_exact(fo,alpha=0.05):
    '''Fisher exact test, delegated to R through the ``pyper`` bridge.

    A pure-Python m*n implementation exists
    (https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/)
    but still has issues, hence the R call.

    Returns (result, p_value): result is 1 when p < alpha, else 0.
    '''
    import pyper as pr
    r_session = pr.R(use_pandas=True, use_numpy=True)
    r_session.assign('fo', fo)
    r_session("b<-fisher.test(fo)")
    p_value = r_session['b']['p.value']
    result = 1 if p_value < alpha else 0
    return (result, p_value)
def anova(data,formula):
    '''Analysis of variance via an OLS fit (statsmodels).

    data: DataFrame containing the numeric and categorical columns
    formula: patsy formula relating them, e.g.
        'value ~ C(group1) + C(group2) + C(group1):C(group2)'

    Returns the ANOVA table (total variance split into between-group and
    within-group components; their ratio drives the F test):
        df: degrees of freedom
        sum_sq: sum of squared errors
        mean_sq: sum_sq divided by its degrees of freedom
        F: ratio of the mean squares
        PR(>F): p-value; below 0.05 indicates a significant difference
    '''
    import statsmodels.api as sm
    from statsmodels.formula.api import ols
    fitted = ols(formula, data=data).fit()  # C(...) marks categorical terms
    return sm.stats.anova_lm(fitted)
def mca(X,N=2):
    '''Correspondence analysis (two-way only for now).

    X: observed frequency (contingency) table
    N: number of dimensions to return, default 2

    Returns (pr, pc, inertia): row coordinates, column coordinates and the
    cumulative explained inertia per dimension. Plot e.g. with
    fig=scatter([pr,pc]); fig.savefig('mca.png').
    '''
    from scipy.linalg import diagsvd
    total = X.sum().sum()
    Z = X / total  # correspondence matrix
    row_mass = Z.sum(axis=1)
    col_mass = Z.sum()
    D_r = np.diag(1 / np.sqrt(row_mass))
    D_c = np.diag(1 / np.sqrt(col_mass))
    residuals = Z - np.outer(row_mass, col_mass)  # standardized residuals matrix
    # another option, not pursued here, is sklearn.decomposition.TruncatedSVD
    U, s, Vt = np.linalg.svd(np.dot(np.dot(D_r, residuals), D_c))
    pr = np.dot(np.dot(D_r, U), diagsvd(s[:N], U.shape[0], N))
    pc = np.dot(np.dot(D_c, Vt.T), diagsvd(s[:N], Vt.shape[0], N))
    inertia = (np.cumsum(s ** 2) / np.sum(s ** 2)).tolist()
    if isinstance(X, pd.DataFrame):
        pr = pd.DataFrame(pr, index=X.index, columns=list('XYZUVW')[:N])
        pc = pd.DataFrame(pc, index=X.columns, columns=list('XYZUVW')[:N])
    return pr, pc, inertia
'''
w=pd.ExcelWriter(u'mca_.xlsx')
pr.to_excel(w,startrow=0,index_label=True)
pc.to_excel(w,startrow=len(pr)+2,index_label=True)
w.save()
'''
def cluster(data,code,cluster_qq,n_clusters='auto',max_clusters=7):
    '''Cluster respondents on an attitude-question battery with KMeans.

    data: survey data (DataFrame); a new column with the cluster labels is
          written back into it
    code: coding dict; a new entry for the cluster variable is added
    cluster_qq: question id of the attitude battery, e.g. 'Q8'
    n_clusters: 'auto' to pick k from the silhouette curve, or an int
    max_clusters: upper bound for the automatic choice of k

    Returns (data, code, parameters); parameters records the fit metrics.
    '''
    from sklearn.cluster import KMeans
    #from sklearn.decomposition import PCA
    from sklearn import metrics
    #import prince
    # next unused question number: the cluster labels are stored under it
    qq_max=sorted(code,key=lambda x:int(re.findall('\d+',x)[0]))[-1]
    new_cluster='Q{}'.format(int(re.findall('\d+',qq_max)[0])+1)
    #new_cluster='Q32'
    qlist=code[cluster_qq]['qlist']
    X=data[qlist]
    # drop respondents who gave (almost) the same score to every item
    std_t=min(1.41/np.sqrt(len(qlist)),0.40) if len(qlist)>=8 else 0.10
    X=X[X.T.std()>std_t]
    index_bk=X.index# index backup so labels can be written back later
    X.fillna(0,inplace=True)
    # z-score each respondent's answers
    X1=X.T
    X1=(X1-X1.mean())/X1.std()
    X1=X1.T.values  # fix: .as_matrix() was removed in pandas 1.0
    if n_clusters == 'auto':
        # choose k: scan k=2..14 and evaluate each clustering
        silhouette_score=[]# silhouette coefficients
        SSE_score=[]
        klist=np.arange(2,15)
        for k in klist:
            est = KMeans(k)
            est.fit(X1)
            tmp=np.sum((X1-est.cluster_centers_[est.labels_])**2)
            SSE_score.append(tmp)
            tmp=metrics.silhouette_score(X1, est.labels_)
            silhouette_score.append(tmp)
        '''
        fig = plt.figure(1)
        ax = fig.add_subplot(111)
        fig = plt.figure(2)
        ax.plot(klist,np.array(silhouette_score))
        ax = fig.add_subplot(111)
        ax.plot(klist,np.array(SSE_score))
        '''
        # find the first local maximum (knee) of the silhouette curve
        ss=np.array(silhouette_score)
        t1=[False]+list(ss[1:]>ss[:-1])
        t2=list(ss[:-1]>ss[1:])+[False]
        k_log=[t1[i]&t2[i] for i in range(len(t1))]
        if True in k_log:
            k=k_log.index(True)
        else:
            k=1
        k=k if k<=max_clusters-2 else max_clusters-2 # cap at max_clusters classes
        k_best=klist[k]
    else:
        k_best=n_clusters
    est = KMeans(k_best)
    est.fit(X1)
    # fit metrics
    SSE=np.sqrt(np.sum((X1-est.cluster_centers_[est.labels_])**2)/len(X1))
    silhouette_score=metrics.silhouette_score(X1, est.labels_)
    print('有效样本数:{},特征数:{},最佳分类个数:{} 类'.format(len(X1),len(qlist),k_best))
    print('SSE(样本到所在类的质心的距离)为:{:.2f},轮廊系数为: {:.2f}'.format(SSE,silhouette_score))
    # optional dimensionality-reduction plots, kept for reference:
    '''
    X_PCA = PCA(2).fit_transform(X1)
    kwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),
              edgecolor='none', alpha=0.6)
    labels=pd.Series(est.labels_)
    plt.figure()
    plt.scatter(X_PCA[:, 0], X_PCA[:, 1], c=labels, **kwargs)
    '''
    '''
    # 三维立体图
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X_PCA[:, 0], X_PCA[:, 1],X_PCA[:, 2], c=labels, **kwargs)
    '''
    # write the results back into data/code
    parameters={'methods':'kmeans','inertia':est.inertia_,'SSE':SSE,'silhouette':silhouette_score,\
    'n_clusters':k_best,'n_features':len(qlist),'n_samples':len(X1),'qnum':new_cluster,\
    'data':X1,'labels':est.labels_}
    data[new_cluster]=pd.Series(est.labels_,index=index_bk)
    code[new_cluster]={'content':'态度题聚类结果','qtype':'单选题','qlist':[new_cluster],
    'code':dict(zip(range(k_best),['cluster{}'.format(i+1) for i in range(k_best)]))}
    print('结果已经存进数据, 题号为:{}'.format(new_cluster))
    return data,code,parameters
'''
# 对应分析
t=data.groupby([new_cluster])[code[cluster_qq]['qlist']].mean()
t.columns=['R{}'.format(i+1) for i in range(len(code[cluster_qq]['qlist']))]
t=t.rename(index=code[new_cluster]['code'])
ca=prince.CA(t)
ca.plot_rows_columns(show_row_labels=True,show_column_labels=True)
'''
def scatter(data,legend=False,title=None,font_ch=None,find_path=None):
    '''Draw a scatter plot with a text label on every point.

    data: DataFrame (first two columns are x/y, index holds the labels) or a
          list of up to four such DataFrames
    legend: show a legend
    title: figure title
    font_ch: explicit path to a Chinese-capable font file; when None a set of
             common fonts is searched for
    find_path: extra directory to search for the fonts

    Returns the matplotlib Figure.
    '''
    import matplotlib.font_manager as fm
    if font_ch is None:
        fontlist=['calibri.ttf','simfang.ttf','simkai.ttf','simhei.ttf','simsun.ttc','msyh.ttf','msyh.ttc']
        myfont=''
        # fix: previously find_paths was undefined (NameError) when a
        # find_path argument was given; now the given directory is searched
        find_paths=[find_path] if find_path else ['C:\\Windows\\Fonts','']
        # later entries in fontlist / find_paths take precedence
        for find_path in find_paths:
            for f in fontlist:
                if os.path.exists(os.path.join(find_path,f)):
                    myfont=os.path.join(find_path,f)
        if len(myfont)==0:
            print('没有找到合适的中文字体绘图,请检查.')
            myfont=None
        else:
            myfont = fm.FontProperties(fname=myfont)
    else:
        myfont=fm.FontProperties(fname=font_ch)
    fig, ax = plt.subplots()
    #ax.grid('on')
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    ax.axhline(y=0, linestyle='-', linewidth=1.2, alpha=0.6)
    ax.axvline(x=0, linestyle='-', linewidth=1.2, alpha=0.6)
    # fix: 'dark' is not a valid matplotlib color name and raised a
    # ValueError for the fourth dataset; use 'black' instead
    color=['blue','red','green','black']
    if not isinstance(data,list):
        data=[data]
    for i,dd in enumerate(data):
        ax.scatter(dd.iloc[:,0], dd.iloc[:,1], c=color[i], s=50,
                   label=dd.columns[1])
        for _, row in dd.iterrows():
            ax.annotate(row.name, (row.iloc[0], row.iloc[1]), color=color[i],fontproperties=myfont,fontsize=10)
    ax.axis('equal')
    if legend:
        ax.legend(loc='best')
    if title:
        ax.set_title(title,fontproperties=myfont)
    return fig
def sankey(df,filename=None):
    '''Build the link/node tables for a Sankey diagram.

    The columns of df are the left-hand nodes and the rows the right-hand
    nodes.  No good pure-Python renderer was available, so this only prepares
    the data for R's networkD3 package.

    df: DataFrame of flows (rows: targets, columns: sources)
    filename: when given, also write <filename>_links.csv and
              <filename>_nodes.csv (UTF-8)

    Returns (links, nodes).
    # R code example
    library(networkD3)
    dd=read.csv('price_links.csv')
    links<-data.frame(source=dd$from,target=dd$to,value=dd$value)
    nodes=read.csv('price_nodes.csv',encoding = 'UTF-8')
    nodes<-nodes['name']
    Energy=c(links=links,nodes=nodes)
    sankeyNetwork(Links = links, Nodes = nodes, Source = "source",
                  Target = "target", Value = "value", NodeID = "name",
                  units = "TWh",fontSize = 20,fontFamily='微软雅黑',nodeWidth=20)
    '''
    # node table: synthetic 'Total' root (id 0), then columns (ids 1..C),
    # then rows (ids C+1..C+R)
    nodes=['Total']
    nodes=nodes+list(df.columns)+list(df.index)
    nodes=pd.DataFrame(nodes)
    nodes['id']=range(len(nodes))
    nodes.columns=['name','id']
    R,C=df.shape
    # fix: .values replaces DataFrame.as_matrix(), removed in pandas 1.0
    t1=pd.DataFrame(df.values,columns=range(1,C+1),index=range(C+1,R+C+1))
    t1.index.name='to'
    t1.columns.name='from'
    links=t1.unstack().reset_index(name='value')
    # links from the Total root to every column node
    links0=pd.DataFrame({'from':[0]*C,'to':range(1,C+1),'value':list(df.sum())})
    # fix: pd.concat replaces DataFrame.append, removed in pandas 2.0
    links=pd.concat([links0,links])
    if filename:
        links.to_csv(filename+'_links.csv',index=False,encoding='utf-8')
        nodes.to_csv(filename+'_nodes.csv',index=False,encoding='utf-8')
    return (links,nodes)
def table(data,code,total=True):
    '''Descriptive statistics for a single question.

    data: answers of the question (one or more columns, matching code['qlist'])
    code: coding dict of the question (qtype / qlist / code / weight / ...)
    total: whether to keep the "合计" (total) row in the output tables

    Returns a dict:
    'fop': percentage table (sums to 1 for single-choice questions; for
           multiple-choice the denominator is the sample size)
    'fo' : observed frequency table, with a "合计" (total) row appended
    'fw' : weighted score (mean / T2B etc.); only present when 'weight'
           exists in code (always computed for matrix single-choice)
    '''
    qtype=code['qtype']
    index=code['qlist']
    data=pd.DataFrame(data)
    # sample size = respondents who answered at least one of the columns
    sample_len=data[code['qlist']].notnull().T.any().sum()
    result={}
    if qtype == u'单选题':
        # single-choice question
        fo=data.iloc[:,0].value_counts()
        if 'weight' in code:
            # weighted mean over the answers that have a declared weight
            w=pd.Series(code['weight'])
            fo1=fo[w.index][fo[w.index].notnull()]
            fw=(fo1*w).sum()/fo1.sum()
            result['fw']=fw
        fo.sort_values(ascending=False,inplace=True)
        fop=fo.copy()
        fop=fop/fop.sum()*1.0
        fop[u'合计']=fop.sum()
        fo[u'合计']=fo.sum()
        if 'code' in code:
            fop.rename(index=code['code'],inplace=True)
            fo.rename(index=code['code'],inplace=True)
        fop.name=u'占比'
        fo.name=u'频数'
        fop=pd.DataFrame(fop)
        fo=pd.DataFrame(fo)
        result['fo']=fo
        result['fop']=fop
    elif qtype == u'多选题':
        # multiple-choice question: each column is one option (0/1)
        fo=data.sum()
        fo.sort_values(ascending=False,inplace=True)
        fo[u'合计']=fo.sum()
        if 'code' in code:
            fo.rename(index=code['code'],inplace=True)
        fop=fo.copy()
        fop=fop/sample_len
        fop.name=u'占比'
        fo.name=u'频数'
        fop=pd.DataFrame(fop)
        fo=pd.DataFrame(fo)
        result['fop']=fop
        result['fo']=fo
    elif qtype == u'矩阵单选题':
        # matrix single-choice: one frequency column per sub-question
        fo=pd.DataFrame(columns=code['qlist'],index=sorted(code['code']))
        for i in fo.columns:
            fo.loc[:,i]=data[i].value_counts()
        if 'weight' not in code:
            # default weights: use the answer codes themselves
            code['weight']=dict(zip(code['code'].keys(),code['code'].keys()))
        fw=pd.DataFrame(columns=[u'加权'],index=code['qlist'])
        w=pd.Series(code['weight'])
        for c in fo.columns:
            t=fo[c]
            t=t[w.index][t[w.index].notnull()]
            if t.sum()>1e-17:
                fw.loc[c,u'加权']=(t*w).sum()/t.sum()
            else:
                fw.loc[c,u'加权']=0
        fw.rename(index=code['code_r'],inplace=True)
        result['fw']=fw
        result['weight']=','.join(['{}:{}'.format(code['code'][c],code['weight'][c]) for c in code['code']])
        fo.rename(columns=code['code_r'],index=code['code'],inplace=True)
        fop=fo.copy()
        fop=fop/sample_len
        result['fop']=fop
        result['fo']=fo
    elif qtype == u'排序题':
        # ranking question: provide both a composite score and TOP1 counts.
        # The composite treats the question like single-choice, assigning
        # each rank a weight; the weights sum to 1.
        #topn=max([len(data[q][data[q].notnull()].unique()) for q in index])
        #topn=len(index)
        topn=data[index].fillna(0).max().max()
        topn=int(topn)
        qsort=dict(zip([i+1 for i in range(topn)],[(topn-i)*2.0/(topn+1)/topn for i in range(topn)]))
        top1=data.applymap(lambda x:int(x==1))
        data_weight=data.replace(qsort)
        t1=pd.DataFrame()
        t1['TOP1']=top1.sum()
        t1[u'综合']=data_weight.sum()
        t1.sort_values(by=u'综合',ascending=False,inplace=True)
        t1.rename(index=code['code'],inplace=True)
        t=t1.copy()
        t=t/sample_len
        result['fop']=t
        result['fo']=t1
        # additional TOP-N matrix: frequency of each option at each rank
        t_topn=pd.DataFrame()
        for i in range(topn):
            t_topn['TOP%d'%(i+1)]=data.applymap(lambda x:int(x==i+1)).sum()
        t_topn.sort_values(by=u'TOP1',ascending=False,inplace=True)
        if 'code' in code:
            t_topn.rename(index=code['code'],inplace=True)
        result['TOPN_fo']=t_topn# frequencies
        result['TOPN']=t_topn/sample_len
        result['weight']='+'.join(['TOP{}*{:.2f}'.format(i+1,(topn-i)*2.0/(topn+1)/topn) for i in range(topn)])
    else:
        result['fop']=None
        result['fo']=None
    # drop the total row when not requested
    if (not total) and not(result['fo'] is None) and (u'合计' in result['fo'].index):
        result['fo'].drop([u'合计'],axis=0,inplace=True)
        result['fop'].drop([u'合计'],axis=0,inplace=True)
    # reorder rows to the declared option order, keeping the total row last
    if not(result['fo'] is None) and ('code_order' in code):
        code_order=[q for q in code['code_order'] if q in result['fo'].index]
        if u'合计' in result['fo'].index:
            code_order=code_order+[u'合计']
        result['fo']=pd.DataFrame(result['fo'],index=code_order)
        result['fop']=pd.DataFrame(result['fop'],index=code_order)
    return result
def crosstab(data_index,data_column,code_index=None,code_column=None,qtype=None,total=True):
    '''Cross-tabulation for survey data.

    Parameters:
    data_index: dependent variable, shown in the rows
    data_column: independent variable, shown in the columns
    code_index: dict, coding information of data_index
    code_column: dict, coding information of data_column
    qtype: question type(s); a string sets data_index's type, a 2-list
           sets both types
    total: keep the overall ("总体") column in the outputs

    Returns a dict:
    'fop': percentage table (rows: data_index, columns: data_column)
    'fo' : raw frequency table with an overall column appended
    'fw' : weighted means (only when code_index carries 'weight')

    Notes:
    To handle all question types uniformly, single-choice questions are
    converted to multiple-choice (dummy-coded) format.
    fo: observed frequencies; n_ij counts respondents who chose both Ri
    and Cj.  The overall column counts respondents who chose Ri, regardless
    of the row sums:
        rows\\cols   C1 |C2 | C3| C4|总体
        R1| n11|n12|n13|n14|n1:
        R2| n21|n22|n23|n23|n2:
        R3| n31|n32|n33|n34|n3:
    fop: column percentages.  Each column category's denominator is the
    number of respondents choosing both the row variable and that column
    category; the overall column's denominator is the number choosing the
    row variable.
    fw: weighted means per column category when code_index has a 'weight'
    field.
    '''
    # normalise Series input to DataFrame
    data_index=pd.DataFrame(data_index)
    data_column=pd.DataFrame(data_column)
    # infer the question types of the row/column variables
    # defaults, based on the column count
    if data_index.shape[1]==1:
        qtype1=u'单选题'
    else:
        qtype1=u'多选题'
    if data_column.shape[1]==1:
        qtype2=u'单选题'
    else:
        qtype2=u'多选题'
    # override from the coding dicts and map answers/column names to labels
    if code_index:
        qtype1=code_index['qtype']
        if qtype1 == u'单选题':
            data_index.replace(code_index['code'],inplace=True)
        elif qtype1 in [u'多选题',u'排序题']:
            data_index.rename(columns=code_index['code'],inplace=True)
        elif qtype1 == u'矩阵单选题':
            data_index.rename(columns=code_index['code_r'],inplace=True)
    if code_column:
        qtype2=code_column['qtype']
        if qtype2 == u'单选题':
            data_column.replace(code_column['code'],inplace=True)
        elif qtype2 in [u'多选题',u'排序题']:
            data_column.rename(columns=code_column['code'],inplace=True)
        elif qtype2 == u'矩阵单选题':
            data_column.rename(columns=code_column['code_r'],inplace=True)
    if qtype:
        #qtype=list(qtype)
        if isinstance(qtype,list) and len(qtype)==2:
            qtype1=qtype[0]
            qtype2=qtype[1]
        elif isinstance(qtype,str):
            qtype1=qtype
    if qtype1 == u'单选题':
        data_index=sa_to_ma(data_index)
        qtype1=u'多选题'
    # convert the single-choice column variable to multi-choice dummies
    if qtype2 == u'单选题':
        #data_column=pd.get_dummies(data_column.iloc[:,0])
        data_column=sa_to_ma(data_column)
        qtype2=u'多选题'
    # preparation
    index_list=list(data_index.columns)
    columns_list=list(data_column.columns)
    # per-category sample sizes of data_column, restricted to respondents
    # who answered data_index
    column_freq=data_column.iloc[list(data_index.notnull().T.any()),:].sum()
    #column_freq[u'总体']=column_freq.sum()
    column_freq[u'总体']=data_index.notnull().T.any().sum()
    R=len(index_list)
    C=len(columns_list)
    result={}
    result['sample_size']=column_freq
    if (qtype1 == u'多选题') and (qtype2 == u'多选题'):
        # co-occurrence counts via the matrix product of the dummy tables
        data_index.fillna(0,inplace=True)
        t=pd.DataFrame(np.dot(data_index.fillna(0).T,data_column.fillna(0)))
        t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)
        if code_index and ('weight' in code_index):
            # weighted mean of the row variable within each column category
            w=pd.Series(code_index['weight'])
            w.rename(index=code_index['code'],inplace=True)
            fw=pd.DataFrame(columns=[u'加权'],index=t.columns)
            for c in t.columns:
                tmp=t[c]
                tmp=tmp[w.index][tmp[w.index].notnull()]
                if abs(tmp.sum())>0:
                    fw.loc[c,u'加权']=(tmp*w).sum()/tmp.sum()
                else:
                    fw.loc[c,u'加权']=0
            fo1=data_index.sum()[w.index][data_index.sum()[w.index].notnull()]
            if abs(fo1.sum())>0:
                fw.loc[u'总体',u'加权']=(fo1*w).sum()/fo1.sum()
            else:
                fw.loc[u'总体',u'加权']=0
            result['fw']=fw
        t[u'总体']=data_index.sum()
        t.sort_values([u'总体'],ascending=False,inplace=True)
        t1=t.copy()
        for i in t.columns:
            if column_freq[i]!=0:
                t.loc[:,i]=t.loc[:,i]/column_freq[i]
        result['fop']=t
        result['fo']=t1
    elif (qtype1 == u'矩阵单选题') and (qtype2 == u'多选题'):
        # matrix single-choice rows: report per-category means, not counts
        if code_index and ('weight' in code_index):
            data_index.replace(code_index['weight'],inplace=True)
        t=pd.DataFrame(np.dot(data_index.fillna(0).T,data_column.fillna(0)))
        t=pd.DataFrame(np.dot(t,np.diag(1/data_column.sum())))
        t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)
        t[u'总体']=data_index.mean()
        t.sort_values([u'总体'],ascending=False,inplace=True)
        t1=t.copy()
        result['fop']=t
        result['fo']=t1
    elif (qtype1 == u'排序题') and (qtype2 == u'多选题'):
        # ranking rows: each rank gets a weight summing to 1 (composite score)
        topn=int(data_index.max().max())
        #topn=max([len(data_index[q][data_index[q].notnull()].unique()) for q in index_list])
        qsort=dict(zip([i+1 for i in range(topn)],[(topn-i)*2.0/(topn+1)/topn for i in range(topn)]))
        data_index_zh=data_index.replace(qsort)
        t=pd.DataFrame(np.dot(data_index_zh.fillna(0).T,data_column.fillna(0)))
        t.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)
        t[u'总体']=data_index_zh.sum()
        t.sort_values([u'总体'],ascending=False,inplace=True)
        t1=t.copy()
        for i in t.columns:
            if column_freq[i]!=0:
                t.loc[:,i]=t.loc[:,i]/column_freq[i]
        result['fop']=t
        result['fo']=t1
        # additional TOP1 table (rank-1 choices only)
        data_index_top1=data_index.applymap(lambda x:int(x==1))
        top1=pd.DataFrame(np.dot(data_index_top1.fillna(0).T,data_column.fillna(0)))
        top1.rename(index=dict(zip(range(R),index_list)),columns=dict(zip(range(C),columns_list)),inplace=True)
        top1[u'总体']=data_index_top1.fillna(0).sum()
        top1.sort_values([u'总体'],ascending=False,inplace=True)
        for i in top1.columns:
            if column_freq[i]!=0:
                top1.loc[:,i]=top1.loc[:,i]/column_freq[i]
        result['TOP1']=top1
    else:
        result['fop']=None
        result['fo']=None
    # drop the overall column when not requested
    if (not total) and not(result['fo'] is None) and ('总体' in result['fo'].columns):
        result['fo'].drop(['总体'],axis=1,inplace=True)
        result['fop'].drop(['总体'],axis=1,inplace=True)
    # reorder rows/columns to the declared option orders
    if not(result['fo'] is None) and code_index and ('code_order' in code_index) and qtype1!='矩阵单选题':
        code_order=code_index['code_order']
        code_order=[q for q in code_order if q in result['fo'].index]
        if u'总体' in result['fo'].index:
            code_order=code_order+[u'总体']
        result['fo']=pd.DataFrame(result['fo'],index=code_order)
        result['fop']=pd.DataFrame(result['fop'],index=code_order)
    if not(result['fo'] is None) and code_column and ('code_order' in code_column) and qtype2!='矩阵单选题':
        code_order=code_column['code_order']
        code_order=[q for q in code_order if q in result['fo'].columns]
        if u'总体' in result['fo'].columns:
            code_order=code_order+[u'总体']
        result['fo']=pd.DataFrame(result['fo'],columns=code_order)
        result['fop']=pd.DataFrame(result['fop'],columns=code_order)
    return result
def qtable(data,*args,**kwargs):
    '''Convenience frequency statistics by question number.

    Inputs (positional, in any order):
    data: DataFrame, may contain the whole survey
    code: coding dict (optional; auto-derived when missing)
    q1: question id, e.g. 'Q1'
    q2: second question id (optional)

    # frequency table of one variable
    qtable(data,code,'Q1')
    # cross table of two variables
    qtable(data,code,'Q1','Q2')
    '''
    code=None
    q1=None
    q2=None
    # first string argument is q1, second is q2, any dict is the coding
    for arg in args:
        if isinstance(arg,dict):
            code=arg
        elif isinstance(arg,str):
            if not q1:
                q1=arg
            else:
                q2=arg
    if not code:
        code=data_auto_code(data)
    if not q1:
        print('please input the q1,such as Q1.')
        return
    total=kwargs.get('total',False)
    if q2 is None:
        return table(data[code[q1]['qlist']],code[q1],total=total)
    return crosstab(data[code[q1]['qlist']],data[code[q2]['qlist']],code[q1],code[q2],total=total)
def association_rules(df,minSup=0.08,minConf=0.4,Y=None):
    '''Association-rule mining on a basket-style boolean DataFrame.

    df: DataFrame coercible to bool; rows are samples, columns are items
    minSup: minimum support
    minConf: minimum confidence
    Y: unused; kept for interface compatibility

    Returns (result, rules, itemsets):
    result  : summary string of the top 4 rules, or None when nothing found
    rules   : DataFrame with antecedent/consequent/sup/conf/rule columns
    itemsets: dict of frequent itemsets (from the `associate` package)
    '''
    try:
        df=df.astype(bool)
    except Exception:
        # fix: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit
        print('df 必须为 bool 类型')
        return (None,None,None)
    columns = np.array(df.columns)
    gen=associate.frequent_itemsets(np.array(df), minSup)
    itemsets=dict(gen)
    rules=associate.association_rules(itemsets,minConf)
    rules=pd.DataFrame(list(rules))
    if len(rules) == 0:
        return (None,None,None)
    # columns are: LHS, RHS, support, confidence
    rules.columns=['antecedent','consequent','sup','conf']
    rules['sup']=rules['sup']/len(df)
    # map item indices back to the original column names
    rules['antecedent']=rules['antecedent'].map(lambda x:[columns[i] for i in list(x)])
    rules['consequent']=rules['consequent'].map(lambda x:[columns[i] for i in list(x)])
    rules['rule']=rules['antecedent'].map(lambda x:','.join(['%s'%i for i in x]))\
    +'-->'\
    +rules['consequent'].map(lambda x:','.join(['%s'%i for i in x]))
    result=';\n'.join(['{}: 支持度={:.1f}%, 置信度={:.1f}%'.format(\
    rules.loc[ii,'rule'],100*rules.loc[ii,'sup'],100*rules.loc[ii,'conf']) for ii in rules.index[:4]])
    return (result,rules,itemsets)
def contingency(fo,alpha=0.05):
    ''' Contingency-table analysis of an observed frequency table.

    # a distance measure between categories may be added later
    1. computes TGI, TWI and CHI indices
    2. chi-square test of independence
    3. when the overall test is inconclusive, per-cell CHI values are still
       used to extract the relatively different pairs

    fo: observed frequency table (DataFrame or Series)
    alpha: significance level, default 0.05

    Returns a dict:
    FO: observed frequencies (after dropping total/other rows and columns)
    FE: expected frequencies
    TGI: fo/fe (scaled by 100)
    TWI: fo-fe
    CHI: sqrt((fo-fe)^2/fe)*sign(fo-fe)
    PCHI: logistic transform of CHI
    significant:{
    .'result': test outcome [1 significant, 0 not, -1/-2 preconditions unmet]
    .'pvalue':
    .'method': chi-test or fisher_test
    .'vcoef':
    .'threshold':
    }
    summary:{
    .'summary': extracted textual conclusion
    .'fit_test': per-row goodness-of-fit test
    .'chi_std':
    .'chi_mean':
    }
    '''
    import scipy.stats as stats
    cdata={}
    if isinstance(fo,pd.core.series.Series):
        fo=pd.DataFrame(fo)
    if not isinstance(fo,pd.core.frame.DataFrame):
        return cdata
    R,C=fo.shape
    # drop every aggregate row/column: 总体(overall), 合计(total),
    # 其他/其它(other)
    if u'总体' in fo.columns:
        fo.drop([u'总体'],axis=1,inplace=True)
    if any([(u'其他' in '%s'%s) or (u'其它' in '%s'%s) for s in fo.columns]):
        tmp=[s for s in fo.columns if (u'其他' in s) or (u'其它' in s)]
        for t in tmp:
            fo.drop([t],axis=1,inplace=True)
    if u'合计' in fo.index:
        fo.drop([u'合计'],axis=0,inplace=True)
    if any([(u'其他' in '%s'%s) or (u'其它' in '%s'%s) for s in fo.index]):
        tmp=[s for s in fo.index if (u'其他' in s) or (u'其它' in s)]
        for t in tmp:
            fo.drop([t],axis=0,inplace=True)
    fe=fo.copy()
    N=fo.sum().sum()
    if N==0:
        # empty table: nothing to analyse
        #print('rpt.contingency:: fo的样本数为0,请检查数据')
        return cdata
    # expected frequency under independence: row total * col total / N
    for i in fe.index:
        for j in fe.columns:
            fe.loc[i,j]=fe.loc[i,:].sum()*fe.loc[:,j].sum()/float(N)
    TGI=fo/fe
    TWI=fo-fe
    # signed per-cell chi contribution
    CHI=np.sqrt((fo-fe)**2/fe)*(TWI.applymap(lambda x: int(x>0))*2-1)
    PCHI=1/(1+np.exp(-1*CHI))
    cdata['FO']=fo
    cdata['FE']=fe
    cdata['TGI']=TGI*100
    cdata['TWI']=TWI
    cdata['CHI']=CHI
    cdata['PCHI']=PCHI
    # significance test (test of independence)
    significant={}
    significant['threshold']=stats.chi2.ppf(q=1-alpha,df=C-1)
    #threshold=math.ceil(R*C*0.2)# expected/observed frequencies should not be below 5
    # drop rows/columns whose totals are too small for the test
    threshold=max(3,min(30,N*0.05))
    ind1=fo.sum(axis=1)>=threshold
    ind2=fo.sum()>=threshold
    fo=fo.loc[ind1,ind2]
    if (fo.shape[0]<=1) or (np.any(fo.sum()==0)) or (np.any(fo.sum(axis=1)==0)):
        significant['result']=-2
        significant['pvalue']=-2
        significant['method']='fo not frequency'
        #elif ((fo<=5).sum().sum()>=threshold):
        #significant['result']=-1
        #significant['method']='need fisher_exact'
        '''fisher_exact运行所需时间极其的长,此处还是不作检验
        fisher_r,fisher_p=fisher_exact(fo)
        significant['pvalue']=fisher_p
        significant['method']='fisher_exact'
        significant['result']=fisher_r
        '''
    else:
        try:
            chiStats = stats.chi2_contingency(observed=fo)
        except:
            # NOTE(review): bare except; failures fall back to a NaN p-value
            chiStats=(1,np.nan)
        significant['pvalue']=chiStats[1]
        significant['method']='chi-test'
        #significant['vcoef']=math.sqrt(chiStats[0]/N/min(R-1,C-1))
        if chiStats[1] <= alpha:
            significant['result']=1
        elif np.isnan(chiStats[1]):
            significant['pvalue']=-2
            significant['result']=-1
        else:
            significant['result']=0
    cdata['significant']=significant
    # textual summary of the contingency analysis
    chi_sum=(CHI**2).sum(axis=1)
    chi_value_fit=stats.chi2.ppf(q=1-alpha,df=C-1)# goodness-of-fit critical value
    fit_test=chi_sum.map(lambda x : int(x>chi_value_fit))
    summary={}
    summary['fit_test']=fit_test
    summary['chi_std']=CHI.unstack().std()
    summary['chi_mean']=CHI.unstack().mean()
    #print('the std of CHI is %.2f'%summary['chi_std'])
    conclusion=''
    fo_rank=fo.sum().rank(ascending=False)# rank the columns; only the top 4 are analysed
    for c in fo_rank[fo_rank<5].index:#CHI.columns:
        # for each column pick row options whose CHI exceeds one std dev
        # above the mean; keep at most the top three
        tmp=list(CHI.loc[CHI[c]-summary['chi_mean']>summary['chi_std'],c].sort_values(ascending=False)[:3].index)
        tmp=['%s'%s for s in tmp]# stringify all labels
        if tmp:
            tmp1=u'{col}:{s}'.format(col=c,s=' || '.join(tmp))
            conclusion=conclusion+tmp1+'; \n'
    # assemble the (Chinese) conclusion text according to the test result
    if significant['result']==1:
        if conclusion:
            tmp='在95%置信水平下显著性检验(卡方检验)结果为*显著*, 且CHI指标在一个标准差外的(即相对有差异的)有:\n'
        else:
            tmp='在95%置信水平下显著性检验(卡方检验)结果为*显著*,但没有找到相对有差异的配对'
    elif significant['result']==0:
        if conclusion:
            tmp='在95%置信水平下显著性检验(卡方检验)结果为*不显著*, 但CHI指标在一个标准差外的(即相对有差异的)有:\n'
        else:
            tmp='在95%置信水平下显著性检验(卡方检验)结果为*不显著*,且没有找到相对有差异的配对'
    else:
        if conclusion:
            tmp='不满足显著性检验(卡方检验)条件, 但CHI指标在一个标准差外的(即相对有差异的)有:\n'
        else:
            tmp='不满足显著性检验(卡方检验)条件,且没有找到相对有差异的配对'
    conclusion=tmp+conclusion
    summary['summary']=conclusion
    cdata['summary']=summary
    return cdata
def pre_cross_qlist(data,code):
    '''Automatically pick variables (and their categories) suitable for
    one-click cross analysis.

    A question qualifies when, roughly:
    1. it is a single-choice question
    2. if its options are text, their average length is small (< ~15 chars)
    ...

    Returns:
    cross_qlist: list of [question id, list of category labels]
    '''
    cross_qlist=[]
    for qq in code:
        qtype=code[qq]['qtype']
        qlist=code[qq]['qlist']
        content=code[qq]['content']
        sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()
        # only single-choice questions whose columns exist in data
        if qtype not in ['单选题']:
            continue
        if not(set(qlist) <= set(data.columns)):
            continue
        t=qtable(data,code,qq)['fo']
        if 'code_order' in code[qq]:
            code_order=code[qq]['code_order']
            code_order=[q for q in code_order if q in t.index]
            t=pd.DataFrame(t,index=code_order)
        items=list(t.index)
        code_values=list(code[qq]['code'].values())
        if len(items)<=1:
            continue
        # skip questions with long free-text options
        if all([isinstance(t,str) for t in code_values]):
            if sum([len(t) for t in code_values])/len(code_values)>15:
                continue
        # ordered categories: keep the contiguous span with enough samples
        if ('code_order' in code[qq]) and (len(items)<10):
            code_order=[q for q in code[qq]['code_order'] if q in t.index]
            t=pd.DataFrame(t,index=code_order)
            ind=np.where(t['频数']>=10)[0]
            if len(ind)>0:
                cross_order=list(t.index[range(ind[0],ind[-1]+1)])
                cross_qlist.append([qq,cross_order])
            continue
        # demographics (gender/age) always qualify
        if re.findall('性别|年龄|gender|age',content.lower()):
            cross_qlist.append([qq,items])
            continue
        # otherwise require few categories relative to the sample size
        if (len(items)<=sample_len_qq/30) and (len(items)<10):
            cross_order=list(t.index[t['频数']>=10])
            if cross_order:
                cross_qlist.append([qq,cross_order])
            continue
    return cross_qlist
'''
import report as rpt
ppt=rpt.Report(template)
ppt.add_cover(filename)
ppt.add_slide(data=,title)
ppt.save()
ppt.plo
'''
def cross_chart(data,code,cross_class,filename=u'交叉分析报告', cross_qlist=None,\
delclass=None,plt_dstyle=None,cross_order=None,reverse_display=False,\
total_display=True,max_column_chart=20,save_dstyle=None,template=None):
    '''Cross-analyse every question against one banner question and export
    the results to PPT and Excel files (written under .\\out).

    data: survey data containing the banner variable and all dependents
    code: coding dict of the data
    cross_class: banner (cross) variable, single- or multi-choice, e.g. 'Q1'
    filename: base name for the generated PPT and Excel files
    cross_qlist: questions to cross-analyse; defaults to all questions in code
    delclass: banner category to drop; currently unused
    plt_dstyle: data style used in the charts; default is the percentage
                table, alternatives include 'TGI', 'CHI', 'TWI'
    save_dstyle: list of data styles to save into separate Excel files
    cross_order: display order of the banner categories; optional
    reverse_display: swap rows and columns in the cross tables
    total_display: whether PPT charts include the overall column
    max_column_chart: above this many rows a bar chart replaces the column chart
    template: PPT template info {'path':..., 'layouts':...}; default built-in
    '''
    # =================== argument preprocessing =======================
    if plt_dstyle:
        plt_dstyle=plt_dstyle.upper()
    if not cross_qlist:
        try:
            cross_qlist=list(sorted(code,key=lambda c: int(re.findall('\d+',c)[0])))
        except:
            cross_qlist=list(code.keys())
    if cross_class in cross_qlist:
        cross_qlist.remove(cross_class)
    # ================= basic data =========================
    # the sample size of the cross analysis is that of the banner variable
    sample_len=data[code[cross_class]['qlist']].notnull().T.any().sum()
    # frequency distribution of the banner categories
    if code[cross_class]['qtype'] == u'单选题':
        #data[cross_class].replace(code[cross_class]['code'],inplace=True)
        cross_class_freq=data[code[cross_class]['qlist'][0]].value_counts()
        cross_class_freq[u'合计']=cross_class_freq.sum()
        cross_class_freq.rename(index=code[cross_class]['code'],inplace=True)
        #cross_columns_qlist=code[cross_class]['qlist']
    elif code[cross_class]['qtype'] == u'多选题':
        cross_class_freq=data[code[cross_class]['qlist']].sum()
        cross_class_freq[u'合计']=cross_class_freq.sum()
        cross_class_freq.rename(index=code[cross_class]['code'],inplace=True)
        #data.rename(columns=code[cross_class]['code'],inplace=True)
        #cross_columns_qlist=[code[cross_class]['code'][k] for k in code[cross_class]['qlist']]
    elif code[cross_class]['qtype'] == u'排序题':
        tmp=qtable(data,code,cross_class)
        #tmp,tmp1=table(data[code[cross_class]['qlist']],code[cross_class])
        cross_class_freq=tmp['fo'][u'综合']
        cross_class_freq[u'合计']=cross_class_freq.sum()
    # ================ I/O interfaces =============================
    # pptx interface
    prs=rpt.Report(template) if template else rpt.Report()
    if not os.path.exists('.\\out'):
        os.mkdir('.\\out')
    # Excel output interfaces (one per saved data style)
    Writer=pd.ExcelWriter('.\\out\\'+filename+u'.xlsx')
    Writer_save={}
    if save_dstyle:
        for dstyle in save_dstyle:
            Writer_save[u'Writer_'+dstyle]=pd.ExcelWriter('.\\out\\'+filename+u'_'+dstyle+'.xlsx')
    result={}# per-question statistics; this is the function's return value
    # per-question sample sizes and significance results, for the final index
    cross_columns=list(cross_class_freq.index)
    cross_columns=[r for r in cross_columns if r!=u'合计']
    cross_columns=['内容','题型']+cross_columns+[u'总体',u'显著性检验']
    conclusion=pd.DataFrame(index=cross_qlist,columns=cross_columns)
    conclusion.to_excel(Writer,u'索引')
    # ================ cover slide =============================
    prs.add_cover(title=filename)
    # ================ background slide =============================
    title=u'说明'
    summary=u'交叉题目为'+cross_class+u': '+code[cross_class]['content']
    summary=summary+'\n'+u'各类别样本量如下:'
    prs.add_slide(data={'data':cross_class_freq,'slide_type':'table'},title=title,\
    summary=summary)
    data_column=data[code[cross_class]['qlist']]
    for qq in cross_qlist:
        # iterate over all questions
        #print(qq)
        qtitle=code[qq]['content']
        qlist=code[qq]['qlist']
        qtype=code[qq]['qtype']
        if not(set(qlist) <= set(data.columns)):
            continue
        data_index=data[qlist]
        # banner sample size restricted to respondents who answered qq
        sample_len=data_column.iloc[list(data_index.notnull().T.any()),:].notnull().T.any().sum()
        summary=None
        if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:
            continue
        # cross statistics
        try:
            if reverse_display:
                result_t=crosstab(data_column,data_index,code_index=code[cross_class],code_column=code[qq])
            else:
                result_t=crosstab(data_index,data_column,code_index=code[qq],code_column=code[cross_class])
        except :
            # NOTE(review): message is missing .format(qq), so the question
            # id is never interpolated
            print('脚本在处理{}时出了一天小问题.....')
            continue
        if ('fo' in result_t) and ('fop' in result_t):
            t=result_t['fop']
            t1=result_t['fo']
            qsample=result_t['sample_size']
        else:
            continue
        if t is None:
            continue
        # ======= data fix-ups: apply the requested banner order ==============
        if cross_order and (not reverse_display):
            if u'总体' not in cross_order:
                cross_order=cross_order+[u'总体']
            cross_order=[q for q in cross_order if q in t.columns]
            t=pd.DataFrame(t,columns=cross_order)
            t1=pd.DataFrame(t1,columns=cross_order)
        if cross_order and reverse_display:
            cross_order=[q for q in cross_order if q in t.index]
            t=pd.DataFrame(t,index=cross_order)
            t1=pd.DataFrame(t1,index=cross_order)
        '''在crosstab中已经重排了
        if 'code_order' in code[qq] and qtype!='矩阵单选题':
            code_order=code[qq]['code_order']
            if reverse_display:
                #code_order=[q for q in code_order if q in t.columns]
                if u'总体' in t1.columns:
                    code_order=code_order+[u'总体']
                t=pd.DataFrame(t,columns=code_order)
                t1=pd.DataFrame(t1,columns=code_order)
            else:
                #code_order=[q for q in code_order if q in t.index]
                t=pd.DataFrame(t,index=code_order)
                t1=pd.DataFrame(t1,index=code_order)
        '''
        t.fillna(0,inplace=True)
        t1.fillna(0,inplace=True)
        # ======= save into Excel ========
        t2=pd.concat([t,t1],axis=1)
        t2.to_excel(Writer,qq,index_label=qq,float_format='%.3f')
        Writer_rows=len(t2)# rows written so far in this sheet
        pd.DataFrame(qsample,columns=['样本数']).to_excel(Writer,qq,startrow=Writer_rows+2)
        Writer_rows+=len(qsample)+2
        # contingency-table analysis
        cdata=contingency(t1,alpha=0.05)# tweak the tolerance here if needed
        result[qq]=cdata
        if cdata:
            summary=cdata['summary']['summary']
        # save the requested index data styles
        if save_dstyle:
            for dstyle in save_dstyle:
                cdata[dstyle].to_excel(Writer_save[u'Writer_'+dstyle],qq,index_label=qq,float_format='%.2f')
        if qtype in [u'单选题',u'多选题',u'排序题']:
            plt_data=t*100
        else:
            plt_data=t.copy()
        if (abs(1-plt_data.sum())<=0.01+1e-17).all():
            plt_data=plt_data*100
        # ======================== special question-type handling ================================
        if 'fw' in result_t:
            # weighted means exist: chart those instead of percentages
            plt_data=result_t['fw']
            if cross_order and (not reverse_display):
                if u'总体' not in cross_order:
                    cross_order=cross_order+[u'总体']
                cross_order=[q for q in cross_order if q in plt_data.index]
                plt_data=pd.DataFrame(plt_data,index=cross_order)
            plt_data.to_excel(Writer,qq,startrow=Writer_rows+2)
            Writer_rows+=len(plt_data)
        if plt_dstyle and isinstance(cdata,dict) and (plt_dstyle in cdata):
            plt_data=cdata[plt_dstyle]
        # build the PPT slide
        title=qq+'['+qtype+']: '+qtitle
        if not summary:
            summary=u'这里是结论区域.'
        if 'significant' in cdata:
            sing_result=cdata['significant']['result']
            sing_pvalue=cdata['significant']['pvalue']
        else:
            sing_result=-2
            sing_pvalue=-2
        footnote=u'显著性检验的p值为{:.3f},数据来源于{},样本N={}'.format(sing_pvalue,qq,sample_len)
        # record the index-sheet data
        conclusion.loc[qq,:]=qsample
        conclusion.loc[qq,[u'内容',u'题型']]=pd.Series({u'内容':code[qq]['content'],u'题型':code[qq]['qtype']})
        conclusion.loc[qq,u'显著性检验']=sing_result
        if (not total_display) and (u'总体' in plt_data.columns):
            plt_data.drop([u'总体'],axis=1,inplace=True)
        if len(plt_data)>max_column_chart:
            prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\
            title=title,summary=summary,footnote=footnote)
        else:
            prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\
            title=title,summary=summary,footnote=footnote)
        # extra TOP1 slide for ranking questions
        if (qtype == u'排序题') and ('TOP1' in result_t):
            plt_data=result_t['TOP1']*100
            # ======= data fix-ups for the TOP1 table ==============
            if cross_order and (not reverse_display):
                if u'总体' not in cross_order:
                    cross_order=cross_order+[u'总体']
                cross_order=[q for q in cross_order if q in plt_data.columns]
                plt_data=pd.DataFrame(plt_data,columns=cross_order)
            if cross_order and reverse_display:
                cross_order=[q for q in cross_order if q in plt_data.index]
                plt_data=pd.DataFrame(plt_data,index=cross_order)
            if 'code_order' in code[qq]:
                code_order=code[qq]['code_order']
                if reverse_display:
                    #code_order=[q for q in code_order if q in t.columns]
                    if u'总体' in t1.columns:
                        code_order=code_order+[u'总体']
                    plt_data=pd.DataFrame(plt_data,columns=code_order)
                else:
                    #code_order=[q for q in code_order if q in t.index]
                    plt_data=pd.DataFrame(plt_data,index=code_order)
            plt_data.fillna(0,inplace=True)
            title='[TOP1]' + title
            if len(plt_data)>max_column_chart:
                prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\
                title=title,summary=summary,footnote=footnote)
            else:
                prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\
                title=title,summary=summary,footnote=footnote)
    '''
    # ==============小结页=====================
    difference=pd.Series(difference,index=total_qlist_0)
    '''
    # ======================== file generation and export ======================
    #difference.to_csv('.\\out\\'+filename+u'_显著性检验.csv',encoding='gbk')
    if plt_dstyle:
        filename=filename+'_'+plt_dstyle
    try:
        prs.save('.\\out\\'+filename+u'.pptx')
    except:
        # fall back to a copy ("副本") when the file is locked/open
        prs.save('.\\out\\'+filename+u'_副本.pptx')
    conclusion.to_excel(Writer,'索引')
    Writer.save()
    if save_dstyle:
        for dstyle in save_dstyle:
            Writer_save[u'Writer_'+dstyle].save()
    return result
def summary_chart(data,code,filename=u'整体统计报告', summary_qlist=None,\
                  max_column_chart=20,template=None):
    """Generate the overall descriptive-statistics report (one .pptx + one .xlsx).

    Parameters
    ----------
    data : pandas.DataFrame
        Survey answers, one row per respondent.
    code : dict
        Question metadata; per question id it provides at least
        'content', 'qlist' and 'qtype' (see `table()` usage below).
    filename : str
        Base name of the generated report files (written under .\\out\\).
    summary_qlist : list or None
        Question ids to include; defaults to every question in `code`,
        sorted by the numeric part of the id when possible.
    max_column_chart : int
        Above this many categories a horizontal bar chart is used
        instead of a column chart.
    template : str or None
        Optional pptx template path passed to rpt.Report.

    Returns
    -------
    dict
        Maps each processed question id to the data that was plotted
        (percentages for choice/ranking questions).
    """
    # =================== parameter preprocessing =======================
    if not summary_qlist:
        try:
            # sort question ids by their numeric part, e.g. Q2 before Q10
            summary_qlist=list(sorted(code,key=lambda c: int(re.findall('\d+',c)[0])))
        except:
            summary_qlist=list(code.keys())
    # ================= basic data ==========================
    # overall number of respondents; individual questions may have fewer valid answers
    sample_len=len(data)
    # ================ I/O interfaces =============================
    # pptx interface
    prs=rpt.Report(template) if template else rpt.Report()
    if not os.path.exists('.\\out'):
        os.mkdir('.\\out')
    Writer=pd.ExcelWriter('.\\out\\'+filename+'.xlsx')
    result={}# per-question intermediate data, returned to the caller
    # records content/type/sample size per question for the workbook index sheet
    conclusion=pd.DataFrame(index=summary_qlist,columns=[u'内容',u'题型',u'样本数'])
    conclusion.to_excel(Writer,u'索引')
    # ================ cover page =============================
    prs.add_cover(title=filename)
    # ================ background page (question-type counts + confidence intervals) ====
    title=u'说明'
    qtype_count=[code[k]['qtype'] for k in code]
    qtype_count=[[qtype,qtype_count.count(qtype)] for qtype in set(qtype_count)]
    qtype_count=sorted(qtype_count,key=lambda x:x[1],reverse=True)
    summary='该数据一共有{}个题目,其中有'.format(len(code))
    summary+=','.join(['{} {} 道'.format(t[0],t[1]) for t in qtype_count])
    summary+='.\n 经统计, 该数据有效样本数为 {} 份。下表是在该样本数下,各比例对应的置信区间(置信水平95%).'.format(sample_len)
    # table of 95% confidence intervals for proportions 5%..50% at this sample size
    w=pd.DataFrame(index=[(i+1)*0.05 for i in range(10)],columns=['比例','置信区间'])
    w['比例']=w.index
    w['置信区间']=w['比例'].map(lambda x:confidence_interval(x,sample_len))
    w['置信区间']=w['置信区间'].map(lambda x:'±{:.1f}%'.format(x*100))
    w['比例']=w['比例'].map(lambda x:'{:.0f}% / {:.0f}%'.format(x*100,100-100*x))
    w=w.set_index('比例')
    prs.add_slide(data={'data':w,'slide_type':'table'},title=title,summary=summary)
    for qq in summary_qlist:
        '''
        特殊题型处理
        整体满意度题:后期归为数值类题型
        '''
        #print(qq)
        qtitle=code[qq]['content']
        qlist=code[qq]['qlist']
        qtype=code[qq]['qtype']
        # skip questions whose columns are not (all) present in the data
        if not(set(qlist) <= set(data.columns)):
            continue
        # number of respondents with at least one non-null answer for this question
        sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()
        conclusion.loc[qq,u'内容']=qtitle
        conclusion.loc[qq,u'题型']=qtype
        conclusion.loc[qq,u'样本数']=sample_len_qq
        # fill-in-the-blank questions: export value counts only, no chart
        if qtype == '填空题':
            startcols=0
            for qqlist in qlist:
                tmp=pd.DataFrame(data[qqlist].value_counts()).reset_index()
                tmp.to_excel(Writer,qq,startcol=startcols,index=False)
                startcols+=3
            continue
        if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:
            continue
        try:
            result_t=table(data[qlist],code=code[qq])
        except:
            print(u'脚本处理 {} 时出了一点小问题.....'.format(qq))
            continue
        t=result_t['fop']   # frequency as proportions
        t1=result_t['fo']   # raw frequency counts
        # ======= data fixes: apply the declared option order ==============
        if 'code_order' in code[qq]:
            code_order=code[qq]['code_order']
            code_order=[q for q in code_order if q in t.index]
            if u'合计' in t.index:
                code_order=code_order+[u'合计']
            t=pd.DataFrame(t,index=code_order)
            t1=pd.DataFrame(t1,index=code_order)
        t.fillna(0,inplace=True)
        t1.fillna(0,inplace=True)
        # ======= save proportions + counts side by side to Excel ========
        Writer_rows=0
        t2=pd.concat([t,t1],axis=1)
        t2.to_excel(Writer,qq,startrow=Writer_rows,index_label=qq,float_format='%.3f')
        Writer_rows+=len(t2)+2
        # ========== derive a conclusion text per question type ==================
        summary=''
        if qtype in ['单选题','多选题']:
            # goodness-of-fit test; -2 marks "test preconditions not met"
            try:
                gof_result=gof_test(t1)
            except :
                gof_result=-2
            if gof_result==1:
                summary+='拟合优度检验*显著*'
            elif gof_result==0:
                summary+='拟合优度检验*不显著*'
            else:
                summary+='不满足拟合优度检验条件'
        if qtype == '多选题':
            # association-rule mining between options; thresholds scale
            # with the estimated workload len*n_cols*log(n_cols)
            tmp=data[qlist].rename(columns=code[qq]['code'])
            tmp_t=len(tmp)*tmp.shape[1]*np.log(tmp.shape[1])
            if tmp_t<20000:
                minSup=0.08
                minConf=0.40
            elif tmp_t<50000:
                minSup=0.15
                minConf=0.60
            else:
                minSup=0.20
                minConf=0.60
            aso_result,rules,freq=association_rules(tmp,minSup=minSup,minConf=minConf)
            numItem_mean=t1.sum().sum()/sample_len_qq
            if u'合计' in t1.index:
                numItem_mean=numItem_mean/2
            if aso_result:
                summary+=' || 平均每个样本选了{:.1f}个选项 || 找到的关联规则如下(只显示TOP4):\n{}'.format(numItem_mean,aso_result)
                rules.to_excel(Writer,qq,startrow=Writer_rows,index=False,float_format='%.3f')
                Writer_rows+=len(rules)+2
            else:
                summary+=' || 平均每个样本选了{:.1f}个选项 || 没有找到关联性较大的规则'.format(numItem_mean)
        # conclusions / notes for the remaining question types
        if (qtype in [u'单选题']) and 'fw' in result_t:
            tmp=u'加权平均值'
            if ('name' in code[qq]) and code[qq]['name']==u'满意度':
                tmp=u'满意度平均值'
            elif ('name' in code[qq]) and code[qq]['name']=='NPS':
                tmp=u'NPS值'
            summary+=' || {}为:{:.3f}'.format(tmp,result_t['fw'])
        elif qtype =='排序题':
            summary+=' 此处“综合”指标的计算方法为 :={}/总频数.'.format(result_t['weight'])
        if len(summary)==0:
            summary+=u'这里是结论区域'
        # =============== data reshaping for plotting ==========================
        if qtype in [u'单选题',u'多选题',u'排序题']:
            plt_data=t*100
        else:
            plt_data=t.copy()
        if u'合计' in plt_data.index:
            plt_data.drop([u'合计'],axis=0,inplace=True)
        result[qq]=plt_data
        title=qq+'['+qtype+']: '+qtitle
        footnote=u'数据来源于%s,样本N=%d'%(qq,sample_len_qq)
        # plot the chart; plt_data is usually a Series, but a DataFrame for matrix
        # single-choice questions. Many categories -> horizontal bars (reversed so
        # the first option is on top); few categories -> pie.
        if len(t)>max_column_chart:
            prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_CLUSTERED'},\
            title=title,summary=summary,footnote=footnote)
        elif (len(t)>3) or (len(plt_data.shape)>1 and plt_data.shape[1]>1):
            prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_CLUSTERED'},\
            title=title,summary=summary,footnote=footnote)
        else:
            prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'PIE'},\
            title=title,summary=summary,footnote=footnote)
        #============== special question types ===============
        # matrix single-choice: add a slide with the weighted means
        if (qtype == u'矩阵单选题') and ('fw' in result_t):
            plt_data=result_t['fw']
            plt_data.rename(columns={u'加权':u'平均值'},inplace=True)
            plt_data.to_excel(Writer,qq,startrow=Writer_rows,float_format='%.3f')
            Writer_rows=len(plt_data)+2
            plt_data.fillna(0,inplace=True)
            title='[平均值]'+title
            summary=summary+' || 该平均分采用的权值是:\n'+result_t['weight']
            if len(plt_data)>max_column_chart:
                prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_STACKED'},\
                title=title,summary=summary,footnote=footnote)
            else:
                prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_STACKED'},\
                title=title,summary=summary,footnote=footnote)
        # ranking questions: add a slide with the TOP-N distribution
        if (qtype == u'排序题') and ('TOPN' in result_t):
            plt_data=result_t['TOPN']
            # save both the frequency and the percentage tables locally
            tmp=pd.concat([result_t['TOPN'],result_t['TOPN_fo']],axis=1)
            tmp.to_excel(Writer,qq,startrow=Writer_rows,float_format='%.3f')
            Writer_rows=len(plt_data)+2
            plt_data=plt_data*100
            # ======= data fixes: apply the declared option order ==============
            if 'code_order' in code[qq]:
                code_order=code[qq]['code_order']
                #code_order=[q for q in code_order if q in t.index]
                if u'合计' in plt_data.index:
                    code_order=code_order+[u'合计']
                plt_data=pd.DataFrame(plt_data,index=code_order)
            plt_data.fillna(0,inplace=True)
            title='[TOPN]'+title
            if len(plt_data)>max_column_chart:
                prs.add_slide(data={'data':plt_data[::-1],'slide_type':'chart','type':'BAR_STACKED'},\
                title=title,summary=summary,footnote=footnote)
            else:
                prs.add_slide(data={'data':plt_data,'slide_type':'chart','type':'COLUMN_STACKED'},\
                title=title,summary=summary,footnote=footnote)
    # ======================== write out the report files ======================
    try:
        prs.save('.\\out\\'+filename+u'.pptx')
    except:
        # fall back to a copy name when the target file is locked/open
        prs.save('.\\out\\'+filename+u'_副本.pptx')
    conclusion.to_excel(Writer,'索引')
    Writer.save()
    return result
def onekey_gen(data,code,filename=u'reprotgen 报告自动生成',template=None):
    '''One-click generation of every report that may be needed.

    Generates:
      * the overall descriptive-statistics report (``summary_chart``)
      * one cross/difference analysis report per suitable question
        (``cross_chart`` over the pairs returned by ``pre_cross_qlist``)

    Parameters
    ----------
    data : pandas.DataFrame
        Survey answers, one row per respondent.
    code : dict
        Question metadata (content/qlist/qtype per question id).
    filename : str
        Base name for the overall report file.
    template : str or None
        Optional pptx template path forwarded to the report builders.

    Returns
    -------
    None
    '''
    # Catch Exception, not a bare ``except:`` — a bare clause would also
    # swallow KeyboardInterrupt/SystemExit, making this (slow) batch job
    # impossible to abort cleanly.
    try:
        summary_chart(data,code,filename=filename,template=template)
    except Exception:
        print('整体报告生成过程中出现错误,将跳过..')
    print('已生成 '+filename)
    cross_qlist=pre_cross_qlist(data,code)
    if len(cross_qlist)==0:
        return None
    for cross_qq in cross_qlist:
        qq=cross_qq[0]
        cross_order=cross_qq[1]
        # prefer the human-readable question name for the report filename
        if ('name' in code[qq]) and (code[qq]['name']!=''):
            filename='{}_差异分析'.format(code[qq]['name'])
        else:
            filename='{}_差异分析'.format(qq)
        save_dstyle=None #['TGI','CHI']
        try:
            cross_chart(data,code,qq,filename=filename,cross_order=cross_order,\
            save_dstyle=save_dstyle,template=template)
            print('已生成 '+filename)
        except Exception:
            # one failing question must not abort the remaining reports
            print(filename+'生成过程中出现错误,将跳过...')
    return None
def scorpion(data,code,filename='scorpion'):
    '''"Scorpion-X" plan: dump all key statistics into a single Excel workbook.

    The workbook contains:
    1. an index sheet (question id, content, type, sample size)
    2. the frequency table of every supported question
    3. every possible pairwise cross analysis (contingency-table based)

    Parameters
    ----------
    data : pandas.DataFrame, survey answers (one row per respondent)
    code : dict, question metadata (content/qlist/qtype per question id)
    filename : str, base name of the workbook written under .\\out\\
    '''
    if not os.path.exists('.\\out'):
        os.mkdir('.\\out')
    Writer=pd.ExcelWriter('.\\out\\'+filename+'.xlsx')
    try:
        # sort question ids by their numeric part, e.g. Q2 before Q10
        qqlist=list(sorted(code,key=lambda c: int(re.findall('\d+',c)[0])))
    except:
        qqlist=list(code.keys())
    qIndex=pd.DataFrame(index=qqlist,columns=[u'content',u'qtype',u'SampleSize'])
    qIndex.to_excel(Writer,u'索引')
    # build the index sheet and the per-question frequency tables
    Writer_rows=0
    for qq in qqlist:
        qtitle=code[qq]['content']
        qlist=code[qq]['qlist']
        qtype=code[qq]['qtype']
        # skip questions whose columns are not (all) present in the data
        if not(set(qlist) <= set(data.columns)):
            continue
        # respondents with at least one non-null answer for this question
        sample_len_qq=data[code[qq]['qlist']].notnull().T.any().sum()
        qIndex.loc[qq,u'content']=qtitle
        qIndex.loc[qq,u'qtype']=qtype
        qIndex.loc[qq,u'SampleSize']=sample_len_qq
        if qtype not in [u'单选题',u'多选题',u'排序题',u'矩阵单选题']:
            continue
        try:
            result_t=table(data[qlist],code=code[qq])
        except:
            print(u'脚本处理 {} 时出了一点小问题.....'.format(qq))
            continue
        fop=result_t['fop']   # frequency as proportions
        fo=result_t['fo']     # raw frequency counts
        # ranking questions: replace plain tables with the TOP-N variants,
        # carrying over the composite score column
        if (qtype == u'排序题') and ('TOPN' in result_t):
            tmp=result_t['TOPN']
            tmp[u'综合']=fo[u'综合']
            fo=tmp.copy()
            tmp=result_t['TOPN_fo']
            tmp[u'综合']=fop[u'综合']
            fop=tmp.copy()
        # ======= save counts + proportions side by side to Excel ========
        fo_fop=pd.concat([fo,fop],axis=1)
        fo_fop.to_excel(Writer,u'频数表',startrow=Writer_rows,startcol=1,index_label=code[qq]['content'],float_format='%.3f')
        # question id written in column 0 next to its table
        tmp=pd.DataFrame({'name':[qq]})
        tmp.to_excel(Writer,u'频数表',index=False,header=False,startrow=Writer_rows)
        Writer_rows+=len(fo_fop)+3
    qIndex.to_excel(Writer,'索引')
    # all pairwise cross analyses between supported question types
    crossAna=pd.DataFrame(columns=['RowVar','ColVar','SampleSize','pvalue','significant','summary'])
    N=0
    qqlist=[qq for qq in qqlist if code[qq]['qtype'] in ['单选题','多选题','矩阵单选题','排序题']]
    start_time=time.clock()
    N_cal=len(qqlist)*(len(qqlist)-1)*0.1# used to estimate the remaining run time
    for qq1 in qqlist:
        for qq2 in qqlist:
            #qtype1=code[qq1]['qtype']
            # after ~10% of the pairs, extrapolate and report the remaining time
            if (N>=N_cal) and (N<N_cal+1.0):
                tmp=(time.clock()-start_time)*9
                if tmp>60:
                    print('请耐心等待, 预计还需要{:.1f}秒'.format(tmp))
            qtype2=code[qq2]['qtype']
            # only cross against single/multiple-choice columns; skip self-pairs
            if (qq1==qq2) or (qtype2 not in [u'单选题',u'多选题']):
                continue
            data_index=data[code[qq1]['qlist']]
            data_column=data[code[qq2]['qlist']]
            # respondents with valid answers for BOTH questions
            samplesize=data_column.iloc[list(data_index.notnull().T.any()),:].notnull().T.any().sum()
            try:
                fo=qtable(data,code,qq1,qq2)['fo']
            except :
                crossAna.loc[N,:]=[qq1,qq2,samplesize,'','','']
                N+=1
                continue
            try:
                cdata=contingency(fo,alpha=0.05)
            except :
                crossAna.loc[N,:]=[qq1,qq2,samplesize,'','','']
                N+=1
                continue
            if cdata:
                result=cdata['significant']['result']
                pvalue=cdata['significant']['pvalue']
                summary=cdata['summary']['summary']
            else:
                result=-2
                pvalue=-2
                summary='没有找到结论'
            summary='\n'.join(summary.splitlines()[1:])# drop the first (header) line
            if len(summary)==0:
                summary='没有找到结论'
            crossAna.loc[N,:]=[qq1,qq2,samplesize,pvalue,result,summary]
            N+=1
    crossAna.to_excel(Writer,'交叉分析表',index=False)
    Writer.save()
| 36.697011 | 156 | 0.537062 | [
"MIT"
] | brightgeng/reportgen | reportgen/questionnaire/questionnaire.py | 121,305 | Python |
# -*- coding: utf-8 -*-
"""Functions for retrieving raw and processed run data"""
from datetime import datetime
from html_table_parser import HTMLTableParser
from utils import convert_numbers
import commands
import vcf
def laneHTML(run, path):
    """Retrieve data from the lane.html page, the data is the general run data and date per lane.

    Parameters:
        run  -- run folder name; the leading token before '_' is a YYMMDD date
        path -- run folder path searched for the bcl2fastq lane.html report

    Returns a tuple (data_run, lane_dict):
        data_run  -- dict of run-level stats plus 'Date' and 'asDate'
                     (days since the Unix epoch)
        lane_dict -- dict keyed by lane number with per-lane stats
    On any error the exception is printed and None is returned implicitly.
    NOTE(review): Python 2 only (``except Exception, e`` and the
    ``commands`` module).
    """
    try:
        lane_dict = {}
        data_run = {}
        epoch = datetime.utcfromtimestamp(0)
        # mapping: output key -> column header in the run-level HTML table
        dict_run = {
            'Cluster_Raw': {'column': 'Clusters (Raw)'},
            'Cluster_PF': {'column': 'Clusters(PF)'},
            'Yield_Mbases': {'column': 'Yield (MBases)'}
        }
        # mapping: output key -> column header in the per-lane HTML table
        dict_lane = {
            'Lane': {'column': 'Lane'},
            'PF_Clusters': {'column': 'PF Clusters'},
            'PCT_of_lane': {'column': '% of the lane'},
            'PCT_perfect_barcode': {'column': '% Perfect barcode'},
            'PCT_one_mismatch_barcode': {'column': '% One mismatch barcode'},
            'Yield_Mbases': {'column': 'Yield (Mbases)'},
            'PCT_PF_Clusters': {'column': '% PF Clusters'},
            'PCT_Q30_bases': {'column': '% = Q30 bases'},
            'Mean_Quality_Score': {'column': 'Mean Quality Score'}
        }
        # run name starts with YYMMDD -> '20YY-MM-DD'
        date = run.split('_')[0]
        date = '20' + date[0:2] + '-' + date[2:4] + '-' + date[4:6]
        d = datetime.strptime(date, '%Y-%m-%d')
        as_date = (d-epoch).days
        # locate the lane.html report inside the run folder via `find`
        lanehtml = commands.getoutput('find {path}/Data/Intensities/BaseCalls/Reports/html/*/all/all/all/ -iname \'lane.html\''.format(
            path=str(path)
        ))
        with open(lanehtml, 'r') as lane:
            html = lane.read()
            tableParser = HTMLTableParser()
            tableParser.feed(html)
            tables = tableParser.tables # tables[1]==run tables[2]==lane
            header_run = tables[1][0]
            header_lane = tables[2][0]
            # resolve each wanted column name to its index in the header row
            for col in dict_run:
                dict_run[col]['index'] = header_run.index(dict_run[col]['column'])
            for col in dict_lane:
                dict_lane[col]['index'] = header_lane.index(dict_lane[col]['column'])
            stats_run = tables[1][1]
            # strip thousands separators and convert numeric strings
            stats_run = [convert_numbers(item.replace(',', '')) for item in stats_run]
            for col in dict_run:
                stat = stats_run[dict_run[col]['index']]
                stat = int(stat)
                data_run[col] = stat
            data_run['Date'] = date
            data_run['asDate'] = as_date
            # one stats dict per lane row
            for lane in tables[2][1:]:
                data_lane = {}
                lane = [convert_numbers(item.replace(',', '')) for item in lane]
                lane_num = lane[header_lane.index('Lane')]
                for col in dict_lane:
                    stat = lane[dict_lane[col]['index']]
                    data_lane[col] = stat
                lane_dict[lane_num] = data_lane
            return data_run, lane_dict
    except Exception, e:
        print(e)
def laneBarcodeHTML(path):
    """Retrieve data from the laneBarcode.html page, the data is per barcode/sample per lane.

    Returns a dict keyed by '<lane>--<sample>' with the per-sample stats;
    rows whose Project is 'default' (undetermined reads) are skipped.
    On any error the exception is printed and None is returned implicitly.
    NOTE(review): Python 2 only (``except Exception, e`` and the
    ``commands`` module).
    """
    try:
        samples_dict = {}
        # mapping: output key -> column header in the per-sample HTML table
        dict_samples = {
            'Lane': {'column': 'Lane'},
            'Project': {'column': 'Project'},
            'Sample_name': {'column': 'Sample'},
            'Barcode_sequence': {'column': 'Barcode sequence'},
            'PF_Clusters': {'column': 'PF Clusters'},
            'PCT_of_lane': {'column': '% of the lane'},
            'PCT_perfect_barcode': {'column': '% Perfect barcode'},
            'PCT_one_mismatch_barcode': {'column': '% One mismatch barcode'},
            'Yield_Mbases': {'column': 'Yield (Mbases)'},
            'PCT_PF_Clusters': {'column': '% PF Clusters'},
            'PCT_Q30_bases': {'column': '% = Q30 bases'},
            'Mean_Quality_Score': {'column': 'Mean Quality Score'}
        }
        # locate the laneBarcode.html report inside the run folder via `find`
        samplehtml = commands.getoutput('find {path}/Data/Intensities/BaseCalls/Reports/html/*/all/all/all/ -iname \'laneBarcode.html\''.format(
            path=str(path)
        ))
        with open(samplehtml, 'r') as sample:
            html = sample.read()
            tableParser = HTMLTableParser()
            tableParser.feed(html)
            tables = tableParser.tables # tables[1]==run tables[2]==sample
            header_samplehtml = tables[2][0]
            # resolve each wanted column name to its index in the header row
            for col in dict_samples:
                dict_samples[col]['index'] = header_samplehtml.index(dict_samples[col]['column'])
            for sample_lane in tables[2][1:]:
                data_sample_lane = {}
                # skip the 'default' project (undetermined barcodes)
                if sample_lane[header_samplehtml.index('Project')].upper() != 'DEFAULT':
                    # strip thousands separators and convert numeric strings
                    stats = [convert_numbers(item.replace(',', '')) for item in sample_lane]
                    lane = stats[header_samplehtml.index('Lane')]
                    sample = stats[header_samplehtml.index('Sample')]
                    lane_sample = str(lane) + '--' + str(sample)
                    for col in dict_samples:
                        stat = stats[dict_samples[col]['index']]
                        data_sample_lane[col] = stat
                    samples_dict[lane_sample] = data_sample_lane
        return samples_dict
    except Exception, e:
        print(e)
def vcf_file(path):
    """Retrieve data from a vcf file, for each sample the number of variants,
    homo- and heterozygous, number of dbSNP variants and PASS variants is determained

    Returns a dict: sample name -> [variant count, dbSNP count, PASS count].
    (The trailing comment below calls the last two "percentages", but the
    code accumulates raw counts — TODO confirm intended semantics.)
    On any error the exception is printed and None is returned implicitly.
    NOTE(review): Python 2 only (``except Exception, e`` and the
    ``commands`` module).
    """
    try:
        dic_samples = {}
        # locate the single VCF file in the top level of the run folder
        file_vcf = commands.getoutput('find {path}/ -maxdepth 1 -iname \'*.vcf\''.format(
            path=str(path)
        ))
        with open(file_vcf, 'r') as vcffile:
            vcf_file = vcf.Reader(vcffile)
            list_samples = vcf_file.samples
            # counters per sample: [variants, dbSNP members, PASS variants]
            for sample in list_samples:
                dic_samples[sample] = [0, 0, 0]
            for variant in vcf_file:
                samples = []
                # 'DB' INFO flag marks known dbSNP variants
                if 'DB'in variant.INFO:
                    DB = 1
                else:
                    DB = 0
                # empty FILTER means the variant passed all filters
                if not variant.FILTER:
                    PASS = 1
                else:
                    PASS = 0
                # collect samples carrying the variant (het or hom-alt)
                if variant.num_het != 0:
                    het_samples = variant.get_hets()
                    samples = [item.sample for item in het_samples]
                if variant.num_hom_alt != 0:
                    hom_samples = [item.sample for item in variant.get_hom_alts()]
                    samples.extend(hom_samples)
                for sample in samples:
                    stats = dic_samples[sample]
                    stats[0] += 1
                    stats[1] += DB
                    stats[2] += PASS
                    dic_samples[sample] = stats
        return dic_samples
    # dic_samples[sample name] = [number of variant, Percentage dbSNP variants from total, Percentage PASS variants from total]
    except Exception, e:
        print(e)
def runstat_file(path):
    """Retrieve data from the runstats file,
    for each sample the percentage duplication is retrieved

    Parses the concatenated flagstat output in run_stats.txt (sections are
    separated by 'working on <sample>.flagstat...') and returns a tuple of
    three dicts keyed by sample name:
        sample_dup               -- % duplication (2 decimals)
        sample_total_reads       -- total reads (QC-passed + QC-failed)
        sample_mapped_percentage -- % mapped reads
    On any error the exception is printed and None is returned implicitly.
    NOTE(review): Python 2 only (``except Exception, e`` and the
    ``commands`` module).
    """
    try:
        sample_dup = {}
        sample_total_reads = {}
        sample_mapped_percentage = {}
        # locate run_stats.txt anywhere under the run folder via `find`
        runstats_file = commands.getoutput('find {path}/ -iname \'run_stats.txt\''.format(
            path=str(path)
        ))
        with open(runstats_file, 'r') as runstats:
            run_stats = runstats.read()
            # each 'working on ...' marker starts a new per-sample section
            run_stats = run_stats.split('working on ')
            for sample in run_stats[1:]:
                stats = sample.split('\n')
                # first line holds the flagstat file path; reduce it to the sample name
                sample_name = stats[0].split('/')[-1]
                sample_name = sample_name.replace('.flagstat...', '')
                dup = 0
                total_reads = 0
                mapped_percentage = 0
                for x in stats:
                    if 'total (QC-passed reads + QC-failed reads)' in x:
                        total_reads = int(x.strip().split()[0])
                    elif 'mapped (' in x:
                        # value between '(' and '%' on the 'mapped' line
                        mapped_percentage = float(x.split('(')[-1].split('%')[0])
                    elif '%duplication' in x:
                        dup = float(x.split('%')[0].strip('\t').strip())
                        dup = float('{0:.2f}'.format(dup))
                sample_dup[sample_name] = dup
                sample_total_reads[sample_name] = total_reads
                sample_mapped_percentage[sample_name] = mapped_percentage
        return sample_dup, sample_total_reads, sample_mapped_percentage
    # sample_dup[sample name] = duplication
    except Exception, e:
        print(e)
def HSMetrics(path):
    """Retrieve data from the HSMetrics_summary.transposed file,
    from this file all the data is transferred to a dictionary

    Parses the tab-separated HSMetrics_summary.txt (Picard CollectHsMetrics
    style, samples in columns) and returns a dict: sample name -> dict of
    metrics. Fraction columns are converted to percentages (2 decimals),
    float columns are rounded to 2 decimals, everything else is kept as-is.
    On any error the exception is printed and None is returned implicitly.
    NOTE(review): Python 2 only (``except Exception, e``, ``commands``, and
    the ``map(None, *rows)`` transpose idiom).
    """
    try:
        sample_stats = {}
        # locate the metrics summary file under the given path via `find`
        QCStats_file = commands.getoutput('find {path} -iname \'HSMetrics_summary.txt\''.format(
            path=str(path)
        ))
        # mapping: output key -> row/column header in the metrics file
        dict_columns = {
            'Sample_name': {'column': 'Sample'},
            'Total_reads': {'column': 'TOTAL_READS'},
            'PF_reads': {'column': 'PF_READS'},
            'PF_unique_reads': {'column': 'PF_UNIQUE_READS'},
            'PCT_PF_reads': {'column': 'PCT_PF_READS'},
            'PCT_PF_UQ_reads': {'column': 'PCT_PF_UQ_READS'},
            'PF_UQ_reads_aligned': {'column': 'PF_UQ_READS_ALIGNED'},
            'PCT_PF_UQ_reads_aligned': {'column': 'PCT_PF_UQ_READS_ALIGNED'},
            'PF_UQ_bases_aligned': {'column': 'PF_UQ_BASES_ALIGNED'},
            'On_bait_bases': {'column': 'ON_BAIT_BASES'},
            'Near_bait_bases': {'column': 'NEAR_BAIT_BASES'},
            'Off_bait_bases': {'column': 'OFF_BAIT_BASES'},
            'On_target_bases': {'column': 'ON_TARGET_BASES'},
            'PCT_selected_bases': {'column': 'PCT_SELECTED_BASES'},
            'PCT_off_bait': {'column': 'PCT_OFF_BAIT'},
            'On_bait_vs_selected': {'column': 'ON_BAIT_VS_SELECTED'},
            'Mean_bait_coverage': {'column': 'MEAN_BAIT_COVERAGE'},
            'Mean_target_coverage': {'column': 'MEAN_TARGET_COVERAGE'},
            'PCT_usable_bases_on_bait': {'column': 'PCT_USABLE_BASES_ON_BAIT'},
            'PCT_usable_bases_on_target': {'column': 'PCT_USABLE_BASES_ON_TARGET'},
            'Fold_enrichment': {'column': 'FOLD_ENRICHMENT'},
            'Zero_CVG_targets_PCT': {'column': 'ZERO_CVG_TARGETS_PCT'},
            'Fold_80_base_penalty': {'column': 'FOLD_80_BASE_PENALTY'},
            'PCT_target_bases_2X': {'column': 'PCT_TARGET_BASES_2X'},
            'PCT_target_bases_10X': {'column': 'PCT_TARGET_BASES_10X'},
            'PCT_target_bases_20X': {'column': 'PCT_TARGET_BASES_20X'},
            'PCT_target_bases_30X': {'column': 'PCT_TARGET_BASES_30X'},
            'PCT_target_bases_40X': {'column': 'PCT_TARGET_BASES_40X'},
            'PCT_target_bases_50X': {'column': 'PCT_TARGET_BASES_50X'},
            'PCT_target_bases_100X': {'column': 'PCT_TARGET_BASES_100X'},
            'HS_library_size': {'column': 'HS_LIBRARY_SIZE'},
            'HS_penalty_10X': {'column': 'HS_PENALTY_10X'},
            'HS_penalty_20X': {'column': 'HS_PENALTY_20X'},
            'HS_penalty_30X': {'column': 'HS_PENALTY_30X'},
            'HS_penalty_40X': {'column': 'HS_PENALTY_40X'},
            'HS_penalty_50X': {'column': 'HS_PENALTY_50X'},
            'HS_penalty_100X': {'column': 'HS_PENALTY_100X'},
            'AT_dropout': {'column': 'AT_DROPOUT'},
            'GC_dropout': {'column': 'GC_DROPOUT'},
            'Bait_name': {'column': 'BAIT_SET'},
            'Genome_Size': {'column': 'GENOME_SIZE'},
            'Bait_territory': {'column': 'BAIT_TERRITORY'},
            'Target_territory': {'column': 'TARGET_TERRITORY'},
            'Bait_design_efficiency': {'column': 'BAIT_DESIGN_EFFICIENCY'}
        }
        # fraction columns that are converted to percentages
        col_to_pct = ['Bait_design_efficiency', 'PCT_PF_reads', 'PCT_PF_UQ_reads',
                      'PCT_PF_UQ_reads_aligned', 'PCT_selected_bases',
                      'PCT_off_bait', 'On_bait_vs_selected', 'PCT_usable_bases_on_bait',
                      'PCT_usable_bases_on_target', 'Zero_CVG_targets_PCT', 'PCT_target_bases_2X',
                      'PCT_target_bases_10X', 'PCT_target_bases_20X', 'PCT_target_bases_30X',
                      'PCT_target_bases_40X', 'PCT_target_bases_50X', 'PCT_target_bases_100X'
                      ]
        # float columns that are only rounded to two decimals
        col_format = ['Mean_bait_coverage', 'Mean_target_coverage', 'Fold_enrichment',
                      'Fold_80_base_penalty', 'HS_penalty_10X', 'HS_penalty_20X',
                      'HS_penalty_30X', 'HS_penalty_40X', 'HS_penalty_50X',
                      'HS_penalty_100X', 'AT_dropout', 'GC_dropout'
                      ]
        with open(QCStats_file, 'r') as QCStats:
            sample = []
            qc_stats = QCStats.read().split('\n')
            for line in qc_stats:
                l = line.split('\t')
                sample.append(l)
            # Python-2 transpose: rows become columns (one row per sample);
            # map(None, ...) pads short rows with None
            qc_table = [list(i) for i in map(None, *sample)]
            qc_table[0][0] = 'Sample'
            table_header = qc_table[0][:-1]
            table_header = [item.replace(' ', '_') for item in table_header]
            # resolve each wanted column name to its index in the header
            for col in dict_columns:
                dict_columns[col]['index'] = table_header.index(dict_columns[col]['column'])
            for stats in qc_table[1:]:
                data_dict = {}
                stats = stats[:-1] # there is a None at the end of each line
                sample_name = stats[table_header.index('Sample')]
                sample_name = sample_name.replace('_dedup', '')
                for col in dict_columns:
                    # NOTE(review): 'Percentage_reads_mapped' is not a key of
                    # dict_columns, so this branch appears unreachable — confirm
                    if col == 'Percentage_reads_mapped':
                        stat = stats[dict_columns[col]['index']]
                        stat = float(stat.strip('%'))
                        data_dict[col] = stat
                    elif col in col_to_pct:
                        stat = stats[dict_columns[col]['index']]
                        stat = float(stat)*100
                        stat = float('{0:.2f}'.format(stat))
                        data_dict[col] = stat
                    elif col in col_format:
                        stat = float(stats[dict_columns[col]['index']])
                        stat = float('{0:.2f}'.format(stat))
                        data_dict[col] = stat
                    elif col == 'Sample_name':
                        data_dict[col] = sample_name
                    else:
                        data_dict[col] = stats[dict_columns[col]['index']]
                sample_stats[sample_name] = data_dict
        return sample_stats
    except Exception, e:
        print(e)
"MIT"
] | UMCUGenetics/Trend_Analysis_tool | scripts/upload/data/import_data.py | 14,835 | Python |
"""
Consensus Algorithm for 2 Robots using MLP Model
Scene: Robot 1, Robot 3
Inputs: Mx, My
Outputs: Ux, Uy
"""
import torch
import MLP_Model
import math
import numpy as np
import rclpy
from rclpy.node import Node
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Float32
import time
# Kinematic constants used in timer_callback below.
L = 1    # used as 1/L in the G matrices — presumably a robot length scale; TODO confirm
d = 0.5  # used as 1/(2*d) in the wheel-speed matrix D — presumably half the axle track; TODO confirm
# load model using dict
FILE = "model.pth"  # trained MLP weights (state_dict) on disk
loaded_model = MLP_Model.MLP()
loaded_model.load_state_dict(torch.load(FILE))
loaded_model.eval()  # inference mode (disables training-only layers)
def euler_from_quaternion(x, y, z, w):
    """Return the yaw angle (rotation about Z), in radians, of quaternion (x, y, z, w)."""
    siny_cosp = 2.0 * (w * z + x * y)
    cosy_cosp = 1.0 - 2.0 * (y * y + z * z)
    return math.atan2(siny_cosp, cosy_cosp)
class MinimalPublisher(Node):
    """ROS 2 node implementing MLP-based consensus control for robots 1 and 3.

    Subscribes to /tf for both robots' poses, and at 10 Hz predicts control
    inputs with the loaded MLP, converts them to wheel speeds through the
    differential-drive kinematics, and publishes left/right motor speeds
    for each robot.
    """
    def __init__(self):
        super().__init__('minimal_publisher1')
        # wheel-speed command publishers, one pair per robot
        self.publisher_l1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 0) #Change according to topic in child script,String to Float32
        self.publisher_r1 = self.create_publisher(Float32, '/rightMotorSpeedrobot1',0) #Change according to topic in child script,String to Float32
        self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32
        self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3',0) #Change according to topic in child script,String to Float32
        # pose updates for both robots arrive on /tf
        self.subscription = self.create_subscription(
            TFMessage,
            '/tf',
            self.listener_callback,
            0)
        " Timer Callback "
        timer_period = 0.1 # seconds
        self.timer = self.create_timer(timer_period, self.timer_callback)
        self.i = 0  # number of control cycles executed
        " Parameters "
        self.t = 0 # Just to intialized Phix's and Phiy's
        " Initialize Phi's"
        # Phi terms carry the neighbor's predicted control components
        # between control cycles (fed back into the MLP input)
        self.Phix1 = 0# 1x1
        self.Phiy1 = 0 # 1x1
        self.Phix3 = 0 # 1x1
        self.Phiy3 = 0 # 1x1
        " Mobile Robot 1 Parameters "
        self.x1 = 0
        self.y1 = 0
        self.Theta1 = 0
        self.v1 = 0
        self.w1 = 0
        self.vL1 = 0
        self.vR1 = 0
        " Mobile Robot 3 Parameters "
        self.x3 = 0
        self.y3 = 0
        self.Theta3 = 0
        self.v3 = 0
        self.w3 = 0
        self.vL3 = 0
        self.vR3 = 0
    def timer_callback(self):
        """Run one 10 Hz control cycle: predict, transform, publish wheel speeds."""
        " Calculate Mx1, My1, ...... Mx6, My6 "
        # Initialize Phi's
        if self.t ==0:
            self.Phix1 = 0 # 1x1
            self.Phiy1 = 0 # 1x1
            self.Phix3 = 0 # 1x1
            self.Phiy3 = 0 # 1x1
            self.t += 1
        # relative displacement of each robot towards the other
        Mx1 = self.x3 - self.x1
        My1 = self.y3 - self.y1
        Mx3 = self.x1 - self.x3
        My3 = self.y1 - self.y3
        " Use MLP to Predict control inputs "
        relative_pose_1 = [ Mx1, My1, self.Phix1, self.Phiy1 ] # tensor data for MLP model
        relative_pose_3 = [ Mx3, My3, self.Phix3, self.Phiy3 ] # tensor data for MLP model
        u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model) # predict control input u1, tensor
        u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model) # predict control input u2, tensor
        # cross-feed: each robot stores its NEIGHBOR's predicted control
        # components as the Phi input for the next cycle
        self.Phix1 = u3_predicted[0][0] # 1x1
        self.Phiy1 = u3_predicted[0][1] # 1x1
        self.Phix3 = u1_predicted[0][0] # 1x1
        self.Phiy3 = u1_predicted[0][1] # 1x1
        u1_predicted_np = np.array([[ u1_predicted[0][0] ], [ u1_predicted[0][1] ]]) # from tensor to numpy array for calculation
        u3_predicted_np = np.array([[ u3_predicted[0][0] ], [ u3_predicted[0][1] ]]) # from tensor to numpy array for calculation
        " Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 "
        # S = G * R(theta) * u : world-frame control -> body (v, w)
        S1 = np.array([[self.v1], [self.w1]]) #2x1
        G1 = np.array([[1,0], [0,1/L]]) #2x2
        R1 = np.array([[math.cos(self.Theta1),math.sin(self.Theta1)],[-math.sin(self.Theta1),math.cos(self.Theta1)]]) #2x2
        S1 = np.dot(np.dot(G1, R1), u1_predicted_np) #2x1
        S3 = np.array([[self.v3], [self.w3]]) #2x1
        G3 = np.array([[1,0], [0,1/L]]) #2x2
        R3 = np.array([[math.cos(self.Theta3),math.sin(self.Theta3)],[-math.sin(self.Theta3),math.cos(self.Theta3)]]) #2x2
        S3 = np.dot(np.dot(G3, R3), u3_predicted_np) # 2x1
        " Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 "
        # differential-drive map (v, w) -> (VL, VR) via the inverse of D
        D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]]) #2x2
        Di = np.linalg.inv(D) #2x2
        Speed_L1 = np.array([[self.vL1], [self.vR1]]) # Vector 2x1 for Speed of Robot 1
        Speed_L3 = np.array([[self.vL3], [self.vR3]]) # Vector 2x1 for Speed of Robot 3
        M1 = np.array([[S1[0]],[S1[1]]]).reshape(2,1) #2x1
        M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1) #2x1
        Speed_L1 = np.dot(Di, M1) # 2x1 (VL1, VR1)
        Speed_L3 = np.dot(Di, M3) # 2x1 (VL1, VR1)
        VL1 = float(Speed_L1[0])
        VR1 = float(Speed_L1[1])
        VL3 = float(Speed_L3[0])
        VR3 = float(Speed_L3[1])
        " Publish Speed Commands to Robot 1 "
        msgl1 = Float32()
        msgr1 = Float32()
        msgl1.data = VL1
        msgr1.data = VR1
        self.publisher_l1.publish(msgl1)
        self.publisher_r1.publish(msgr1)
        " Publish Speed Commands to Robot 3 "
        msgl3 = Float32()
        msgr3 = Float32()
        msgl3.data = VL3
        msgr3.data = VR3
        self.publisher_l3.publish(msgl3)
        self.publisher_r3.publish(msgr3)
        self.i += 1
    def listener_callback(self, msg):
        """Update the stored pose (x, y, yaw) of whichever robot the /tf message describes."""
        if msg.transforms[0].child_frame_id == 'robot1' :
            self.x1 = msg.transforms[0].transform.translation.x
            self.y1 = msg.transforms[0].transform.translation.y
            self.xr1 = msg.transforms[0].transform.rotation.x
            self.yr1 = msg.transforms[0].transform.rotation.y
            self.zr1 = msg.transforms[0].transform.rotation.z
            self.wr1 = msg.transforms[0].transform.rotation.w
            self.Theta1 = euler_from_quaternion(self.xr1,self.yr1,self.zr1,self.wr1)
        if msg.transforms[0].child_frame_id == 'robot3' :
            self.x3 = msg.transforms[0].transform.translation.x
            self.y3 = msg.transforms[0].transform.translation.y
            self.xr3 = msg.transforms[0].transform.rotation.x
            self.yr3 = msg.transforms[0].transform.rotation.y
            self.zr3 = msg.transforms[0].transform.rotation.z
            self.wr3 = msg.transforms[0].transform.rotation.w
            self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3)
def main(args=None):
    """Initialize ROS 2, spin the consensus publisher node, then shut down cleanly."""
    rclpy.init(args=args)
    node = MinimalPublisher()
    # give the simulator/peers a moment to come up before spinning
    time.sleep(5)
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
| 34.22549 | 157 | 0.572615 | [
"MIT"
] | HusseinLezzaik/Consensus-Algorithm-for-2-Mobile-Robots | Real Topology Graph/GNN Model 2/Cyclic Graph/test_n2_robot3.py | 6,982 | Python |
# pylint: disable=missing-docstring
# pylint: disable=unbalanced-tuple-unpacking
import os
from resdk.tests.functional.base import BaseResdkFunctionalTest
class TestUpload(BaseResdkFunctionalTest):
def get_samplesheet(self):
"""Return path of an annotation samplesheet."""
files_path = os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..',
'..',
'files',
)
)
samplesheet_name = 'annotation_spreadsheet.xlsm'
return os.path.join(files_path, samplesheet_name)
    def test_annotate(self):
        """Annotate a collection from a samplesheet and verify both the
        logged validation errors and the descriptors actually applied."""
        # Create the collection with named, unannotated samples
        collection = self.res.collection.create(name='Test annotate collection')
        reads_1, reads_2, reads_2b, reads_4, reads_5 = self.get_reads(5, collection)
        bam = self.get_bams(1, collection)[0]
        # Two different samples
        sample_1 = reads_1.sample
        sample_1.name = 'Sample 1'
        sample_1.save()
        sample_2 = reads_2.sample
        sample_2.name = 'Sample 2'
        sample_2.save()
        # A duplicated sample
        sample_2b = reads_2b.sample
        sample_2b.name = 'Sample 2'
        sample_2b.save()
        # A sample derived from an alignment file
        sample_3 = bam.sample
        sample_3.name = 'Sample 3'
        sample_3.save()
        # Missing organism
        sample_4 = reads_4.sample
        sample_4.name = 'missing organism'
        sample_4.save()
        # Missing source
        sample_5 = reads_5.sample
        sample_5.name = 'missing source'
        sample_5.save()
        # Apply the sample annotations from a local spreadsheet
        samplesheet = self.get_samplesheet()
        with self.assertLogs() as logs:
            collection.annotate(samplesheet)
        # Check the error logging
        self.assertEqual(len(logs.output), 14)
        # Invalid annotations are individually logged and described
        samplesheet_errprefix = "ERROR:resdk.data_upload.samplesheet:"
        samplesheet_errors = [
            "For the sample, '', '' is not a valid SAMPLE_NAME.",
            "For the sample, 'missing annotator', '' is not a valid ANNOTATOR.",
            "For the sample, 'missing organism', '' is not a valid ORGANISM.",
            "For the sample, 'missing source', '' is not a valid SOURCE.",
            "For the sample, 'missing molecule', '' is not a valid MOLECULE.",
            "For the sample, 'missing seq_type', '' is not a valid SEQ_TYPE.",
            "The sample name 'duplicated sample' is duplicated. Please use "
            "unique sample names.",
        ]
        for error in samplesheet_errors:
            message = samplesheet_errprefix + error
            self.assertIn(message, logs.output)
        # All samples with invalid annotations are listed
        invalid_samples = [
            ' ,',
            'missing annotator',
            'missing organism',
            'missing source',
            'missing molecule',
            'missing seq_type',
            'duplicated sample',
        ]
        for invalid in invalid_samples:
            self.assertIn(invalid, logs.output[7])
        # Samples not explicitly added should be missing (just check a few)
        missing_samples = [
            'single-reads',
            'paired-reads',
            'bad single path',
            'bad paired path',
            ' ,',
            'missing annotator',
            'missing molecule',
            'missing seq_type',
            'duplicated sample',
        ]
        for missing in missing_samples:
            self.assertIn(missing, logs.output[8])
        # But don't claim they're missing when they're not
        present_samples = ['Sample 1', 'Sample 2', 'Sample 3',
                           'missing organism', 'missing source']
        for present in present_samples:
            self.assertNotIn(present, logs.output[8])
        # Duplicate samples raise an error
        duplicate_error = ("ERROR:resdk.data_upload.annotate_samples:"
                           "Multiple samples are queried by the name 'Sample 2'"
                           ". Annotation will not be applied.")
        self.assertIn(duplicate_error, logs.output)
        # Annotations from the example sheet for Samples 1, 2, and 3
        ann_1 = {
            'sample': {
                'genotype': 'ANIMAL 1:\xa0PBCAG-FUS1, PBCAG-eGFP, PBCAG-mCherry,'
                            ' GLAST-PBase,\xa0PX330-P53',
                'cell_type': 'Mixed',
                'optional_char': [
                    'AGE:38 days',
                    'LIBRARY_STRATEGY:Illumina Standard Prep ',
                    'OTHER_CHAR_1:2x75 bp',
                    'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
                    ' location. Easily isolated sample',
                    'TISSUE:Tumor',
                ],
                'strain': '',
                'source': 'Tumor',
                'organism': 'Rattus norvegicus',
                'molecule': 'total RNA',
                'annotator': 'Tristan Brown',
                'description': '',
            }
        }
        # Duplicated sample names are skipped, so 'Sample 2' stays unannotated
        ann_2 = {}
        # # Restore if duplicate samples may be annotated.
        # ann_2 = {
        #     'sample': {
        #         'genotype': '',
        #         'cell_type': 'Mixed',
        #         'optional_char': [
        #             'LIBRARY_STRATEGY:Illumina Standard Prep ',
        #             'OTHER_CHAR_1:2x75 bp',
        #             'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
        #             ' location. Easily isolated sample',
        #             'TISSUE:Tumor',
        #         ],
        #         'strain': '',
        #         'source': 'Tumor',
        #         'organism': 'Homo sapiens',
        #         'molecule': 'total RNA',
        #         'annotator': 'Tristan Brown',
        #         'description': '',
        #     }
        # }
        ann_3 = {
            'sample': {
                'genotype': 'AX4',
                'cell_type': '',
                'optional_char': [
                    'LIBRARY_STRATEGY:Illumina Standard Prep ',
                    'OTHER_CHAR_1:300 bp',
                ],
                'strain': 'Non-aggregating',
                'source': 'Cell',
                'organism': 'Dictyostelium discoideum',
                'molecule': 'genomic DNA',
                'annotator': 'Tristan Brown',
                'description': '',
            }
        }
        reads_ann_1 = {
            'experiment_type': 'RNA-Seq',
            'protocols': {'antibody_information': {'manufacturer': ''},
                          'extract_protocol': 'Standard',
                          'fragmentation_method': '',
                          'growth_protocol': 'Standard media',
                          'library_prep': 'Illumina',
                          'treatment_protocol': 'Control'},
            'reads_info': {'barcode': '', 'facility': '', 'instrument_type': ''},
        }
        # Check the actual annotation data
        sample_1.update()
        sample_2.update()
        sample_2b.update()
        sample_3.update()
        self.assertEqual(sample_1.descriptor, ann_1)
        self.assertEqual(sample_1.data[0].descriptor, reads_ann_1)
        self.assertEqual(sample_1.tags, ['community:rna-seq'])
        self.assertEqual(sample_2.descriptor, ann_2)
        self.assertEqual(sample_2b.descriptor, ann_2)
        self.assertEqual(sample_3.descriptor, ann_3)
        self.assertEqual(sample_4.descriptor, {})
        self.assertEqual(sample_5.descriptor, {})
    def test_export(self):
        """Export an annotation template for a half-annotated collection and
        verify the file is written and the expected log messages appear."""
        # Create the collection with named, unannotated samples
        collection = self.res.collection.create(name='Test export annotation')
        reads_1, reads_2 = self.get_reads(2, collection)
        # Two different samples
        sample_1 = reads_1.sample
        sample_1.name = 'Sample 1'
        sample_1.save()
        sample_2 = reads_2.sample
        sample_2.name = 'Sample 2'
        # Pre-annotate only sample 2 so sample 1 triggers the warning below
        ann_2 = {
            'sample': {
                'genotype': '',
                'cell_type': 'Mixed',
                'optional_char': [
                    'LIBRARY_STRATEGY:Illumina Standard Prep ',
                    'OTHER_CHAR_1:2x75 bp',
                    'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
                    ' location. Easily isolated sample',
                    'TISSUE:Tumor',
                ],
                'strain': 'N/A',
                'source': 'Tumor',
                'organism': 'Homo sapiens',
                'molecule': 'total RNA',
                'annotator': 'Tristan Brown',
                'description': '',
            }
        }
        sample_2.descriptor_schema = 'sample'
        sample_2.descriptor = ann_2
        sample_2.save()
        reads_ann = {
            'experiment_type': 'RNA-Seq',
            'protocols': {
                'growth_protocol': 'N/A',
                'treatment_protocol': 'Control',
            }
        }
        reads_2.descriptor_schema = 'reads'
        reads_2.descriptor = reads_ann
        reads_2.save()
        # Export the new template (remove any leftover file first)
        filepath = 'annotation_template1.xlsm'
        try:
            os.remove(filepath)
        except OSError:
            pass
        with self.assertLogs() as logs:
            collection.export_annotation(filepath)
        assert os.path.exists(filepath)
        # TODO: Find a robust hash check for .xls* files
        os.remove(filepath)
        # Check the error logging
        self.assertEqual(len(logs.output), 3)
        not_annotated = ("WARNING:resdk.data_upload.samplesheet:Sample 'Sample 1'"
                         " reads not annotated.")
        self.assertIn(not_annotated, logs.output)
        location = ("INFO:resdk.data_upload.annotate_samples:\nSample annotation"
                    " template exported to annotation_template1.xlsm.\n")
        self.assertIn(location, logs.output)
    def test_upload_reads(self):
        """Upload reads from a samplesheet and verify per-sample error logs.

        Exercises every failure mode of the reads uploader (missing files,
        bad extensions, missing annotations, ...), then confirms the two
        valid samples uploaded, that re-upload is skipped, and that the
        annotated template can be exported afterwards.
        """
        # Create a collection, find the samplesheet, and upload the reads
        collection = self.res.collection.create(name='Test upload reads')
        samplesheet = self.get_samplesheet()
        with self.assertLogs() as logs:
            collection.upload_reads(samplesheet, basedir='files')
        # Check the error logging
        self.assertEqual(len(logs.output), 37)
        upload_errprefix = "ERROR:resdk.data_upload.reads:"
        # Examples of each upload error case:
        upload_errs = [
            "Skipping upload of 'Sample 1': No forward reads given.",
            "File /storage/61_cat_R1_001.fastq.gz not found.",
            "File /storage/63_cat_R1_001.fastq.gz not found.",
            "Skipping upload of '01_1-1_IP_plus': Invalid file extension(s). "
            "(Options: .fq, .fastq)",
            "Skipping upload of 'missing barcode': Invalid file extension(s). "
            "(Options: .fq, .fastq)",
            "Skipping upload of 'bad extension': Invalid file extension(s). "
            "(Options: .fq, .fastq)",
        ]
        for error in upload_errs:
            message = upload_errprefix + error
            self.assertIn(message, logs.output)
        # All samples that can't be uploaded are listed
        # NOTE: logs.output[31]/[32] are the summary lines listing invalid
        # and missing samples respectively -- the indices depend on the
        # exact number of messages asserted above.
        upload_fail = [
            'Sample 1',
            'Sample 2',
            'Sample 3',
            'bad single path',
            'bad paired path',
            ' ,',
            'missing annotator',
            'missing organism',
            'missing source',
            'missing molecule',
            'missing seq_type',
            '01_1-1_IP_plus',
            '02_1-1_IP_minus',
            'missing barcode',
            'other barcode',
            '01_1-1_IP_plus2',
            '02_1-1_IP_minus2',
            'duplicated sample',
            'invalid qseq',
            'invalid qseq2',
            'bad extension',
        ]
        for invalid in upload_fail:
            self.assertIn(invalid, logs.output[31])
        # Samples not uploaded should be missing
        for missing in upload_fail:
            self.assertIn(missing, logs.output[32])
        # Don't claim it's invalid or missing if it was uploaded
        upload_success = ['single-reads', 'paired-reads']
        for uploaded in upload_success:
            self.assertNotIn(uploaded, logs.output[31])
            self.assertNotIn(uploaded, logs.output[32])
        # Check the data objects
        names = [sample.name for sample in collection.samples]
        self.assertIn('single-reads', names)
        self.assertIn('paired-reads', names)
        # Try to duplicate the upload and fail
        with self.assertLogs() as logs2:
            collection.upload_reads(samplesheet, basedir='files')
        already_up = [
            "Skipping upload of 'single-reads': File already uploaded.",
            "Skipping upload of 'paired-reads': File already uploaded.",
        ]
        for error in already_up:
            message = upload_errprefix + error
            self.assertIn(message, logs2.output)
        self.assertEqual(len(collection.data), 2)
        self.assertEqual(len(collection.samples), 2)
        # TODO: Cannot test this part because processes do not complete on Jenkins
        # TODO: Check sample files and annotations in resolwe-bio when possible
        # sample1 = collection.samples.get(name='single-reads')
        # sample2 = collection.samples.get(name='paired-reads')
        # wait_process_complete(sample1.data[0], 1, 10)
        # wait_process_complete(sample2.data[0], 1, 10)
        # file0 = 'reads.fastq.gz'
        # file1 = 'reads_paired_abyss_1.fastq.gz'
        # file2 = 'reads_paired_abyss_2.fastq.gz'
        # self.assertIn(file0, sample1.files())
        # self.assertIn(file1, sample2.files())
        # self.assertIn(file2, sample2.files())
        # self.assertEqual(sample1.descriptor['sample']['organism'], 'Mus musculus')
        # self.assertEqual(sample2.descriptor['sample']['organism'], 'Rattus norvegicus')
        # Test export of the annotated template
        filepath = 'annotation_template2.xlsm'
        try:
            os.remove(filepath)
        except OSError:
            pass
        collection.export_annotation(filepath)
        assert os.path.exists(filepath)
        # TODO: Find a robust hash check for .xls* files
        os.remove(filepath)
    def test_upload_multiplexed(self):
        """Upload multiplexed (qseq) reads and verify the logged failures.

        Checks every demultiplex error case, that exactly one qseq file is
        uploaded, and that a second upload attempt of the same file is
        skipped as a duplicate.
        """
        # Create a collection, find the samplesheet, and upload the reads
        collection = self.res.collection.create(name='Test upload multiplexed')
        samplesheet = self.get_samplesheet()
        with self.assertLogs() as logs:
            collection.upload_demulti(samplesheet, basedir='files')
        # Check the error logging
        self.assertEqual(len(logs.output), 39)
        upload_errprefix = "ERROR:resdk.data_upload.multiplexed:"
        # Examples of each upload error case:
        upload_errs = [
            "Skipping upload of 'reads.fastq.gz': No barcodes file given.",
            "Skipping upload of 'reads_paired_abyss_1.fastq.gz': "
            "No barcodes file given.",
            "Skipping upload of '': No forward reads given.",
            "Skipping upload of 'dummy.qseq': Missing barcode.",
            "Skipping upload of 'pool24.read1.small.fastq.bz2': Invalid file "
            "extension(s). (Options: .qseq)",
            "Skipping upload of 'pool24c.read1.small.qseq.bz2': Invalid file "
            "extension(s). (Options: .qseq)",
            "Demultiplex process not yet complete for 'pool24.read1.small.qseq.bz2'.",
        ]
        for error in upload_errs:
            message = upload_errprefix + error
            self.assertIn(message, logs.output)
        # All samples that can't be uploaded are listed
        # NOTE: logs.output[35]/[36] are the summary lines listing invalid
        # and missing samples respectively.
        upload_fail = [
            'single-reads',
            'paired-reads',
            'Sample 1',
            'Sample 2',
            'Sample 3',
            'bad single path',
            'bad paired path',
            ' ,',
            'missing annotator',
            'missing organism',
            'missing source',
            'missing molecule',
            'missing seq_type',
            'missing barcode',
            'other barcode',
            '01_1-1_IP_plus2',
            '02_1-1_IP_minus2',
            'duplicated sample',
            'invalid qseq',
            'invalid qseq2',
            'bad extension',
        ]
        for invalid in upload_fail:
            self.assertIn(invalid, logs.output[35])
        # Samples not uploaded should be missing
        for missing in upload_fail:
            self.assertIn(missing, logs.output[36])
        # Don't claim it's invalid if it was uploaded
        upload_success = ['01_1-1_IP_plus,', '02_1-1_IP_minus,']
        for uploaded in upload_success:
            self.assertNotIn(uploaded, logs.output[35])
        # Check the file is actually uploaded
        names = [data.name for data in collection.data]
        qseq = 'pool24.read1.small.qseq.bz2'
        self.assertIn(qseq, names)
        # Try to duplicate the upload and fail
        with self.assertLogs() as logs2:
            collection.upload_demulti(samplesheet, basedir='files')
        already_up = (
            upload_errprefix
            + "Skipping upload of 'pool24.read1.small.qseq.bz2': File already uploaded."
        )
        self.assertIn(already_up, logs2.output)
        # The qseq file should appear exactly once in the collection.
        names = [data.name for data in collection.data]
        names.remove(qseq)
        self.assertNotIn(qseq, names)
        # TODO: Cannot test this part because processes do not complete on Jenkins
        # TODO: Check sample files and annotations in resolwe-bio when possible
        # for data in collection.data:
        #     wait_process_complete(data, 1, 10)  # process the .qseq upload
        # collection.update()
        # for data in collection.data:
        #     wait_process_complete(data, 1, 10)  # process the demultiplexed child data
        # collection.upload_demulti(samplesheet)
        # collection.update()
        # # Check the uploaded data and created samples
        # self.assertEqual(len(collection.data), 5)
        # self.assertEqual(len(collection.samples), 4)
        # names = {sample.name for sample in collection.samples}
        # self.assertIn('01_1-1_IP_plus', names)
        # self.assertIn('02_1-1_IP_minus', names)
        # sample1 = collection.samples.get(name='01_1-1_IP_plus')
        # sample2 = collection.samples.get(name='02_1-1_IP_minus')
        # file1 = 'pool24_01_1-1_IP_plus_TCGCAGG_mate1.fastq.gz'
        # file2 = 'pool24_02_1-1_IP_minus_CTCTGCA_mate2.fastq.gz'
        # self.assertIn(file1, sample1.files())
        # self.assertIn(file2, sample2.files())
        # self.assertEqual(sample1.descriptor['sample']['source'], 'Tumor')
        # self.assertEqual(sample2.descriptor['sample']['source'], 'Control')
| 38.418182 | 89 | 0.560288 | [
"Apache-2.0"
] | tristanbrown/resolwe-bio-py | resdk/tests/functional/data_upload/e2e_upload.py | 19,017 | Python |
# Fixed UUIDs shared across the device test fixtures.
HUME_UUID = "9cb37270-69f5-4dc0-9fd5-7183da5ffc19"
DEVICE_UUID_1 = "e2bf93b6-9b5d-4944-a863-611b6b6600e7"
DEVICE_UUID_2 = "e2bf93b6-9b5d-4944-a863-611b6b6600e1"
DEVICE_UUID_3 = "e2bf93b6-9b5d-4944-a863-611b6b6600e2"
# Capability descriptor for a basic LED device: a single state (id 0) whose
# control values map 'on' -> 1 and 'off' -> 0.  The semantics of the numeric
# 'category' and 'type' codes are defined elsewhere -- TODO confirm.
BASIC_LED_CAPS = {
    'uuid': DEVICE_UUID_1,
    'name': 'Basic LED',
    'category': 1,
    'type': 1,
    'states': [
        {
            'id': 0,
            'control': [{'on': 1}, {'off': 0}]
        }
    ]
}
| 24.111111 | 54 | 0.589862 | [
"MIT"
] | megacorpincorporated/hint | backend/device/test_defs.py | 434 | Python |
import logging
import pandas as pd
import pyodbc
from django.db import models
from fernet_fields import EncryptedCharField
from datastores.mixins import GetConnectionMixin
from datastores.util import get_query, structure_tables_views
logger = logging.getLogger(__name__)
class AzureDatastore(GetConnectionMixin, models.Model):
    """Django model describing a connection to an Azure SQL database.

    Credentials are stored on the model (the password encrypted at rest via
    ``EncryptedCharField``); queries are executed over ODBC with ``pyodbc``.
    """

    database_name = models.CharField(max_length=128)
    username = models.CharField(max_length=128)
    password = EncryptedCharField(max_length=128)
    host = models.CharField(max_length=300)
    # 1433 is the default SQL Server / Azure SQL port.
    port = models.IntegerField(default=1433)

    def __init__(self, *args, **kwargs):
        super(AzureDatastore, self).__init__(*args, **kwargs)
        # Build the ODBC connection string once.  pyodbc.drivers()[-1] picks
        # the last registered driver -- assumed to be the SQL Server driver
        # on the deployment image (TODO confirm).
        self._connection_string = f'DRIVER={pyodbc.drivers()[-1]};SERVER={self.host};PORT={self.port};' \
                                  f'DATABASE={self.database_name};UID={self.username};' \
                                  f'PWD={self.password}'

    def __str__(self):
        return self.database_name

    def get_connection_details(self):
        """Return a human-readable summary of the connection target."""
        return f"Database: {self.database_name}, Host: {self.host}, Port: {self.port}"

    def _execute_query(self, query, get_column_names=False, commit=False):
        """Execute ``query`` and return its result, or None on failure.

        Returns the fetched rows (optionally as ``(rows, column_names)``)
        for read queries, or None when ``commit`` is True or when any
        error occurs (the error is logged, not raised).
        """
        try:
            connection = pyodbc.connect(self._connection_string)
            try:
                cursor = connection.cursor()
                cursor.execute(query)
                logger.info(f"Executed '{query}' on {self} (Azure).")
                if commit:
                    cursor.commit()
                else:
                    rows = cursor.fetchall()
                    if get_column_names:
                        return rows, [col[0] for col in cursor.description]
                    return rows
            finally:
                # Always release the ODBC connection (the previous
                # implementation leaked it on every call).
                connection.close()
        except Exception as error:
            logger.error(f"Error while executing '{query}' on {self} (Azure): {error}")

    def is_connection_valid(self):
        """Return True when a connection can be established."""
        try:
            connection = pyodbc.connect(self._connection_string)
            # Close the probe connection instead of leaking it.
            connection.close()
            return True
        except (pyodbc.DatabaseError, pyodbc.InterfaceError):
            return False

    def get_viable_datasets(self):
        """List the schema-qualified tables and views in the database."""
        table_query = "SELECT schema_name(schema_id) as schema_name, t.name " \
                      "FROM sys.tables t " \
                      "ORDER BY schema_name;"
        view_query = "SELECT schema_name(schema_id) as schema_name, name " \
                     "FROM sys.views " \
                     "WHERE schema_name(schema_id) != 'sys'" \
                     "ORDER BY schema_name;"
        return structure_tables_views(self._execute_query(table_query), self._execute_query(view_query))

    def retrieve_data(self, dataset=None, query=None, limit=None):
        """Run ``query`` (or one derived from ``dataset``) into a DataFrame.

        When ``limit`` is given and the query does not already contain
        "TOP", a ``TOP {limit}`` clause is injected right after the first
        SELECT keyword (assumes an upper-case SELECT -- TODO confirm that
        ``get_query`` always produces one).
        """
        query = get_query(dataset, query)
        if "TOP" not in query and limit:
            limit_query = f" TOP {limit}"
            select_query_index = query.index("SELECT") + len("SELECT")
            query = query[:select_query_index] + limit_query + query[select_query_index:]
        data, column_names = self._execute_query(query, get_column_names=True)
        return pd.DataFrame([tuple(row) for row in data], columns=column_names)
| 37.096386 | 105 | 0.624553 | [
"BSD-2-Clause"
] | harry-consulting/SAEF1 | saefportal/datastores/models/azure_datastore.py | 3,079 | Python |
from typing import Dict
import pyarrow as pa
import regex as re
from feast import ValueType
def trino_to_feast_value_type(trino_type_as_str: str) -> ValueType:
    """Map a Trino column type name to the corresponding Feast ``ValueType``.

    The lookup is case-insensitive; unknown type names raise ``KeyError``.
    """
    lookup: Dict[str, ValueType] = {
        # Integer types -- everything below 64 bits maps to INT32.
        "tinyint": ValueType.INT32,
        "smallint": ValueType.INT32,
        "int": ValueType.INT32,
        "integer": ValueType.INT32,
        "bigint": ValueType.INT64,
        # Floating point / decimal types.
        "double": ValueType.DOUBLE,
        "decimal": ValueType.FLOAT,
        # Temporal, textual and boolean types.
        "timestamp": ValueType.UNIX_TIMESTAMP,
        "char": ValueType.STRING,
        "varchar": ValueType.STRING,
        "boolean": ValueType.BOOL,
    }
    return lookup[trino_type_as_str.lower()]
def pa_to_trino_value_type(pa_type_as_str: str) -> str:
    """Translate a PyArrow type string into the equivalent Trino type name.

    PyArrow types: https://arrow.apache.org/docs/python/api/datatypes.html
    Trino types: https://trino.io/docs/current/language/types.html
    """
    normalized = pa_type_as_str.lower()
    # ``wrapper`` carries the array<> decoration for list types.
    wrapper = "{}"
    if normalized.startswith("list"):
        wrapper = "array<{}>"
        normalized = re.search(r"^list<item:\s(.+)>$", normalized).group(1)
    if normalized.startswith("date"):
        return wrapper.format("date")
    if normalized.startswith("timestamp"):
        zoned = "tz=" in normalized
        return wrapper.format("timestamp with time zone" if zoned else "timestamp")
    if normalized.startswith("decimal"):
        # Decimal type strings pass through unchanged.
        return wrapper.format(normalized)
    lookup = {
        "null": "null",
        "bool": "boolean",
        "int8": "tinyint",
        "int16": "smallint",
        "int32": "int",
        "int64": "bigint",
        "uint8": "smallint",
        "uint16": "int",
        "uint32": "bigint",
        "uint64": "bigint",
        "float": "double",
        "double": "double",
        "binary": "binary",
        "string": "varchar",
    }
    return wrapper.format(lookup[normalized])
# Direct Trino-name -> PyArrow-type lookups used by trino_to_pa_value_type;
# parameterized types (decimal, timestamp, varchar, array) are handled there.
_TRINO_TO_PA_TYPE_MAP = {
    "null": pa.null(),
    "boolean": pa.bool_(),
    "date": pa.date32(),
    "tinyint": pa.int8(),
    "smallint": pa.int16(),
    "integer": pa.int32(),
    "bigint": pa.int64(),
    "double": pa.float64(),
    "binary": pa.binary(),
    "char": pa.string(),
}
def trino_to_pa_value_type(trino_type_as_str: str) -> pa.DataType:
    """Translate a Trino type string into the equivalent PyArrow type.

    ``array(T)`` becomes ``pa.list_(T)``; decimals wider than 32 digits map
    to float64, narrower ones to float32.
    """
    normalized = trino_type_as_str.lower()
    is_array: bool = False
    if normalized.startswith("array"):
        is_array = True
        normalized = re.search(r"^array\((\w+)\)$", normalized).group(1)
    if normalized.startswith("decimal"):
        # NOTE(review): the (?>...) atomic group requires the ``regex``
        # module (or Python >= 3.11 stdlib ``re``); if the pattern does not
        # match, ``pa_type`` stays unbound -- latent NameError preserved.
        precision_match = re.search(
            r"^decimal\((\d+)(?>,\s?\d+)?\)$", normalized
        )
        if precision_match:
            digits = int(precision_match.group(1))
            pa_type = pa.float64() if digits > 32 else pa.float32()
    elif normalized.startswith("timestamp"):
        pa_type = pa.timestamp("us")
    elif normalized.startswith("varchar"):
        pa_type = pa.string()
    else:
        pa_type = _TRINO_TO_PA_TYPE_MAP[normalized]
    return pa.list_(pa_type) if is_array else pa_type
| 28.839286 | 86 | 0.603406 | [
"Apache-2.0"
] | Agent007/feast | sdk/python/feast/infra/offline_stores/contrib/trino_offline_store/trino_type_map.py | 3,230 | Python |
from psutil import virtual_memory, cpu_freq, disk_usage, cpu_percent, cpu_count, swap_memory
from subprocess import check_output
#from termcolor import colored
from colorhex import colorex, BOLD, BLINKING, UNDERLINE, ITALIC
from time import sleep
from sys import exit
from os import system
# Queried once at start-up via shell commands; trailing newlines are turned
# into spaces by ``tr`` so the values print cleanly on one line.
CPU_MODEL = check_output("lscpu | grep 'Model name' | cut -f 2 -d ':' | awk '{$1=$1}1' | tr '\n', ' '", shell=True, text=True)
ARCH_TYPE = check_output("uname -m | tr '\n', ' '", shell=True, text=True)
def convertbytes(fsize, units=(' bytes', ' KB', ' MB', ' GB', ' TB', ' PB', ' EB')):
    """Format a byte count as a human-readable string, e.g. ``"2.00 KB"``.

    Divides by 1024 until the value drops below 1024 or the largest unit is
    reached.  Iterative rewrite of the original recursive version: it avoids
    a mutable default argument and, instead of exhausting the unit list and
    crashing with IndexError, clamps absurdly large values to the last unit.
    """
    size = float(fsize)
    index = 0
    while size >= 1024 and index < len(units) - 1:
        size /= 1024
        index += 1
    return "{:.2f}{}".format(size, units[index])
# Show a short splash banner, then refresh the stats display once a second.
system('clear')
print(colorex(' -- PyMonitr -- ', '7289da', style=BOLD))
sleep(1)
system('clear')
while True:
    # Snapshot all metrics for this refresh cycle.
    ram = virtual_memory()
    # NOTE(review): ``cpu`` is collected but never used below.
    cpu = cpu_freq(percpu=True)
    storage = disk_usage('/')
    swap = swap_memory()
    total_ram = convertbytes(ram[0])
    used_ram = ram[0] - ram[1]
    free_ram = convertbytes(ram[1])
    # NOTE(review): the ``percent_free_*`` variables actually hold the *used*
    # percentage; the free share is derived below as ``value - 100`` with the
    # minus sign stripped via .replace('-', '').
    percent_used_ram = ram.percent
    percent_free_ram = abs(percent_used_ram)
    total_swap = convertbytes(swap[0])
    used_swap = convertbytes(swap[1])
    free_swap = convertbytes(swap[2])
    percent_used_swap = swap.percent
    percent_free_swap = abs(percent_used_swap)
    total_storage = convertbytes(storage[0])
    used_storage = convertbytes(storage[1])
    free_storage = convertbytes(storage[2])
    percent_used_storage = storage.percent
    percent_free_storage = abs(percent_used_storage)
    # Storage section: colour shifts green -> yellow -> orange -> red as
    # usage climbs; 90%+ additionally blinks a warning.
    print(colorex('|===== [Storage] =====|', 'CAD3c8', style=BOLD))
    print(colorex(f'Total: {total_storage}', '7289da', style=BOLD))
    if int(percent_used_storage) in range(0, 50):
        print(colorex(f'Used: {used_storage} | {percent_used_storage}%', '43b581', style=BOLD))
    elif int(percent_used_storage) in range(50, 80):
        print(colorex(f'Used: {used_storage} | {percent_used_storage}%', 'fdcc4b', style=BOLD))
    elif int(percent_used_storage) in range(80, 90):
        print(colorex(f'Used: {used_storage} | {percent_used_storage}%', 'fa8231', style=BOLD))
    elif int(percent_used_storage) in range(90, 101):
        print(colorex(f'Used: {used_storage} | {percent_used_storage}% | ', 'f04947', style=BOLD), end=colorex('STORAGE USAGE IS TOO HIGH!!\n', 'f04947', style=[BLINKING, BOLD]))
    if int(percent_free_storage) in range(0, 50):
        print(colorex(f'Free: {free_storage} | {percent_free_storage - 100:.1f}%', '43b581', style=BOLD).replace('-', ''))
    elif int(percent_free_storage) in range(50, 80):
        print(colorex(f'Free: {free_storage} | {percent_free_storage - 100:.1f}%', 'fdcc4b', style=BOLD).replace('-', ''))
    elif int(percent_free_storage) in range(80, 90):
        print(colorex(f'Free: {free_storage} | {percent_free_storage - 100:.1f}%', 'fa8231', style=BOLD).replace('-', ''))
    elif int(percent_free_storage) in range(90, 101):
        print(colorex(f'Free: {free_storage} | {percent_free_storage - 100:.1f}% | ', 'f04947', style=BOLD), end=colorex('LOW STORAGE SPACE AVAILABLE!!\n', 'f04947', style=[BLINKING, BOLD]))
    # RAM section (same colour thresholds as storage).
    print(colorex('|======= [RAM] =======|', 'CAD3c8', style=BOLD))
    print(colorex(f'Total: {total_ram}', '7289da', style=BOLD))
    if int(percent_used_ram) in range(0, 50):
        print(colorex(f'Used: {convertbytes(used_ram)} | {percent_used_ram}%', '43b581', style=BOLD))
    elif int(percent_used_ram) in range(50, 80):
        print(colorex(f'Used: {convertbytes(used_ram)} | {percent_used_ram}%', 'fdcc4b', style=BOLD))
    elif int(percent_used_ram) in range(80, 90):
        print(colorex(f'Used: {convertbytes(used_ram)} | {percent_used_ram}%', 'fa8231', style=BOLD))
    elif int(percent_used_ram) in range(90, 101):
        print(colorex(f'Used: {convertbytes(used_ram)} | {percent_used_ram}% | ', 'f04947', style=BOLD), end=colorex('RAM USAGE IS TOO HIGH!!\n', 'f04947', style=[BLINKING, BOLD]))
    if int(percent_free_ram) in range(0, 50):
        print(colorex(f'Free: {free_ram} | {percent_free_ram - 100:.1f}%', '43b581', style=BOLD).replace('-', ''))
    elif int(percent_free_ram) in range(50, 80):
        print(colorex(f'Free: {free_ram} | {percent_free_ram - 100:.1f}%', 'fdcc4b', style=BOLD).replace('-', ''))
    elif int(percent_free_ram) in range(80, 90):
        print(colorex(f'Free: {free_ram} | {percent_free_ram - 100:.1f}%', 'fa8231', style=BOLD).replace('-', ''))
    elif int(percent_free_ram) in range(90, 101):
        print(colorex(f'Free: {free_ram} | {percent_free_ram - 100:.1f}% | ', 'f04947', style=BOLD), end=colorex('LOW RAM MEMORY AVAILABLE!!\n', 'f04947', style=[BLINKING, BOLD]))
    # Swap section.
    print(colorex('|====== [Swap] =======|', 'CAD3c8', style=BOLD))
    print(colorex(f'Total: {total_swap}', '7289da', style=BOLD))
    if int(percent_used_swap) in range(0, 50):
        print(colorex(f'Used: {used_swap} | {percent_used_swap}%', '43b581', style=BOLD))
    elif int(percent_used_swap) in range(50, 80):
        print(colorex(f'Used: {used_swap} | {percent_used_swap}%', 'fdcc4b', style=BOLD))
    elif int(percent_used_swap) in range(80, 90):
        print(colorex(f'Used: {used_swap} | {percent_used_swap}%', 'fa8231', style=BOLD))
    elif int(percent_used_swap) in range(90, 101):
        print(colorex(f'Used: {used_swap} | {percent_used_swap}% | ', 'f04947', style=BOLD), end=colorex('SWAP USAGE IS TOO HIGH!!\n', 'f04947', style=[BLINKING, BOLD]))
    if int(percent_free_swap) in range(0, 50):
        print(colorex(f'Free: {free_swap} | {percent_free_swap - 100:.1f}%', '43b581', style=BOLD).replace('-', ''))
    elif int(percent_free_swap) in range(50, 80):
        print(colorex(f'Free: {free_swap} | {percent_free_swap - 100:.1f}%', 'fdcc4b', style=BOLD).replace('-', ''))
    elif int(percent_free_swap) in range(80, 90):
        print(colorex(f'Free: {free_swap} | {percent_free_swap - 100:.1f}%', 'fa8231', style=BOLD).replace('-', ''))
    elif int(percent_free_swap) in range(90, 101):
        print(colorex(f'Free: {free_swap} | {percent_free_swap - 100:.1f}% | ', 'f04947', style=BOLD), end=colorex('LOW SWAP MEMORY AVAILABLE!!\n', 'f04947', style=[BLINKING, BOLD]))
    # CPU section.
    print(colorex('|======= [CPU] =======|', 'CAD3c8', style=BOLD))
    cpu_perc = cpu_percent()
    if int(cpu_perc) in range(0, 50):
        print(colorex(f'Usage: {cpu_perc}%', '43b581', style=BOLD))
    elif int(cpu_perc) in range(50, 80):
        print(colorex(f'Usage: {cpu_perc}%', 'fdcc4b', style=BOLD))
    elif int(cpu_perc) in range(80, 90):
        print(colorex(f'Usage: {cpu_perc}%', 'fa8231', style=BOLD))
    elif int(cpu_perc) in range(90, 101):
        print(colorex(f'Usage: {cpu_perc}% | ', 'f04947', style=BOLD), end=colorex('CPU USAGE IS TOO HIGH!!\n', 'f04947', style=[BLINKING, BOLD]))
    print(colorex(f'Total cores: {cpu_count()}', '82589f', style=BOLD))
    print(colorex(f'Architecture type: {ARCH_TYPE}', 'ccae62', style=BOLD))
    print(colorex(f'CPU model/name: {CPU_MODEL}', 'FEA47F', style=BOLD))
    # NOTE(review): Ctrl-C is only caught while sleeping; a KeyboardInterrupt
    # raised during the prints above will propagate.
    try:
        sleep(1)
    except KeyboardInterrupt:
        exit()
    system('clear')
"MIT"
] | devlocalhost/pymonitr | pymn_v6.py | 6,894 | Python |
import signal
import sys
import logging
import decimal
import datetime
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
DeleteRowsEvent,
UpdateRowsEvent,
WriteRowsEvent,
)
def mysql_stream(conf, mongo, queue_out):
    """Stream MySQL binlog row events into Mongo and a sequence queue.

    For each configured database, reads insert/update/delete row events
    from the binlog (resuming from the position stored in Mongo), persists
    each event plus the advancing binlog position, and publishes the
    resulting sequence number on ``queue_out``.
    """
    logger = logging.getLogger(__name__)
    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    # Base MySQL connection settings
    mysql_settings = {
        "host": conf['host'],
        "port": conf.getint('port'),
        "user": conf['user'],
        "passwd": conf['password']
    }
    res_dict = dict()
    dbs = conf['databases'].split(",")
    # NOTE(review): the table list is hard-coded -- only 'already_table' is
    # streamed, even though 'databases' comes from the config.
    tables = ['already_table']
    for db in dbs:
        for table in tables:
            res_dict.update({table: mongo.get_log_pos(db, table)})
    for db in dbs:
        for table in tables:
            log_file, log_pos, resume_stream = res_dict.get(table)
            stream = BinLogStreamReader(connection_settings=mysql_settings,
                                        server_id=conf.getint('slaveid'),
                                        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
                                        blocking=True,
                                        resume_stream=resume_stream,
                                        log_file=log_file,
                                        log_pos=log_pos,
                                        only_tables=table,  # only stream events for this table
                                        only_schemas=db)  # only stream events for this database
            for binlogevent in stream:
                binlogevent.dump()
                schema = "%s" % binlogevent.schema
                table = "%s" % binlogevent.table
                # event_type, vals = None, None
                for row in binlogevent.rows:
                    if isinstance(binlogevent, DeleteRowsEvent):
                        vals = process_binlog_dict(row["values"])
                        event_type = 'delete'
                    elif isinstance(binlogevent, UpdateRowsEvent):
                        vals = dict()
                        vals["before"] = process_binlog_dict(row["before_values"])
                        vals["after"] = process_binlog_dict(row["after_values"])
                        event_type = 'update'
                    elif isinstance(binlogevent, WriteRowsEvent):
                        vals = process_binlog_dict(row["values"])
                        event_type = 'insert'
                    # Write the event type and row values to Mongo; the
                    # returned _id doubles as the sequence number.
                    seqnum = mongo.write_to_queue(event_type, vals, schema, table)
                    # Persist the current binlog position.
                    mongo.write_log_pos(stream.log_file, stream.log_pos, db, table)
                    # Publish the _id as seqnum on the outbound queue.
                    queue_out.put({'seqnum': seqnum})
                    logger.debug(f"------row------{row}")
                    logger.debug(f"------stream.log_pos------{stream.log_pos}")
                    logger.debug(f"------stream.log_file------{stream.log_file}")
            stream.close()
# Convert every non-int value to str; the types may be converted back on
# import later, so here everything except ints becomes a string.
def process_binlog_dict(_dict):
    """Stringify every non-int value of ``_dict`` in place and return it."""
    for key in _dict:
        value = _dict[key]
        if not isinstance(value, int):
            _dict[key] = str(value)
    return _dict
# # Because the data is inserted via CSV documents, the value types are no
# # longer validated here.
# def process_binlog_dict(_dict):
# for k, v in _dict.items():
# if isinstance(v, decimal.Decimal):
# _dict.update({k: float(v)})
# elif isinstance(v, datetime.timedelta):
# _dict.update({k: str(v)})
# elif isinstance(v, datetime.datetime):
# _format = "%Y-%m-%d %H:%M:%S"
# d1 = v.strftime(_format)
# _new = datetime.datetime.strptime(d1, _format)
# _dict.update({k: _new})
#
# return _dict
| 36.401869 | 103 | 0.52837 | [
"Apache-2.0"
] | furuiyang0715/mymongo | mymongolib/mysql.py | 4,127 | Python |
#Program to plot a point
from cg_algorithms.circle_algorithms import circle_algorithms
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
import math
import time
def init():
    """Set a black clear colour and a 2D orthographic projection (+/-250)."""
    glClearColor(0.0, 0.0, 0.0, 0.0)
    gluOrtho2D(-250.0, 250.0, -250.0, 250.0)
def plot_points():
    """Clear the window and draw three red circles, one per algorithm."""
    glClear(GL_COLOR_BUFFER_BIT)
    glColor3f(1.0, 0.0, 0.0)
    glBegin(GL_POINTS)
    # Radius-60 circle via Bresenham's algorithm.
    bres = circle_algorithms(60, 0, 0)
    bres.bresenham_circle()
    # Radius-100 circle via the parametric equation.
    para = circle_algorithms(100, 0, 0)
    para.parameteric_circle()
    # Radius-150 circle via the midpoint algorithm.
    midp = circle_algorithms(150, 0, 0)
    midp.midpoint_circle()
    glEnd()
    glFlush()
def main():
    """Create a 500x500 GLUT window and enter the event loop (blocks)."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
    glutInitWindowSize(500, 500)
    glutInitWindowPosition(50, 50)
    glutCreateWindow(b'plot_all_points')
    glutDisplayFunc(plot_points)
    init()
    glutMainLoop()
# Run the viewer immediately when the module is executed/imported.
main()
| 20.340909 | 61 | 0.697207 | [
"MIT"
] | Siddharths8212376/cg_algorithms | tests/test_circle.py | 895 | Python |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
import dask.array as da
from dask.array.utils import assert_eq as _assert_eq
from dask.core import get_deps
from dask.context import set_options
import numpy as np
# temporary until numpy functions migrated
try:
from numpy import nanprod
except ImportError: # pragma: no cover
import dask.array.numpy_compat as npcompat
nanprod = npcompat.nanprod
def assert_eq(a, b):
    # Delegate to dask's assert_eq, always treating NaNs as equal.
    _assert_eq(a, b, equal_nan=True)
def same_keys(a, b):
    """Return True when two dask collections have the same set of graph keys."""
    def normalize(k):
        # Pad bare string keys into 4-tuples so they sort against tuple keys.
        return (k, -1, -1, -1) if isinstance(k, str) else k
    return sorted(a.dask, key=normalize) == sorted(b.dask, key=normalize)
def reduction_1d_test(da_func, darr, np_func, narr, use_dtype=True, split_every=True):
    """Compare a dask 1-D reduction against its numpy reference.

    Checks the plain and ``keepdims`` forms, graph-key determinism,
    optionally the ``dtype`` argument, and optionally tree reductions via
    ``split_every`` (both the int and the per-axis dict spelling).
    """
    assert_eq(da_func(darr), np_func(narr))
    assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
    assert same_keys(da_func(darr), da_func(darr))
    assert same_keys(da_func(darr, keepdims=True), da_func(darr, keepdims=True))
    if use_dtype:
        assert_eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
        assert_eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
        assert same_keys(da_func(darr, dtype='i8'), da_func(darr, dtype='i8'))
    if split_every:
        a1 = da_func(darr, split_every=2)
        a2 = da_func(darr, split_every={0: 2})
        assert same_keys(a1, a2)
        assert_eq(a1, np_func(narr))
        assert_eq(a2, np_func(narr))
        assert_eq(da_func(darr, keepdims=True, split_every=2),
                  np_func(narr, keepdims=True))
@pytest.mark.parametrize('dtype', ['f4', 'i4'])
def test_reductions_1D(dtype):
    """Exercise all 1-D reductions (plain and nan-variants) on a small array."""
    x = np.arange(5).astype(dtype)
    a = da.from_array(x, chunks=(2,))
    reduction_1d_test(da.sum, a, np.sum, x)
    reduction_1d_test(da.prod, a, np.prod, x)
    reduction_1d_test(da.mean, a, np.mean, x)
    reduction_1d_test(da.var, a, np.var, x)
    reduction_1d_test(da.std, a, np.std, x)
    # min/max/any/all take no dtype argument, hence use_dtype=False.
    reduction_1d_test(da.min, a, np.min, x, False)
    reduction_1d_test(da.max, a, np.max, x, False)
    reduction_1d_test(da.any, a, np.any, x, False)
    reduction_1d_test(da.all, a, np.all, x, False)
    reduction_1d_test(da.nansum, a, np.nansum, x)
    reduction_1d_test(da.nanprod, a, nanprod, x)
    reduction_1d_test(da.nanmean, a, np.mean, x)
    reduction_1d_test(da.nanvar, a, np.var, x)
    reduction_1d_test(da.nanstd, a, np.std, x)
    reduction_1d_test(da.nanmin, a, np.nanmin, x, False)
    reduction_1d_test(da.nanmax, a, np.nanmax, x, False)
def reduction_2d_test(da_func, darr, np_func, narr, use_dtype=True,
                      split_every=True):
    """Compare a dask 2-D reduction against its numpy reference.

    Covers full, per-axis (0, 1 and (1, 0)) and ``keepdims`` reductions,
    graph-key determinism, and optionally ``dtype`` and ``split_every``
    tree-reduction variants.
    """
    assert_eq(da_func(darr), np_func(narr))
    assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
    assert_eq(da_func(darr, axis=0), np_func(narr, axis=0))
    assert_eq(da_func(darr, axis=1), np_func(narr, axis=1))
    assert_eq(da_func(darr, axis=1, keepdims=True),
              np_func(narr, axis=1, keepdims=True))
    assert_eq(da_func(darr, axis=(1, 0)), np_func(narr, axis=(1, 0)))
    assert same_keys(da_func(darr, axis=1), da_func(darr, axis=1))
    assert same_keys(da_func(darr, axis=(1, 0)), da_func(darr, axis=(1, 0)))
    if use_dtype:
        assert_eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
        assert_eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
    if split_every:
        a1 = da_func(darr, split_every=4)
        a2 = da_func(darr, split_every={0: 2, 1: 2})
        assert same_keys(a1, a2)
        assert_eq(a1, np_func(narr))
        assert_eq(a2, np_func(narr))
        assert_eq(da_func(darr, keepdims=True, split_every=4),
                  np_func(narr, keepdims=True))
        assert_eq(da_func(darr, axis=0, split_every=2), np_func(narr, axis=0))
        assert_eq(da_func(darr, axis=0, keepdims=True, split_every=2),
                  np_func(narr, axis=0, keepdims=True))
        assert_eq(da_func(darr, axis=1, split_every=2), np_func(narr, axis=1))
        assert_eq(da_func(darr, axis=1, keepdims=True, split_every=2),
                  np_func(narr, axis=1, keepdims=True))
@pytest.mark.parametrize('dtype', ['f4', 'i4'])
def test_reductions_2D(dtype):
    """Exercise all 2-D reductions and check keepdims key structure."""
    x = np.arange(1, 122).reshape((11, 11)).astype(dtype)
    a = da.from_array(x, chunks=(4, 4))
    # A keepdims reduction collapses to a single-chunk (0, 0) key.
    b = a.sum(keepdims=True)
    assert b._keys() == [[(b.name, 0, 0)]]
    reduction_2d_test(da.sum, a, np.sum, x)
    reduction_2d_test(da.prod, a, np.prod, x)
    reduction_2d_test(da.mean, a, np.mean, x)
    reduction_2d_test(da.var, a, np.var, x, False)  # Difference in dtype algo
    reduction_2d_test(da.std, a, np.std, x, False)  # Difference in dtype algo
    reduction_2d_test(da.min, a, np.min, x, False)
    reduction_2d_test(da.max, a, np.max, x, False)
    reduction_2d_test(da.any, a, np.any, x, False)
    reduction_2d_test(da.all, a, np.all, x, False)
    reduction_2d_test(da.nansum, a, np.nansum, x)
    reduction_2d_test(da.nanprod, a, nanprod, x)
    reduction_2d_test(da.nanmean, a, np.mean, x)
    reduction_2d_test(da.nanvar, a, np.nanvar, x, False)  # Difference in dtype algo
    reduction_2d_test(da.nanstd, a, np.nanstd, x, False)  # Difference in dtype algo
    reduction_2d_test(da.nanmin, a, np.nanmin, x, False)
    reduction_2d_test(da.nanmax, a, np.nanmax, x, False)
@pytest.mark.parametrize(['dfunc', 'func'],
                         [(da.argmin, np.argmin), (da.argmax, np.argmax),
                          (da.nanargmin, np.nanargmin),
                          (da.nanargmax, np.nanargmax)])
def test_arg_reductions(dfunc, func):
    """Check arg-reductions on 3-D and 1-D arrays, incl. split_every paths."""
    x = np.random.random((10, 10, 10))
    a = da.from_array(x, chunks=(3, 4, 5))
    assert_eq(dfunc(a), func(x))
    assert_eq(dfunc(a, 0), func(x, 0))
    assert_eq(dfunc(a, 1), func(x, 1))
    assert_eq(dfunc(a, 2), func(x, 2))
    with set_options(split_every=2):
        assert_eq(dfunc(a), func(x))
        assert_eq(dfunc(a, 0), func(x, 0))
        assert_eq(dfunc(a, 1), func(x, 1))
        assert_eq(dfunc(a, 2), func(x, 2))
    # Out-of-range and tuple axes are rejected.
    pytest.raises(ValueError, lambda: dfunc(a, 3))
    pytest.raises(TypeError, lambda: dfunc(a, (0, 1)))
    x2 = np.arange(10)
    a2 = da.from_array(x2, chunks=3)
    assert_eq(dfunc(a2), func(x2))
    assert_eq(dfunc(a2, 0), func(x2, 0))
    assert_eq(dfunc(a2, 0, split_every=2), func(x2, 0))
@pytest.mark.parametrize(['dfunc', 'func'],
                         [(da.nanargmin, np.nanargmin),
                          (da.nanargmax, np.nanargmax)])
def test_nanarg_reductions(dfunc, func):
    """nanarg reductions must raise ValueError on all-NaN slices/arrays."""
    x = np.random.random((10, 10, 10))
    x[5] = np.nan
    a = da.from_array(x, chunks=(3, 4, 5))
    assert_eq(dfunc(a), func(x))
    assert_eq(dfunc(a, 0), func(x, 0))
    # Axes 1 and 2 reduce across the all-NaN slice at index 5.
    with pytest.raises(ValueError):
        dfunc(a, 1).compute()
    with pytest.raises(ValueError):
        dfunc(a, 2).compute()
    x[:] = np.nan
    a = da.from_array(x, chunks=(3, 4, 5))
    with pytest.raises(ValueError):
        dfunc(a).compute()
def test_reductions_2D_nans():
    """Run every reduction on a 2-D array whose chunks mix NaN patterns."""
    # chunks are a mix of some/all/no NaNs
    x = np.full((4, 4), np.nan)
    x[:2, :2] = np.array([[1, 2], [3, 4]])
    x[2, 2] = 5
    x[3, 3] = 6
    a = da.from_array(x, chunks=(2, 2))
    reduction_2d_test(da.sum, a, np.sum, x, False, False)
    reduction_2d_test(da.prod, a, np.prod, x, False, False)
    reduction_2d_test(da.mean, a, np.mean, x, False, False)
    reduction_2d_test(da.var, a, np.var, x, False, False)
    reduction_2d_test(da.std, a, np.std, x, False, False)
    reduction_2d_test(da.min, a, np.min, x, False, False)
    reduction_2d_test(da.max, a, np.max, x, False, False)
    reduction_2d_test(da.any, a, np.any, x, False, False)
    reduction_2d_test(da.all, a, np.all, x, False, False)
    reduction_2d_test(da.nansum, a, np.nansum, x, False, False)
    reduction_2d_test(da.nanprod, a, nanprod, x, False, False)
    reduction_2d_test(da.nanmean, a, np.nanmean, x, False, False)
    reduction_2d_test(da.nanvar, a, np.nanvar, x, False, False)
    reduction_2d_test(da.nanstd, a, np.nanstd, x, False, False)
    reduction_2d_test(da.nanmin, a, np.nanmin, x, False, False)
    reduction_2d_test(da.nanmax, a, np.nanmax, x, False, False)
    # Arg-reductions, full and per-axis, plain and nan-aware.
    assert_eq(da.argmax(a), np.argmax(x))
    assert_eq(da.argmin(a), np.argmin(x))
    assert_eq(da.nanargmax(a), np.nanargmax(x))
    assert_eq(da.nanargmin(a), np.nanargmin(x))
    assert_eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
    assert_eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
    assert_eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
    assert_eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
    assert_eq(da.argmax(a, axis=1), np.argmax(x, axis=1))
    assert_eq(da.argmin(a, axis=1), np.argmin(x, axis=1))
    assert_eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))
    assert_eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))
def test_moment():
    """Compare Array.moment against a direct central-moment computation."""
    def moment(x, n, axis=None):
        # n-th central moment: mean of (x - mean)**n along the given axis.
        return (((x - x.mean(axis=axis, keepdims=True)) ** n).sum(axis=axis) /
                np.ones_like(x).sum(axis=axis))
    # Poorly conditioned
    x = np.array([1., 2., 3.] * 10).reshape((3, 10)) + 1e8
    a = da.from_array(x, chunks=5)
    assert_eq(a.moment(2), moment(x, 2))
    assert_eq(a.moment(3), moment(x, 3))
    assert_eq(a.moment(4), moment(x, 4))
    x = np.arange(1, 122).reshape((11, 11)).astype('f8')
    a = da.from_array(x, chunks=(4, 4))
    assert_eq(a.moment(4, axis=1), moment(x, 4, axis=1))
    assert_eq(a.moment(4, axis=(1, 0)), moment(x, 4, axis=(1, 0)))
    # Tree reduction
    assert_eq(a.moment(order=4, split_every=4), moment(x, 4))
    assert_eq(a.moment(order=4, axis=0, split_every=4), moment(x, 4, axis=0))
    assert_eq(a.moment(order=4, axis=1, split_every=4), moment(x, 4, axis=1))
def test_reductions_with_negative_axes():
    """Negative ``axis`` values must behave like their NumPy equivalents."""
    x = np.random.random((4, 4, 4))
    a = da.from_array(x, chunks=2)

    assert_eq(a.argmin(axis=-1), x.argmin(axis=-1))
    # split_every forces the tree-reduction code path.
    assert_eq(a.argmin(axis=-1, split_every=2), x.argmin(axis=-1))

    assert_eq(a.sum(axis=-1), x.sum(axis=-1))
    # Mixed positive/negative axis tuple.
    assert_eq(a.sum(axis=(0, -1)), x.sum(axis=(0, -1)))
def test_nan():
    """NaN-aware reductions must match their NumPy counterparts."""
    x = np.array([[1, np.nan, 3, 4],
                  [5, 6, 7, np.nan],
                  [9, 10, 11, 12]])
    d = da.from_array(x, chunks=(2, 2))

    assert_eq(np.nansum(x), da.nansum(d))
    assert_eq(np.nansum(x, axis=0), da.nansum(d, axis=0))
    assert_eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))
    assert_eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))
    assert_eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))
    assert_eq(np.nanvar(x), da.nanvar(d))
    assert_eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))
    assert_eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))
    assert_eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))
    # ``nanprod`` here is the compat shim imported at module level.
    assert_eq(nanprod(x), da.nanprod(d))
def test_0d_array():
    """Full reductions should yield the same scalar type as NumPy does."""
    x = da.mean(da.ones(4, chunks=4), axis=0).compute()
    y = np.mean(np.ones(4))
    assert type(x) == type(y)
    x = da.sum(da.zeros(4, chunks=1)).compute()
    y = np.sum(np.zeros(4))
    assert type(x) == type(y)
def test_reduction_on_scalar():
    """Reductions (``.all()``) work on a 0-d (scalar) dask array."""
    x = da.from_array(np.array(1.0), chunks=())
    assert (x == x).all()
def assert_max_deps(x, n, eq=True):
    """Assert the widest task fan-in in ``x``'s graph equals (``eq=True``)
    or is bounded by (``eq=False``) ``n``."""
    dependencies, dependents = get_deps(x.dask)
    widest = max(len(deps) for deps in dependencies.values())
    if eq:
        assert widest == n
    else:
        assert widest <= n
def test_tree_reduce_depth():
    """``split_every`` must bound the fan-in at each level of the reduction tree."""
    # 2D
    x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))
    thresh = {0: 2, 1: 3}
    assert_max_deps(x.sum(split_every=thresh), 2 * 3)
    assert_max_deps(x.sum(axis=0, split_every=thresh), 2)
    assert_max_deps(x.sum(axis=1, split_every=thresh), 3)
    # A large scalar split_every collapses to a single level (bounded, not exact).
    assert_max_deps(x.sum(split_every=20), 20, False)
    assert_max_deps(x.sum(axis=0, split_every=20), 4)
    assert_max_deps(x.sum(axis=1, split_every=20), 6)

    # 3D
    x = da.from_array(np.arange(11 * 22 * 29).reshape((11, 22, 29)), chunks=(3, 4, 5))
    thresh = {0: 2, 1: 3, 2: 4}
    assert_max_deps(x.sum(split_every=thresh), 2 * 3 * 4)
    assert_max_deps(x.sum(axis=0, split_every=thresh), 2)
    assert_max_deps(x.sum(axis=1, split_every=thresh), 3)
    assert_max_deps(x.sum(axis=2, split_every=thresh), 4)
    assert_max_deps(x.sum(axis=(0, 1), split_every=thresh), 2 * 3)
    assert_max_deps(x.sum(axis=(0, 2), split_every=thresh), 2 * 4)
    assert_max_deps(x.sum(axis=(1, 2), split_every=thresh), 3 * 4)
    assert_max_deps(x.sum(split_every=20), 20, False)
    assert_max_deps(x.sum(axis=0, split_every=20), 4)
    assert_max_deps(x.sum(axis=1, split_every=20), 6)
    assert_max_deps(x.sum(axis=2, split_every=20), 6)
    assert_max_deps(x.sum(axis=(0, 1), split_every=20), 20, False)
    assert_max_deps(x.sum(axis=(0, 2), split_every=20), 20, False)
    assert_max_deps(x.sum(axis=(1, 2), split_every=20), 20, False)
    assert_max_deps(x.sum(axis=(0, 1), split_every=40), 4 * 6)
    assert_max_deps(x.sum(axis=(0, 2), split_every=40), 4 * 6)
    assert_max_deps(x.sum(axis=(1, 2), split_every=40), 6 * 6)
def test_tree_reduce_set_options():
    """``split_every`` can also come from the global dask options context."""
    x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))
    with set_options(split_every={0: 2, 1: 3}):
        assert_max_deps(x.sum(), 2 * 3)
        assert_max_deps(x.sum(axis=0), 2)
def test_reduction_names():
    """Reduction outputs should carry human-readable task-name prefixes."""
    x = da.ones(5, chunks=(2,))
    assert x.sum().name.startswith('sum')
    assert 'max' in x.max().name.split('-')[0]
    assert x.var().name.startswith('var')
    assert x.all().name.startswith('all')
    assert any(k[0].startswith('nansum') for k in da.nansum(x).dask)
    assert x.mean().name.startswith('mean')
| 39.060345 | 86 | 0.634591 | [
"MIT"
] | broy-va/SciWorCS | webpage/lib/python3.5/site-packages/dask/array/tests/test_reductions.py | 13,593 | Python |
#!/usr/bin/python
import sdk_common
import yaml
import re
import uuid
import random
import os
import time
# Block in charge of running integration tests (using Docker compose)
class SDKIntegrationTestRunner(sdk_common.BuildStepUsingGradle):
    """Runs the SDK integration tests via Docker Compose, retrying up to three times.

    The compose file is first rewritten for the current environment:
    ``${NAME}`` placeholders are substituted from the testing parameters and
    every service receives a unique ``container_name``.
    """

    def __init__(self, logger=None):
        super(SDKIntegrationTestRunner, self).__init__('SDK integration tests runner', logger)
        # Captures NAME out of a ``${ NAME }`` placeholder (tabs/spaces tolerated).
        self.pattern_environment_variable = re.compile(r"\$[\t ]*\{[\t ]*(?P<VALUE>[^\}\t ]+)[\t ]*\}")
        # Matches the whole ``${ NAME }`` token so it can be replaced in place.
        self.pattern_shell_command = re.compile(r"(?P<VALUE>\$[\t ]*\{[\t ]*[^\}\t ]+[\t ]*\})")
        self.environment_values = self.common_config.get_config().get_testing_parameters()
        self.docker_compose_file = self.common_config.get_config().get_docker_compose_file()

    def execute(self):
        """Entry point.

        Returns:
            bool: True when the integration tests eventually pass, False otherwise.
        """
        self.print_title()
        try:
            self.log_info("Configure the docker compose file for this environment")
            self.modifying_docker_compose_file()
            # self.log_info("Building test server docker image")
            # arguments = ["docker-compose", "build"]
            # return_code = self.call_command(arguments, None, True)
            # if return_code != 0:
            #     raise Exception('Error code', return_code)
            starting_time = time.time()
            return_code_int = self.run_integration_tests(starting_time, 0)
            if return_code_int != 0:
                self.log_info("Trying to run the tests a second time")
                # Second attempt: keep containers, restart services afterwards.
                return_code_int = self.run_integration_tests(starting_time, return_code_int, False, True)
            if return_code_int != 0:
                self.log_info("Trying to run the tests a third time")
                return_code_int = self.run_integration_tests(starting_time, return_code_int)
            if return_code_int != 0:
                raise Exception('Integration tests have failed', return_code_int)
        except Exception:  # fix: was a bare ``except:``, which also swallowed KeyboardInterrupt/SystemExit
            self.log_error('Failed to successfully run all integration tests')
            return False
        self.log_info("Done.")
        return True

    def run_integration_tests(self, starting_time, previous_return_code_int, clean_at_the_end=True,
                              restart_services_at_the_end=False):
        """Run ``docker-compose up`` once, but only within 80s of ``starting_time``.

        Past that budget a failure is assumed to be a genuine test failure
        rather than a docker flake, so ``previous_return_code_int`` is
        returned unchanged instead of re-running.
        """
        if time.time() < starting_time + 80:
            self.gather_docker_information()
            self.log_info("Running integration tests")
            return_code_int = self.call_command(["docker-compose", "up", "--build", "--no-recreate", "--exit-code-from",
                                                 "testrunner"], None, True, True)
            self.gather_docker_information()
            if clean_at_the_end:
                self.log_info("Stopping any running containers")
                self.call_command(["docker-compose", "down", "--remove-orphans"], None, True, True)
                self.call_command(["docker-compose", "kill"], None, True, True)
            if restart_services_at_the_end:
                self.call_command(["docker-compose", "restart"], None, True, True)
            return return_code_int
        # The failure of the command comes from genuine test errors rather than docker issues
        else:
            return previous_return_code_int

    def gather_docker_information(self):
        """Log the current docker images/containers state for diagnostics."""
        self.log_info("Gathering information about docker images and containers")
        self.call_command(["docker", "ps"], None, True, True)
        self.call_command(["docker", "image", "ls"], None, True, True)
        self.call_command(["docker-compose", "top"], None, True, True)

    def modifying_docker_compose_file(self):
        """Substitute environment placeholders and name the containers in place."""
        if not self.docker_compose_file or not os.path.exists(self.docker_compose_file):
            raise Exception("Missing docker compose file [%s]" % str(self.docker_compose_file))
        with open(self.docker_compose_file, 'r') as f:
            # fix: ``yaml.load`` without an explicit Loader is deprecated and
            # unsafe; ``safe_load`` is the correct call for plain compose YAML.
            cleansed_content = self.replace_environment_value(yaml.safe_load(f))
        named_containers = self.name_containers(cleansed_content)
        if named_containers:
            with open(self.docker_compose_file, 'w') as f:
                f.write(yaml.dump(named_containers))

    def find_environment_variable_value(self, env_variable):
        """Resolve ``env_variable`` from the configured testing parameters.

        Unknown variables are left as literal ``${NAME}`` text; values that
        are present but empty strings are substituted as a quoted empty
        string (``''``) so the YAML stays valid.
        """
        command = '${%s}' % str(env_variable)
        if self.environment_values:
            replacement = self.environment_values.get(env_variable)
            if not replacement and type(replacement) is str:
                replacement = "''"
            return replacement if replacement else command
        return command

    def replace_environment_value(self, obj):
        """Recursively substitute ``${NAME}`` placeholders in strings/lists/dicts."""
        if not obj:
            return None
        # Exact-type dispatch (not isinstance) mirrors the YAML node types.
        if type(obj) is str:
            result = self.pattern_environment_variable.search(obj)
            replacement = str(obj)
            if result:
                for m in self.pattern_shell_command.finditer(obj):
                    command = m.group(0)
                    env_result = self.pattern_environment_variable.search(command)
                    env_variable = env_result.group('VALUE')
                    replacement = replacement.replace(command,
                                                      self.find_environment_variable_value(env_variable))
            return replacement
        elif type(obj) is list:
            return [self.replace_environment_value(a) for a in obj]
        elif type(obj) is dict:
            return {k: self.replace_environment_value(v) for k, v in obj.items()}
        else:
            return obj

    def add_name(self, ref, description):
        """Attach a unique ``container_name`` (derived from ``ref``) to a service dict."""
        if type(description) is not dict:
            return description
        container_name = '%s_%s%s_%s' % (
            (str(ref)).strip().lower(), str(uuid.uuid1())[-4:].replace('-', ''),
            str(uuid.uuid4())[-4:].replace('-', ''), str(random.randint(1, 20)))
        description.update({'container_name': container_name})
        return description

    def name_containers(self, obj):
        """Give every compose service a unique container name (v1 and v2/v3 layouts)."""
        if not obj:
            return None
        if type(obj) is not dict:
            return obj
        if not obj.get('version') and not obj.get('services'):
            # docker compose v1: services are the top-level keys
            return {k: self.add_name(k, v) for k, v in obj.items()}
        else:  # docker compose v2/v3: services live under the 'services' key
            obj['services'] = {k: self.add_name(k, v) for k, v in obj.get('services').items()}
            return obj
| 48.259542 | 120 | 0.61373 | [
"Apache-2.0"
] | ARMmbed/mbed-cloud-sdk-java | scripts/sdk_run_integration_tests.py | 6,322 | Python |
#!/usr/bin/env python
import numpy as np
import scipy
import scipy.stats

from shared_utils import ArrayUtils
class SkellamMetrics:
def __init__(self, x_metrics, y_metrics, y_hat, model, l0, l1, training_values):
self._y = y_metrics
self._y_hat = y_hat
self.model = model
self.l0 = ArrayUtils.convert_to_array(l0)
self.l1 = ArrayUtils.convert_to_array(l1)
self.training_values = training_values
self._x0, self._x1 = self.split_or_duplicate_x(x_metrics)
self.max_ll = self.model.fun
self.coeff_size = self._x0.shape[1]
self.lambda_0_coefficients = self.model.x[0 : self.coeff_size].reshape(-1, 1)
self.lambda_1_coefficients = self.model.x[self.coeff_size :].reshape(-1, 1)
self.train_length = len(training_values[0])
@staticmethod
def split_or_duplicate_x(x):
return ArrayUtils.split_or_duplicate_x(x, False)
def sse(self):
return ((self._y - self._y_hat) ** 2).sum()
def _y_bar(self):
return self._y.mean()
def sst(self):
return ((self._y - self._y_bar()) ** 2).sum()
def r2(self):
"""Calculate R2 for either the train model or the test model"""
sse_sst = self.sse() / self.sst()
return 1 - sse_sst
def adjusted_r2(self):
"""Calculate adjusted R2 for either the train model or the test model"""
r2 = self.r2()
return 1 - (1-r2)*(self.train_length - 1)/(self.train_length - self.coeff_size - 1)
def log_likelihood(self):
"""Returns the maximum of the log likelihood function"""
return self.max_ll
def aic(self):
return 2*self.coeff_size - 2*np.log(self.max_ll)
def bic(self):
return self.coeff_size*np.log(self.train_length) - 2*np.log(self.max_ll)
def _calculate_lambda(self):
"""Create arrays for our predictions of the two Poisson distributions
"""
_lambda0 = ArrayUtils.convert_to_array(
np.exp(np.squeeze(self._x0 @ self.lambda_0_coefficients))
)
_lambda1 = ArrayUtils.convert_to_array(
np.exp(np.squeeze(self._x1 @ self.lambda_1_coefficients))
)
return _lambda0, _lambda1
def _calculate_v(self):
"""Create diagonal matrix consisting of our predictions of the Poisson distributions
"""
_lambda0, _lambda1 = self._calculate_lambda()
_v0 = np.diagflat(_lambda0)
_v1 = np.diagflat(_lambda1)
return _v0, _v1
def _calculate_w(self):
"""Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions
with their observed values
"""
_lambda0, _lambda1 = self._calculate_lambda()
_w0 = np.diagflat((self.l0 - _lambda0.reshape(-1, 1)) ** 2)
_w1 = np.diagflat((self.l1 - _lambda1.reshape(-1, 1)) ** 2)
return _w0, _w1
def _calculate_robust_covariance(self):
"""Calculate robust variance covariance matrices for our two sets of coefficients
"""
_v0, _v1 = self._calculate_v()
_w0, _w1 = self._calculate_w()
_robust_cov0 = (
np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))
* np.dot(np.dot(self._x0.T, _w0), self._x0)
* np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))
)
_robust_cov1 = (
np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1))
* np.dot(np.dot(self._x1.T, _w1), self._x1)
* np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1))
)
return _robust_cov0, _robust_cov1
def _calculate_robust_standard_errors(self):
"""Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal
values in the variance covariance matrices
"""
_robust_cov0, _robust_cov1 = self._calculate_robust_covariance()
_std_error0 = np.sqrt(np.diag(_robust_cov0))
_std_error1 = np.sqrt(np.diag(_robust_cov1))
return _std_error0, _std_error1
def _calculate_z_values(self):
"""Calculate z statistics for our two sets of coefficients
"""
_std_error0, _std_error1 = self._calculate_robust_standard_errors()
_z_values0 = self.lambda_0_coefficients[:, 0] / _std_error0
_z_values1 = self.lambda_1_coefficients[:, 0] / _std_error1
return _z_values0, _z_values1
def _calculate_p_values(self):
"""Calculate p values for our two sets of coefficients
"""
_z_values0, _z_values1 = self._calculate_z_values()
_p_values0 = scipy.stats.norm.sf(abs(_z_values0)) * 2
_p_values1 = scipy.stats.norm.sf(abs(_z_values1)) * 2
return _p_values0, _p_values1
| 38.524194 | 119 | 0.639941 | [
"Apache-2.0"
] | nathan-bennett/skellam | metrics/__init__.py | 4,777 | Python |
#!/bin/env python
# -*- coding: utf-8 -*-
"""test Queue
by Valentyn Stadnytskyi
created: August 2, 2019
This is a test library to evaluate the performance of the code.
Queue is an abstract data structure, somewhat similar to Stacks.
Unlike stacks, a queue is open at both its ends.
One end is always used to insert data (enqueue) and the other is used to remove data (dequeue)..
to run unittest: python3 -m unittest test_queue
"""
import unittest
from numpy.testing import assert_array_equal
class CircularBufferTest(unittest.TestCase):
    """Unit tests for the numpy-backed ``CircularBuffer`` ring buffer."""

    def test_queue_end(self):
        """
        test if the default pointer in the buffer is -1.
        """
        from ..circular_buffer import CircularBuffer
        buffer = CircularBuffer(shape=(100, 2))
        self.assertEqual(buffer.pointer, -1)

    def test_queue_end_two(self):
        """
        test if the default pointer in the buffer is -1.
        """
        # NOTE(review): duplicate of test_queue_end -- kept byte-identical here.
        from ..circular_buffer import CircularBuffer
        buffer = CircularBuffer(shape=(100, 2))
        self.assertEqual(buffer.pointer, -1)

    def test_1(self):
        """Appending one full packet advances both linear and packet pointers."""
        from numpy import random
        from ..circular_buffer import CircularBuffer
        buffer = CircularBuffer(shape=(100, 2, 4))
        data = random.randint(1024, size=(5, 2, 4))
        buffer.packet_length = 5
        buffer.append(data)
        self.assertEqual(buffer.pointer, 4)
        self.assertEqual(buffer.g_pointer, 4)
        self.assertEqual(buffer.packet_pointer, 0)
        self.assertEqual(buffer.g_packet_pointer, 0)

    def test_attributes(self):
        """shape/size/dtype reflect the construction arguments."""
        from ..circular_buffer import CircularBuffer
        from numpy import random
        buffer = CircularBuffer(shape=(100, 2), dtype='int16')
        data = random.randint(1024, size=(5, 2))
        buffer.append(data)
        self.assertEqual(buffer.shape, (100, 2))
        self.assertEqual(buffer.size, 100 * 2)
        self.assertEqual(buffer.dtype, 'int16')

    def test_full(self):
        """Pointers, metadata and slicing helpers after a 50-row append."""
        from ..circular_buffer import CircularBuffer
        from numpy import random, sum
        buffer = CircularBuffer(shape=(100, 2, 3), dtype='float64')
        data = random.randint(1024, size=(50, 2, 3))
        buffer.append(data)
        assert buffer.pointer == 49
        assert buffer.g_pointer == 49
        assert buffer.shape == (100, 2, 3)
        assert buffer.size == buffer.buffer.shape[0] * buffer.buffer.shape[1] * buffer.buffer.shape[2]
        assert buffer.dtype == 'float64'
        assert sum(buffer.get_i_j(i=5, j=6)) == sum(buffer.buffer[5])
        # get data between pointers 5 and 10 and compare to get 5 points from pointer M
        assert sum(buffer.get_i_j(i=5, j=10)) == sum(buffer.get_N(N=5, M=9))

    def test_vector_append(self):
        """Row-vector appends are retrievable individually and in order."""
        from ..circular_buffer import CircularBuffer
        from numpy import random, sum, zeros, concatenate
        buffer = CircularBuffer(shape=(1000, 3))
        vec1 = zeros((1, 3))
        vec2 = zeros((1, 3))
        vec1[0, 0] = 0.0
        vec1[0, 1] = 1.0
        vec1[0, 2] = 2.0
        buffer.append(vec1)
        vec2[0, 0] = 3.0
        vec2[0, 1] = 4.0
        vec2[0, 2] = 5.0
        buffer.append(vec2)
        assert_array_equal(buffer.get_last_value(), vec2)
        assert_array_equal(buffer.get_last_N(2), concatenate((vec1, vec2)))

    def test_get_data(self):
        """get_data tracks the appended history, including after wrap-around."""
        from ..circular_buffer import CircularBuffer
        from numpy import random, sum, zeros, concatenate, array
        buffer = CircularBuffer(shape=(1000, 3))
        res_buffer = []
        j = 0
        for i in range(5):
            vec = zeros((3,))
            vec[0] = j
            vec[1] = j ** 2
            vec[2] = j ** 3
            buffer.append(vec)
            res_buffer.append(vec)
            j += 1
        assert_array_equal(array(res_buffer), buffer.get_data())
        for i in range(555):
            vec = zeros((3,))
            vec[0] = j
            vec[1] = j ** 2
            vec[2] = j ** 3
            buffer.append(vec)
            res_buffer.append(vec)
            j += 1
        assert_array_equal(array(res_buffer), buffer.get_data())
        # The 1000-long buffer spills over and overwrites the oldest values;
        # get_data then returns only the retained window.
        for i in range(1300):
            vec = zeros((3,))
            vec[0] = j
            vec[1] = j ** 2
            vec[2] = j ** 3
            buffer.append(vec)
            res_buffer.append(vec)
            j += 1
        assert_array_equal(array(res_buffer[-1000:]), buffer.get_data())
"BSD-3-Clause"
] | rcm2dev/circular_buffer_numpy | circular_buffer_numpy/tests/test_circular_buffer.py | 4,626 | Python |
from __future__ import absolute_import
from contextlib import contextmanager
from mock import Mock, patch
from celery import states
from celery.exceptions import IncompleteStream, TimeoutError
from celery.five import range
from celery.result import (
AsyncResult,
EagerResult,
TaskSetResult,
result_from_tuple,
)
from celery.utils import uuid
from celery.utils.serialization import pickle
from celery.tests.case import AppCase, depends_on_current_app
def mock_task(name, state, result):
    """Build a fake task payload dict with a freshly generated id."""
    return {'id': uuid(), 'name': name, 'state': state, 'result': result}
def save_result(app, task):
    """Persist ``task`` into ``app``'s result backend according to its state."""
    traceback = 'Some traceback'
    state, tid, result = task['state'], task['id'], task['result']
    if state == states.SUCCESS:
        app.backend.mark_as_done(tid, result)
    elif state == states.RETRY:
        app.backend.mark_as_retry(
            tid, result, traceback=traceback,
        )
    else:
        app.backend.mark_as_failure(
            tid, result, traceback=traceback,
        )
def make_mock_group(app, size=10):
    """Create ``size`` successful mock tasks, persist them, and wrap each
    one in an ``AsyncResult``."""
    tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)]
    for task in tasks:
        save_result(app, task)
    return [app.AsyncResult(task['id']) for task in tasks]
class test_AsyncResult(AppCase):
    """AsyncResult behaviour against four pre-saved tasks:
    two SUCCESS ('the', 'quick'), one FAILURE (KeyError) and one RETRY."""

    def setup(self):
        self.task1 = mock_task('task1', states.SUCCESS, 'the')
        self.task2 = mock_task('task2', states.SUCCESS, 'quick')
        self.task3 = mock_task('task3', states.FAILURE, KeyError('brown'))
        self.task4 = mock_task('task3', states.RETRY, KeyError('red'))

        for task in (self.task1, self.task2, self.task3, self.task4):
            save_result(self.app, task)

        @self.app.task(shared=False)
        def mytask():
            pass
        self.mytask = mytask

    def test_compat_properties(self):
        # ``task_id`` is a legacy read/write alias for ``id``.
        x = self.app.AsyncResult('1')
        self.assertEqual(x.task_id, x.id)
        x.task_id = '2'
        self.assertEqual(x.id, '2')

    def test_children(self):
        x = self.app.AsyncResult('1')
        children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
        x.backend = Mock()
        x.backend.get_children.return_value = children
        x.backend.READY_STATES = states.READY_STATES
        self.assertTrue(x.children)
        self.assertEqual(len(x.children), 3)

    def test_propagates_for_parent(self):
        # A failed parent propagates before the backend is even consulted.
        x = self.app.AsyncResult(uuid())
        x.backend = Mock()
        x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE)
        with self.assertRaises(KeyError):
            x.get(propagate=True)
        self.assertFalse(x.backend.wait_for.called)

        x.parent = EagerResult(uuid(), 42, states.SUCCESS)
        x.get(propagate=True)
        self.assertTrue(x.backend.wait_for.called)

    def test_get_children(self):
        tid = uuid()
        x = self.app.AsyncResult(tid)
        child = [self.app.AsyncResult(uuid()).as_tuple()
                 for i in range(10)]
        x.backend._cache[tid] = {'children': child}
        self.assertTrue(x.children)
        self.assertEqual(len(x.children), 10)

        x.backend._cache[tid] = {'result': None}
        self.assertIsNone(x.children)

    def test_build_graph_get_leaf_collect(self):
        x = self.app.AsyncResult('1')
        x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None}
        c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
        x.iterdeps = Mock()
        x.iterdeps.return_value = (
            (None, x),
            (x, c[0]),
            (c[0], c[1]),
            (c[1], c[2])
        )
        x.backend.READY_STATES = states.READY_STATES
        self.assertTrue(x.graph)
        self.assertIs(x.get_leaf(), 2)

        it = x.collect()
        self.assertListEqual(list(it), [
            (x, None),
            (c[0], 0),
            (c[1], 1),
            (c[2], 2),
        ])

    def test_iterdeps(self):
        x = self.app.AsyncResult('1')
        x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None}
        c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
        for child in c:
            child.backend = Mock()
            child.backend.get_children.return_value = []
        x.backend.get_children = Mock()
        x.backend.get_children.return_value = c
        it = x.iterdeps()
        self.assertListEqual(list(it), [
            (None, x),
            (x, c[0]),
            (x, c[1]),
            (x, c[2]),
        ])
        # Without a cached result, a not-ready node raises IncompleteStream
        # unless intermediate results are explicitly allowed.
        x.backend._cache.pop('1')
        x.ready = Mock()
        x.ready.return_value = False
        with self.assertRaises(IncompleteStream):
            list(x.iterdeps())
        list(x.iterdeps(intermediate=True))

    def test_eq_not_implemented(self):
        self.assertFalse(self.app.AsyncResult('1') == object())

    @depends_on_current_app
    def test_reduce(self):
        # AsyncResult must round-trip through pickle.
        a1 = self.app.AsyncResult('uuid', task_name=self.mytask.name)
        restored = pickle.loads(pickle.dumps(a1))
        self.assertEqual(restored.id, 'uuid')
        self.assertEqual(restored.task_name, self.mytask.name)

        a2 = self.app.AsyncResult('uuid')
        self.assertEqual(pickle.loads(pickle.dumps(a2)).id, 'uuid')

    def test_successful(self):
        ok_res = self.app.AsyncResult(self.task1['id'])
        nok_res = self.app.AsyncResult(self.task3['id'])
        nok_res2 = self.app.AsyncResult(self.task4['id'])

        self.assertTrue(ok_res.successful())
        self.assertFalse(nok_res.successful())
        self.assertFalse(nok_res2.successful())

        pending_res = self.app.AsyncResult(uuid())
        self.assertFalse(pending_res.successful())

    def test_str(self):
        ok_res = self.app.AsyncResult(self.task1['id'])
        ok2_res = self.app.AsyncResult(self.task2['id'])
        nok_res = self.app.AsyncResult(self.task3['id'])
        self.assertEqual(str(ok_res), self.task1['id'])
        self.assertEqual(str(ok2_res), self.task2['id'])
        self.assertEqual(str(nok_res), self.task3['id'])

        pending_id = uuid()
        pending_res = self.app.AsyncResult(pending_id)
        self.assertEqual(str(pending_res), pending_id)

    def test_repr(self):
        ok_res = self.app.AsyncResult(self.task1['id'])
        ok2_res = self.app.AsyncResult(self.task2['id'])
        nok_res = self.app.AsyncResult(self.task3['id'])
        self.assertEqual(repr(ok_res), '<AsyncResult: %s>' % (
            self.task1['id']))
        self.assertEqual(repr(ok2_res), '<AsyncResult: %s>' % (
            self.task2['id']))
        self.assertEqual(repr(nok_res), '<AsyncResult: %s>' % (
            self.task3['id']))

        pending_id = uuid()
        pending_res = self.app.AsyncResult(pending_id)
        self.assertEqual(repr(pending_res), '<AsyncResult: %s>' % (
            pending_id))

    def test_hash(self):
        # Hash is derived from the id only.
        self.assertEqual(hash(self.app.AsyncResult('x0w991')),
                         hash(self.app.AsyncResult('x0w991')))
        self.assertNotEqual(hash(self.app.AsyncResult('x0w991')),
                            hash(self.app.AsyncResult('x1w991')))

    def test_get_traceback(self):
        ok_res = self.app.AsyncResult(self.task1['id'])
        nok_res = self.app.AsyncResult(self.task3['id'])
        nok_res2 = self.app.AsyncResult(self.task4['id'])
        self.assertFalse(ok_res.traceback)
        self.assertTrue(nok_res.traceback)
        self.assertTrue(nok_res2.traceback)

        pending_res = self.app.AsyncResult(uuid())
        self.assertFalse(pending_res.traceback)

    def test_get(self):
        ok_res = self.app.AsyncResult(self.task1['id'])
        ok2_res = self.app.AsyncResult(self.task2['id'])
        nok_res = self.app.AsyncResult(self.task3['id'])
        nok2_res = self.app.AsyncResult(self.task4['id'])

        self.assertEqual(ok_res.get(), 'the')
        self.assertEqual(ok2_res.get(), 'quick')
        with self.assertRaises(KeyError):
            nok_res.get()
        self.assertTrue(nok_res.get(propagate=False))
        self.assertIsInstance(nok2_res.result, KeyError)
        self.assertEqual(ok_res.info, 'the')

    def test_get_timeout(self):
        res = self.app.AsyncResult(self.task4['id'])  # has RETRY state
        with self.assertRaises(TimeoutError):
            res.get(timeout=0.001)

        pending_res = self.app.AsyncResult(uuid())
        with patch('celery.result.time') as _time:
            with self.assertRaises(TimeoutError):
                pending_res.get(timeout=0.001, interval=0.001)
                # NOTE(review): unreachable after the raise above -- kept as-is.
                _time.sleep.assert_called_with(0.001)

    def test_get_timeout_longer(self):
        res = self.app.AsyncResult(self.task4['id'])  # has RETRY state
        with patch('celery.result.time') as _time:
            with self.assertRaises(TimeoutError):
                res.get(timeout=1, interval=1)
            _time.sleep.assert_called_with(1)

    def test_ready(self):
        oks = (self.app.AsyncResult(self.task1['id']),
               self.app.AsyncResult(self.task2['id']),
               self.app.AsyncResult(self.task3['id']))
        self.assertTrue(all(result.ready() for result in oks))
        self.assertFalse(self.app.AsyncResult(self.task4['id']).ready())

        self.assertFalse(self.app.AsyncResult(uuid()).ready())
class test_ResultSet(AppCase):
    """Behaviour of the ResultSet container of AsyncResults."""

    def test_resultset_repr(self):
        self.assertTrue(repr(self.app.ResultSet(
            [self.app.AsyncResult(t) for t in ['1', '2', '3']])))

    def test_eq_other(self):
        self.assertFalse(self.app.ResultSet([1, 3, 3]) == 1)
        self.assertTrue(self.app.ResultSet([1]) == self.app.ResultSet([1]))

    def test_get(self):
        # get() dispatches to join_native only when the backend supports it.
        x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]])
        b = x.results[0].backend = Mock()
        b.supports_native_join = False
        x.join_native = Mock()
        x.join = Mock()
        x.get()
        self.assertTrue(x.join.called)
        b.supports_native_join = True
        x.get()
        self.assertTrue(x.join_native.called)

    def test_add(self):
        # Adding a duplicate is a no-op.
        x = self.app.ResultSet([1])
        x.add(2)
        self.assertEqual(len(x), 2)
        x.add(2)
        self.assertEqual(len(x), 2)

    @contextmanager
    def dummy_copy(self):
        # Make celery.result.copy the identity so iterate() sees our mocks.
        with patch('celery.result.copy') as copy:

            def passt(arg):
                return arg
            copy.side_effect = passt
            yield

    def test_iterate_respects_subpolling_interval(self):
        r1 = self.app.AsyncResult(uuid())
        r2 = self.app.AsyncResult(uuid())
        backend = r1.backend = r2.backend = Mock()
        backend.subpolling_interval = 10

        ready = r1.ready = r2.ready = Mock()

        def se(*args, **kwargs):
            # First poll returns False, every later poll raises to stop the loop.
            ready.side_effect = KeyError()
            return False
        ready.return_value = False
        ready.side_effect = se

        x = self.app.ResultSet([r1, r2])
        with self.dummy_copy():
            with patch('celery.result.time') as _time:
                with self.assertRaises(KeyError):
                    list(x.iterate())
                _time.sleep.assert_called_with(10)

            backend.subpolling_interval = 0
            with patch('celery.result.time') as _time:
                with self.assertRaises(KeyError):
                    ready.return_value = False
                    ready.side_effect = se
                    list(x.iterate())
                self.assertFalse(_time.sleep.called)

    def test_times_out(self):
        # NOTE(review): passes the ``uuid`` function itself rather than
        # ``uuid()`` -- looks like a typo but is harmless here; confirm upstream.
        r1 = self.app.AsyncResult(uuid)
        r1.ready = Mock()
        r1.ready.return_value = False
        x = self.app.ResultSet([r1])
        with self.dummy_copy():
            with patch('celery.result.time'):
                with self.assertRaises(TimeoutError):
                    list(x.iterate(timeout=1))

    def test_add_discard(self):
        x = self.app.ResultSet([])
        x.add(self.app.AsyncResult('1'))
        self.assertIn(self.app.AsyncResult('1'), x.results)
        x.discard(self.app.AsyncResult('1'))
        x.discard(self.app.AsyncResult('1'))
        x.discard('1')
        self.assertNotIn(self.app.AsyncResult('1'), x.results)

        x.update([self.app.AsyncResult('2')])

    def test_clear(self):
        # clear() empties the list in place, keeping the same list object.
        x = self.app.ResultSet([])
        r = x.results
        x.clear()
        self.assertIs(x.results, r)
class MockAsyncResultFailure(AsyncResult):
    """AsyncResult stand-in that is permanently FAILURE with a KeyError result."""

    @property
    def result(self):
        return KeyError('baz')

    @property
    def state(self):
        return states.FAILURE

    def get(self, propagate=True, **kwargs):
        # Mirror the real contract: propagating raises the stored exception.
        if not propagate:
            return self.result
        raise self.result
class MockAsyncResultSuccess(AsyncResult):
    """AsyncResult stand-in that is permanently SUCCESS with result 42 and
    records whether ``forget`` was called."""

    forgotten = False

    @property
    def result(self):
        return 42

    @property
    def state(self):
        return states.SUCCESS

    def get(self, **kwargs):
        return self.result

    def forget(self):
        self.forgotten = True
class SimpleBackend(object):
    """Minimal result-backend double whose ``get_many`` reports every stored
    id as a SUCCESS whose result is its position in ``ids``."""

    ids = []

    def __init__(self, ids=None):
        # Bug fix: the default was the mutable ``ids=[]``; use the ``None``
        # sentinel so instances can never accidentally share one list.
        self.ids = [] if ids is None else ids

    def get_many(self, *args, **kwargs):
        """Return an iterator of ``(task_id, meta)`` pairs for the stored ids."""
        return ((task_id, {'result': i, 'status': states.SUCCESS})
                for i, task_id in enumerate(self.ids))
class test_TaskSetResult(AppCase):
    """Exercises the deprecated TaskSetResult compatibility wrapper."""

    def setup(self):
        self.size = 10
        self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size))

    def test_total(self):
        self.assertEqual(self.ts.total, self.size)

    def test_compat_properties(self):
        # ``taskset_id`` is a legacy read/write alias for ``id``.
        self.assertEqual(self.ts.taskset_id, self.ts.id)

        self.ts.taskset_id = 'foo'
        self.assertEqual(self.ts.taskset_id, 'foo')

    def test_compat_subtasks_kwarg(self):
        x = TaskSetResult(uuid(), subtasks=[1, 2, 3])
        self.assertEqual(x.results, [1, 2, 3])

    def test_itersubtasks(self):
        it = self.ts.itersubtasks()

        for i, t in enumerate(it):
            self.assertEqual(t.get(), i)
class test_GroupResult(AppCase):
def setup(self):
self.size = 10
self.ts = self.app.GroupResult(
uuid(), make_mock_group(self.app, self.size),
)
@depends_on_current_app
def test_is_pickleable(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
self.assertEqual(pickle.loads(pickle.dumps(ts)), ts)
ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2)
def test_len(self):
self.assertEqual(len(self.ts), self.size)
def test_eq_other(self):
self.assertFalse(self.ts == 1)
@depends_on_current_app
def test_reduce(self):
self.assertTrue(pickle.loads(pickle.dumps(self.ts)))
def test_iterate_raises(self):
ar = MockAsyncResultFailure(uuid(), app=self.app)
ts = self.app.GroupResult(uuid(), [ar])
it = ts.iterate()
with self.assertRaises(KeyError):
next(it)
def test_forget(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
MockAsyncResultSuccess(uuid(), app=self.app)]
ts = self.app.GroupResult(uuid(), subs)
ts.forget()
for sub in subs:
self.assertTrue(sub.forgotten)
def test_getitem(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
MockAsyncResultSuccess(uuid(), app=self.app)]
ts = self.app.GroupResult(uuid(), subs)
self.assertIs(ts[0], subs[0])
def test_save_restore(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
MockAsyncResultSuccess(uuid(), app=self.app)]
ts = self.app.GroupResult(uuid(), subs)
ts.save()
with self.assertRaises(AttributeError):
ts.save(backend=object())
self.assertEqual(self.app.GroupResult.restore(ts.id).subtasks,
ts.subtasks)
ts.delete()
self.assertIsNone(self.app.GroupResult.restore(ts.id))
with self.assertRaises(AttributeError):
self.app.GroupResult.restore(ts.id, backend=object())
def test_join_native(self):
backend = SimpleBackend()
subtasks = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), subtasks)
backend.ids = [subtask.id for subtask in subtasks]
res = ts.join_native()
self.assertEqual(res, list(range(10)))
def test_join_native_raises(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
ts.iter_native = Mock()
ts.iter_native.return_value = iter([
(uuid(), {'status': states.FAILURE, 'result': KeyError()})
])
with self.assertRaises(KeyError):
ts.join_native(propagate=True)
def test_failed_join_report(self):
res = Mock()
ts = self.app.GroupResult(uuid(), [res])
res.state = states.FAILURE
res.backend.is_cached.return_value = True
self.assertIs(next(ts._failed_join_report()), res)
res.backend.is_cached.return_value = False
with self.assertRaises(StopIteration):
next(ts._failed_join_report())
def test_repr(self):
self.assertTrue(repr(
self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
))
def test_children_is_results(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
self.assertIs(ts.children, ts.results)
def test_iter_native(self):
backend = SimpleBackend()
subtasks = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), subtasks)
backend.ids = [subtask.id for subtask in subtasks]
self.assertEqual(len(list(ts.iter_native())), 10)
def test_iterate_yields(self):
ar = MockAsyncResultSuccess(uuid(), app=self.app)
ar2 = MockAsyncResultSuccess(uuid(), app=self.app)
ts = self.app.GroupResult(uuid(), [ar, ar2])
it = ts.iterate()
self.assertEqual(next(it), 42)
self.assertEqual(next(it), 42)
def test_iterate_eager(self):
ar1 = EagerResult(uuid(), 42, states.SUCCESS)
ar2 = EagerResult(uuid(), 42, states.SUCCESS)
ts = self.app.GroupResult(uuid(), [ar1, ar2])
it = ts.iterate()
self.assertEqual(next(it), 42)
self.assertEqual(next(it), 42)
def test_join_timeout(self):
ar = MockAsyncResultSuccess(uuid(), app=self.app)
ar2 = MockAsyncResultSuccess(uuid(), app=self.app)
ar3 = self.app.AsyncResult(uuid())
ts = self.app.GroupResult(uuid(), [ar, ar2, ar3])
with self.assertRaises(TimeoutError):
ts.join(timeout=0.0000001)
ar4 = self.app.AsyncResult(uuid())
ar4.get = Mock()
ts2 = self.app.GroupResult(uuid(), [ar4])
self.assertTrue(ts2.join(timeout=0.1))
def test_iter_native_when_empty_group(self):
ts = self.app.GroupResult(uuid(), [])
self.assertListEqual(list(ts.iter_native()), [])
    def test_iterate_simple(self):
        """iterate() yields every member's value (0..size-1, order unspecified)."""
        it = self.ts.iterate()
        results = sorted(list(it))
        self.assertListEqual(results, list(range(self.size)))
    def test___iter__(self):
        """Iterating the group itself walks its `results` list in order."""
        self.assertListEqual(list(iter(self.ts)), self.ts.results)
    def test_join(self):
        """join() returns all member values in order."""
        joined = self.ts.join()
        self.assertListEqual(joined, list(range(self.size)))
    def test_successful(self):
        """A group with all members succeeded reports successful()."""
        self.assertTrue(self.ts.successful())
    def test_failed(self):
        """A fully successful group does not report failed()."""
        self.assertFalse(self.ts.failed())
    def test_waiting(self):
        """A completed group is not waiting()."""
        self.assertFalse(self.ts.waiting())
    def test_ready(self):
        """A completed group is ready()."""
        self.assertTrue(self.ts.ready())
    def test_completed_count(self):
        """completed_count() equals the group size when every member finished."""
        self.assertEqual(self.ts.completed_count(), len(self.ts))
class test_pending_AsyncResult(AppCase):
    """An AsyncResult with no stored state yet has no result value."""
    def setup(self):
        self.task = self.app.AsyncResult(uuid())
    def test_result(self):
        self.assertIsNone(self.task.result)
class test_failed_AsyncResult(test_GroupResult):
    """Re-runs the GroupResult suite with one member stored as FAILURE.

    Overrides the cases whose expectations change when a failure is present.
    """
    def setup(self):
        self.size = 11
        subtasks = make_mock_group(self.app, 10)
        # Store an eleventh task that failed with KeyError('Baz').
        failed = mock_task('ts11', states.FAILURE, KeyError('Baz'))
        save_result(self.app, failed)
        failed_res = self.app.AsyncResult(failed['id'])
        self.ts = self.app.GroupResult(uuid(), subtasks + [failed_res])
    def test_completed_count(self):
        # The failed member does not count as completed.
        self.assertEqual(self.ts.completed_count(), len(self.ts) - 1)
    def test_iterate_simple(self):
        # Iterating propagates the stored exception.
        it = self.ts.iterate()
        def consume():
            return list(it)
        with self.assertRaises(KeyError):
            consume()
    def test_join(self):
        # join() propagates the stored exception by default.
        with self.assertRaises(KeyError):
            self.ts.join()
    def test_successful(self):
        self.assertFalse(self.ts.successful())
    def test_failed(self):
        self.assertTrue(self.ts.failed())
class test_pending_Group(AppCase):
    """State queries on a group whose members have not completed."""
    def setup(self):
        self.ts = self.app.GroupResult(
            uuid(), [self.app.AsyncResult(uuid()),
                     self.app.AsyncResult(uuid())])
    def test_completed_count(self):
        self.assertEqual(self.ts.completed_count(), 0)
    def test_ready(self):
        self.assertFalse(self.ts.ready())
    def test_waiting(self):
        self.assertTrue(self.ts.waiting())
    # NOTE: the x_ prefix keeps these join/timeout cases out of the normal
    # test run (they would block for the timeout duration).
    def x_join(self):
        with self.assertRaises(TimeoutError):
            self.ts.join(timeout=0.001)
    def x_join_longer(self):
        with self.assertRaises(TimeoutError):
            self.ts.join(timeout=1)
class test_EagerResult(AppCase):
    """Behavior of EagerResult (results computed locally, not via a worker)."""
    def setup(self):
        # A task that always raises; applied eagerly below.
        @self.app.task(shared=False)
        def raising(x, y):
            raise KeyError(x, y)
        self.raising = raising
    def test_wait_raises(self):
        res = self.raising.apply(args=[3, 3])
        with self.assertRaises(KeyError):
            res.wait()
        # With propagate=False the exception is returned, not raised.
        self.assertTrue(res.wait(propagate=False))
    def test_wait(self):
        # Waiting on a RETRY-state eager result leaves the state untouched.
        res = EagerResult('x', 'x', states.RETRY)
        res.wait()
        self.assertEqual(res.state, states.RETRY)
        self.assertEqual(res.status, states.RETRY)
    def test_forget(self):
        # forget() must not raise for eager results.
        res = EagerResult('x', 'x', states.RETRY)
        res.forget()
    def test_revoke(self):
        # Eager results cannot be revoked.
        res = self.raising.apply(args=[3, 3])
        self.assertFalse(res.revoke())
class test_tuples(AppCase):
    """Round-tripping results through as_tuple()/result_from_tuple()."""
    def test_AsyncResult(self):
        x = self.app.AsyncResult(uuid())
        self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))
        # Passing a result instance through is a no-op.
        self.assertEqual(x, result_from_tuple(x, self.app))
    def test_with_parent(self):
        # The parent chain survives serialization.
        x = self.app.AsyncResult(uuid())
        x.parent = self.app.AsyncResult(uuid())
        y = result_from_tuple(x.as_tuple(), self.app)
        self.assertEqual(y, x)
        self.assertEqual(y.parent, x.parent)
        self.assertIsInstance(y.parent, AsyncResult)
    def test_compat(self):
        # Legacy [id, parent] list form is still accepted.
        uid = uuid()
        x = result_from_tuple([uid, []], app=self.app)
        self.assertEqual(x.id, uid)
    def test_GroupResult(self):
        x = self.app.GroupResult(
            uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)],
        )
        self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))
        self.assertEqual(x, result_from_tuple(x, self.app))
| 32.691877 | 77 | 0.605604 | [
"BSD-3-Clause"
] | sivaprakashniet/push_pull | p2p/lib/python2.7/site-packages/celery/tests/tasks/test_result.py | 23,342 | Python |
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from contextlib import contextmanager
import datetime
import io
import os
import time
import tempfile
import xml.etree.ElementTree as ET
from salesforce_bulk.util import IteratorBytesIO
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy import text
from sqlalchemy import types
from sqlalchemy import event
import requests
import unicodecsv
from cumulusci.core.utils import process_bool_arg, ordered_yaml_load
from cumulusci.core.exceptions import BulkDataException
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.utils import convert_to_snake_case, log_progress, os_friendly_path
# TODO: UserID Catcher
# TODO: Dater
# Create a custom sqlalchemy field type for sqlite datetime fields which are stored as integer of epoch time
class EpochType(types.TypeDecorator):
    """Store ``datetime`` values as integer epoch milliseconds.

    The sqlite source data keeps datetime columns as integers of
    milliseconds since the Unix epoch; this decorator converts to and
    from ``datetime.datetime`` transparently on bind/result.
    """
    impl = types.Integer
    # Reference point for the integer representation (naive Unix epoch).
    epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)
    def process_bind_param(self, value, dialect):
        """Convert a datetime (or None) into epoch milliseconds for storage."""
        if value is None:
            # NULL must stay NULL -- the previous code raised TypeError here
            # (None - datetime), unlike the symmetric guard in
            # process_result_value below.
            return None
        # Truncates sub-second precision, matching the stored integer format.
        return int((value - self.epoch).total_seconds()) * 1000
    def process_result_value(self, value, dialect):
        """Convert stored epoch milliseconds (or None) back into a datetime."""
        if value is not None:
            return self.epoch + datetime.timedelta(seconds=value / 1000)
# Listen for sqlalchemy column_reflect event and map datetime fields to EpochType
@event.listens_for(Table, "column_reflect")
def setup_epoch(inspector, table, column_info):
    """Swap reflected DateTime column types for EpochType.

    Runs once per reflected column; required because the sqlite source
    stores these datetimes as integers (see EpochType above).
    """
    if isinstance(column_info["type"], types.DateTime):
        column_info["type"] = EpochType()
class BulkJobTaskMixin(object):
    """Shared helpers for tasks driving Salesforce Bulk API jobs.

    Expects the host class to provide ``self.bulk`` (a salesforce-bulk
    client), ``self.logger``, ``self.metadata`` and ``self.session``
    (SQLAlchemy) -- TODO confirm against BaseSalesforceApiTask.
    """
    def _job_state_from_batches(self, job_id):
        """Fetch the batch list for *job_id* and derive an overall job state."""
        uri = "{}/job/{}/batch".format(self.bulk.endpoint, job_id)
        response = requests.get(uri, headers=self.bulk.headers())
        return self._parse_job_state(response.content)
    def _parse_job_state(self, xml):
        """Reduce per-batch <state> elements to one of
        Aborted/InProgress/Failed/Completed."""
        tree = ET.fromstring(xml)
        completed = 0
        pending = 0
        failed = 0
        for el in tree.iterfind(".//{%s}state" % self.bulk.jobNS):
            state = el.text
            if state == "Not Processed":
                # Any unprocessed batch means the job was aborted.
                return "Aborted"
            elif state == "Failed":
                failed += 1
            elif state == "Completed":
                completed += 1
            else: # Queued, InProgress
                pending += 1
        # Precedence: still running > any failure > all completed.
        if pending:
            return "InProgress"
        elif failed:
            return "Failed"
        else:
            return "Completed"
    def _wait_for_job(self, job_id):
        """Poll every 10s until the job leaves InProgress; return final state."""
        while True:
            job_status = self.bulk.job_status(job_id)
            self.logger.info(
                " Waiting for job {} ({}/{})".format(
                    job_id,
                    job_status["numberBatchesCompleted"],
                    job_status["numberBatchesTotal"],
                )
            )
            result = self._job_state_from_batches(job_id)
            if result != "InProgress":
                break
            time.sleep(10)
        self.logger.info("Job {} finished with result: {}".format(job_id, result))
        return result
    def _sql_bulk_insert_from_csv(self, conn, table, columns, data_file):
        """Bulk-load CSV rows from *data_file* into *table* (no header row)."""
        if conn.dialect.name in ("postgresql", "psycopg2"):
            # psycopg2 (the postgres driver) supports COPY FROM
            # to efficiently bulk insert rows in CSV format
            with conn.connection.cursor() as cursor:
                cursor.copy_expert(
                    "COPY {} ({}) FROM STDIN WITH (FORMAT CSV)".format(
                        table, ",".join(columns)
                    ),
                    data_file,
                )
        else:
            # For other db drivers we need to use standard SQL
            # -- this is optimized for ease of implementation
            # rather than performance and may need more work.
            reader = unicodecsv.DictReader(data_file, columns)
            table = self.metadata.tables[table]
            rows = list(reader)
            if rows:
                conn.execute(table.insert().values(rows))
        self.session.flush()
class DeleteData(BaseSalesforceApiTask, BulkJobTaskMixin):
    """Delete all records of the configured sobjects via the Bulk API."""
    task_options = {
        "objects": {
            "description": "A list of objects to delete records from in order of deletion. If passed via command line, use a comma separated string",
            "required": True,
        },
        "hardDelete": {
            "description": "If True, perform a hard delete, bypassing the recycle bin. Default: False"
        },
    }
    def _init_options(self, kwargs):
        """Normalize options: coerce `objects` to a list, `hardDelete` to bool."""
        super(DeleteData, self)._init_options(kwargs)
        # Split and trim objects string into a list if not already a list
        if not isinstance(self.options["objects"], list):
            self.options["objects"] = [
                obj.strip() for obj in self.options["objects"].split(",")
            ]
        self.options["hardDelete"] = process_bool_arg(self.options.get("hardDelete"))
    def _run_task(self):
        """Delete records object by object, in the configured order."""
        for obj in self.options["objects"]:
            self.logger.info("Deleting all {} records".format(obj))
            delete_job = self._create_job(obj)
            if delete_job is not None:
                self._wait_for_job(delete_job)
    def _create_job(self, obj):
        """Create and upload a bulk delete job for *obj*.

        Returns the job id, or None when there are no records to delete.
        """
        # Query for rows to delete
        delete_rows = self._query_salesforce_for_records_to_delete(obj)
        if not delete_rows:
            self.logger.info(" No {} objects found, skipping delete".format(obj))
            return
        # Upload all the batches
        operation = "hardDelete" if self.options["hardDelete"] else "delete"
        delete_job = self.bulk.create_job(obj, operation)
        self.logger.info(" Deleting {} {} records".format(len(delete_rows), obj))
        batch_num = 1
        for batch in self._upload_batches(delete_job, delete_rows):
            self.logger.info(" Uploaded batch {}".format(batch))
            batch_num += 1
        self.bulk.close_job(delete_job)
        return delete_job
    def _query_salesforce_for_records_to_delete(self, obj):
        """Return a list of {'Id': ...} dicts for every record of *obj*."""
        # Query for all record ids
        self.logger.info(" Querying for all {} objects".format(obj))
        query_job = self.bulk.create_query_job(obj, contentType="CSV")
        batch = self.bulk.query(query_job, "select Id from {}".format(obj))
        while not self.bulk.is_batch_done(batch, query_job):
            time.sleep(10)
        self.bulk.close_job(query_job)
        delete_rows = []
        for result in self.bulk.get_all_results_for_query_batch(batch, query_job):
            reader = unicodecsv.DictReader(result, encoding="utf-8")
            for row in reader:
                delete_rows.append(row)
        return delete_rows
    def _split_batches(self, data, batch_size):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(data), batch_size):
            yield data[i : i + batch_size]
    def _upload_batches(self, job, data):
        """POST the ids in CSV batches of 10000 and yield each batch id."""
        uri = "{}/job/{}/batch".format(self.bulk.endpoint, job)
        headers = self.bulk.headers({"Content-Type": "text/csv"})
        for batch in self._split_batches(data, 10000):
            # Build a one-column CSV body: header then quoted ids.
            rows = ['"Id"']
            rows += ['"{}"'.format(record["Id"]) for record in batch]
            resp = requests.post(uri, data="\n".join(rows), headers=headers)
            content = resp.content
            if resp.status_code >= 400:
                self.bulk.raise_error(content, resp.status_code)
            tree = ET.fromstring(content)
            batch_id = tree.findtext("{%s}id" % self.bulk.jobNS)
            yield batch_id
class LoadData(BulkJobTaskMixin, BaseSalesforceApiTask):
    """Load rows from a local database into Salesforce via Bulk API inserts.

    Steps are driven by a YAML mapping file; each step maps a local table's
    columns (plus optional static values, lookups and a record type) onto a
    Salesforce object's fields.
    """
    task_options = {
        "database_url": {
            "description": "The database url to a database containing the test data to load",
            "required": True,
        },
        "mapping": {
            "description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
            "required": True,
        },
        "start_step": {
            "description": "If specified, skip steps before this one in the mapping",
            "required": False,
        },
        "sql_path": {
            "description": "If specified, a database will be created from an SQL script at the provided path"
        },
    }
    def _init_options(self, kwargs):
        """Validate option combinations; sql_path implies an in-memory sqlite db."""
        super(LoadData, self)._init_options(kwargs)
        if self.options.get("sql_path"):
            if self.options.get("database_url"):
                raise TaskOptionsError(
                    "The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
                )
            self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
            if not os.path.isfile(self.options["sql_path"]):
                raise TaskOptionsError(
                    "File {} does not exist".format(self.options["sql_path"])
                )
            self.logger.info("Using in-memory sqlite database")
            self.options["database_url"] = "sqlite://"
    def _run_task(self):
        """Run each mapping step in order, optionally resuming at start_step;
        stop on the first step that does not complete."""
        self._init_mapping()
        self._init_db()
        start_step = self.options.get("start_step")
        started = False
        for name, mapping in self.mapping.items():
            # Skip steps until start_step
            if not started and start_step and name != start_step:
                self.logger.info("Skipping step: {}".format(name))
                continue
            started = True
            self.logger.info("Running Job: {}".format(name))
            result = self._load_mapping(mapping)
            if result != "Completed":
                break
    def _load_mapping(self, mapping):
        """Load data for a single step."""
        # oid_as_pk: legacy mode where the SF Id column is the local pk.
        mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
        job_id, local_ids_for_batch = self._create_job(mapping)
        result = self._wait_for_job(job_id)
        # We store inserted ids even if some batches failed
        self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
        return result
    def _create_job(self, mapping):
        """Initiate a bulk insert and upload batches to run in parallel."""
        job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV")
        self.logger.info(" Created bulk job {}".format(job_id))
        # Upload batches
        local_ids_for_batch = {}
        for batch_file, local_ids in self._get_batches(mapping):
            batch_id = self.bulk.post_batch(job_id, batch_file)
            local_ids_for_batch[batch_id] = local_ids
            self.logger.info(" Uploaded batch {}".format(batch_id))
        self.bulk.close_job(job_id)
        return job_id, local_ids_for_batch
    def _get_batches(self, mapping, batch_size=10000):
        """Get data from the local db

        Yields (csv_file, local_ids) pairs of at most *batch_size* rows each.
        """
        action = mapping.get("action", "insert")
        fields = mapping.get("fields", {}).copy()
        static = mapping.get("static", {})
        lookups = mapping.get("lookups", {})
        record_type = mapping.get("record_type")
        # Skip Id field on insert
        if action == "insert" and "Id" in fields:
            del fields["Id"]
        # Build the list of fields to import
        columns = []
        columns.extend(fields.keys())
        columns.extend(lookups.keys())
        columns.extend(static.keys())
        if record_type:
            columns.append("RecordTypeId")
            # default to the profile assigned recordtype if we can't find any
            # query for the RT by developer name
            # NOTE(review): the two string fragments concatenate to
            # "...SObjectType='{0}'AND DeveloperName..." with no space before
            # AND -- verify this SOQL actually parses.
            query = (
                "SELECT Id FROM RecordType WHERE SObjectType='{0}'"
                "AND DeveloperName = '{1}' LIMIT 1"
            )
            record_type_id = self.sf.query(
                query.format(mapping.get("sf_object"), record_type)
            )["records"][0]["Id"]
        query = self._query_db(mapping)
        total_rows = 0
        batch_num = 1
        def start_batch():
            # Fresh in-memory CSV with a header row; ids tracked in parallel.
            batch_file = io.BytesIO()
            writer = unicodecsv.writer(batch_file)
            writer.writerow(columns)
            batch_ids = []
            return batch_file, writer, batch_ids
        batch_file, writer, batch_ids = start_batch()
        for row in query.yield_per(batch_size):
            total_rows += 1
            # Add static values to row
            pkey = row[0]
            row = list(row[1:]) + list(static.values())
            if record_type:
                row.append(record_type_id)
            writer.writerow([self._convert(value) for value in row])
            batch_ids.append(pkey)
            # Yield and start a new file every [batch_size] rows
            if not total_rows % batch_size:
                batch_file.seek(0)
                self.logger.info(" Processing batch {}".format(batch_num))
                yield batch_file, batch_ids
                batch_file, writer, batch_ids = start_batch()
                batch_num += 1
        # Yield result file for final batch
        if batch_ids:
            batch_file.seek(0)
            yield batch_file, batch_ids
        self.logger.info(
            " Prepared {} rows for import to {}".format(
                total_rows, mapping["sf_object"]
            )
        )
    def _query_db(self, mapping):
        """Build a query to retrieve data from the local db.

        Includes columns from the mapping
        as well as joining to the id tables to get real SF ids
        for lookups.
        """
        model = self.models[mapping.get("table")]
        # Use primary key instead of the field mapped to SF Id
        fields = mapping.get("fields", {}).copy()
        if mapping["oid_as_pk"]:
            del fields["Id"]
        id_column = model.__table__.primary_key.columns.keys()[0]
        columns = [getattr(model, id_column)]
        for f in fields.values():
            columns.append(model.__table__.columns[f])
        lookups = mapping.get("lookups", {}).copy()
        for lookup in lookups.values():
            # Alias the "<table>_sf_ids" table so multiple lookups to the
            # same target don't collide.
            lookup["aliased_table"] = aliased(
                self.metadata.tables["{}_sf_ids".format(lookup["table"])]
            )
            columns.append(lookup["aliased_table"].columns.sf_id)
        query = self.session.query(*columns)
        if "record_type" in mapping and hasattr(model, "record_type"):
            query = query.filter(model.record_type == mapping["record_type"])
        if "filters" in mapping:
            filter_args = []
            for f in mapping["filters"]:
                filter_args.append(text(f))
            query = query.filter(*filter_args)
        for sf_field, lookup in lookups.items():
            # Outer join with lookup ids table:
            # returns main obj even if lookup is null
            key_field = get_lookup_key_field(lookup, sf_field)
            value_column = getattr(model, key_field)
            query = query.outerjoin(
                lookup["aliased_table"],
                lookup["aliased_table"].columns.id == value_column,
            )
            # Order by foreign key to minimize lock contention
            # by trying to keep lookup targets in the same batch
            lookup_column = getattr(model, key_field)
            query = query.order_by(lookup_column)
        self.logger.info(str(query))
        return query
    def _convert(self, value):
        """Serialize datetimes to ISO format; pass other truthy values through.

        Falsy values (including None, 0, "") implicitly return None.
        """
        if value:
            if isinstance(value, datetime.datetime):
                return value.isoformat()
            return value
    def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch):
        """Get the job results and store inserted SF Ids in a new table"""
        id_table_name = self._reset_id_table(mapping)
        conn = self.session.connection()
        for batch_id, local_ids in local_ids_for_batch.items():
            try:
                results_url = "{}/job/{}/batch/{}/result".format(
                    self.bulk.endpoint, job_id, batch_id
                )
                # Download entire result file to a temporary file first
                # to avoid the server dropping connections
                with _download_file(results_url, self.bulk) as f:
                    self.logger.info(
                        " Downloaded results for batch {}".format(batch_id)
                    )
                    self._store_inserted_ids_for_batch(
                        f, local_ids, id_table_name, conn
                    )
                self.logger.info(
                    " Updated {} for batch {}".format(id_table_name, batch_id)
                )
            except Exception: # pragma: nocover
                # If we can't download one result file,
                # don't let that stop us from downloading the others
                self.logger.error(
                    "Could not download batch results: {}".format(batch_id)
                )
                continue
        self.session.commit()
    def _reset_id_table(self, mapping):
        """Create an empty table to hold the inserted SF Ids"""
        # Track which id tables this task run has already (re)created so we
        # only drop/create each one once.
        if not hasattr(self, "_initialized_id_tables"):
            self._initialized_id_tables = set()
        id_table_name = "{}_sf_ids".format(mapping["table"])
        if id_table_name not in self._initialized_id_tables:
            if id_table_name in self.metadata.tables:
                self.metadata.remove(self.metadata.tables[id_table_name])
            id_table = Table(
                id_table_name,
                self.metadata,
                Column("id", Unicode(255), primary_key=True),
                Column("sf_id", Unicode(18)),
            )
            if id_table.exists():
                id_table.drop()
            id_table.create()
            self._initialized_id_tables.add(id_table_name)
        return id_table_name
    def _store_inserted_ids_for_batch(
        self, result_file, local_ids, id_table_name, conn
    ):
        # Set up a function to generate rows based on this result file
        def produce_csv():
            """Iterate over job results and prepare rows for id table"""
            reader = unicodecsv.reader(result_file)
            next(reader) # skip header
            i = 0
            # Result rows are positionally aligned with the uploaded rows,
            # so zip with the local ids captured at upload time.
            for row, local_id in zip(reader, local_ids):
                if row[1] == "true": # Success
                    sf_id = row[0]
                    yield "{},{}\n".format(local_id, sf_id).encode("utf-8")
                else:
                    self.logger.warning(" Error on row {}: {}".format(i, row[3]))
                i += 1
        # Bulk insert rows into id table
        columns = ("id", "sf_id")
        data_file = IteratorBytesIO(produce_csv())
        self._sql_bulk_insert_from_csv(conn, id_table_name, columns, data_file)
    def _sqlite_load(self):
        """Execute the sql_path script against the (in-memory) sqlite db."""
        conn = self.session.connection()
        cursor = conn.connection.cursor()
        with open(self.options["sql_path"], "r") as f:
            try:
                cursor.executescript(f.read())
            finally:
                cursor.close()
        # self.session.flush()
    def _init_db(self):
        """Create engine/session, optionally load SQL, and automap models."""
        # initialize the DB engine
        self.engine = create_engine(self.options["database_url"])
        # initialize the DB session
        self.session = Session(self.engine)
        if self.options.get("sql_path"):
            self._sqlite_load()
        # initialize DB metadata
        self.metadata = MetaData()
        self.metadata.bind = self.engine
        # initialize the automap mapping
        self.base = automap_base(bind=self.engine, metadata=self.metadata)
        self.base.prepare(self.engine, reflect=True)
        # Loop through mappings and reflect each referenced table
        self.models = {}
        for name, mapping in self.mapping.items():
            if "table" in mapping and mapping["table"] not in self.models:
                self.models[mapping["table"]] = self.base.classes[mapping["table"]]
    def _init_mapping(self):
        """Load the YAML mapping file, preserving step order."""
        with open(self.options["mapping"], "r") as f:
            self.mapping = ordered_yaml_load(f)
class QueryData(BulkJobTaskMixin, BaseSalesforceApiTask):
    """Extract Salesforce data into a local database via Bulk API queries.

    The inverse of LoadData: for each mapping step, a SOQL query is run
    through the Bulk API and the CSV results are loaded into local tables.
    """
    task_options = {
        "database_url": {
            "description": "A DATABASE_URL where the query output should be written",
            "required": True,
        },
        "mapping": {
            "description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
            "required": True,
        },
        "sql_path": {
            "description": "If set, an SQL script will be generated at the path provided "
            + "This is useful for keeping data in the repository and allowing diffs."
        },
    }
    def _init_options(self, kwargs):
        """Validate option combinations; sql_path implies an in-memory sqlite db."""
        super(QueryData, self)._init_options(kwargs)
        if self.options.get("sql_path"):
            if self.options.get("database_url"):
                raise TaskOptionsError(
                    "The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
                )
            self.logger.info("Using in-memory sqlite database")
            self.options["database_url"] = "sqlite://"
            self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
    def _run_task(self):
        """Query each mapped object, then drop helper columns and dump SQL."""
        self._init_mapping()
        self._init_db()
        for mapping in self.mappings.values():
            soql = self._soql_for_mapping(mapping)
            self._run_query(soql, mapping)
        self._drop_sf_id_columns()
        if self.options.get("sql_path"):
            self._sqlite_dump()
    def _init_db(self):
        """Create the engine, build the target tables, and map models."""
        self.models = {}
        # initialize the DB engine
        self.engine = create_engine(self.options["database_url"])
        # initialize DB metadata
        self.metadata = MetaData()
        self.metadata.bind = self.engine
        # Create the tables
        self._create_tables()
        # initialize the automap mapping
        self.base = automap_base(bind=self.engine, metadata=self.metadata)
        self.base.prepare(self.engine, reflect=True)
        # initialize session
        self.session = create_session(bind=self.engine, autocommit=False)
    def _init_mapping(self):
        """Load the YAML mapping file, preserving step order."""
        with open(self.options["mapping"], "r") as f:
            self.mappings = ordered_yaml_load(f)
    def _soql_for_mapping(self, mapping):
        """Build the SOQL SELECT for one mapping step."""
        sf_object = mapping["sf_object"]
        fields = []
        # Always select Id when it isn't already part of the mapped fields.
        if not mapping["oid_as_pk"]:
            fields.append("Id")
        fields += [field["sf"] for field in self._fields_for_mapping(mapping)]
        soql = "SELECT {fields} FROM {sf_object}".format(
            **{"fields": ", ".join(fields), "sf_object": sf_object}
        )
        if "record_type" in mapping:
            soql += " WHERE RecordType.DeveloperName = '{}'".format(
                mapping["record_type"]
            )
        return soql
    def _run_query(self, soql, mapping):
        """Run one bulk query job to completion and import its result files."""
        self.logger.info("Creating bulk job for: {sf_object}".format(**mapping))
        job = self.bulk.create_query_job(mapping["sf_object"], contentType="CSV")
        self.logger.info("Job id: {0}".format(job))
        self.logger.info("Submitting query: {}".format(soql))
        batch = self.bulk.query(job, soql)
        self.logger.info("Batch id: {0}".format(batch))
        self.bulk.wait_for_batch(job, batch)
        self.logger.info("Batch {0} finished".format(batch))
        self.bulk.close_job(job)
        self.logger.info("Job {0} closed".format(job))
        conn = self.session.connection()
        for result_file in self._get_results(batch, job):
            self._import_results(mapping, result_file, conn)
    def _get_results(self, batch_id, job_id):
        """Yield a downloaded (spooled-to-disk) file object per result id."""
        result_ids = self.bulk.get_query_batch_result_ids(batch_id, job_id=job_id)
        for result_id in result_ids:
            self.logger.info("Result id: {}".format(result_id))
            uri = "{}/job/{}/batch/{}/result/{}".format(
                self.bulk.endpoint, job_id, batch_id, result_id
            )
            with _download_file(uri, self.bulk) as f:
                self.logger.info("Result {} downloaded".format(result_id))
                yield f
    def _import_results(self, mapping, result_file, conn):
        """Load one CSV result file into the mapped local table(s)."""
        # Map SF field names to local db column names
        sf_header = [
            name.strip('"')
            for name in result_file.readline().strip().decode("utf-8").split(",")
        ]
        columns = []
        lookup_keys = []
        for sf in sf_header:
            if sf == "Records not found for this query":
                return
            if sf:
                column = mapping.get("fields", {}).get(sf)
                if not column:
                    lookup = mapping.get("lookups", {}).get(sf, {})
                    if lookup:
                        lookup_keys.append(sf)
                        column = get_lookup_key_field(lookup, sf)
                if column:
                    columns.append(column)
        if not columns:
            return
        record_type = mapping.get("record_type")
        if record_type:
            columns.append("record_type")
        processor = log_progress(
            process_incoming_rows(result_file, record_type), self.logger
        )
        data_file = IteratorBytesIO(processor)
        if mapping["oid_as_pk"]:
            self._sql_bulk_insert_from_csv(conn, mapping["table"], columns, data_file)
        else:
            # If using the autogenerated id field, split out the CSV file from the Bulk API
            # into two separate files and load into the main table and the sf_id_table
            with tempfile.TemporaryFile("w+b") as f_values:
                with tempfile.TemporaryFile("w+b") as f_ids:
                    data_file_values, data_file_ids = self._split_batch_csv(
                        data_file, f_values, f_ids
                    )
                    self._sql_bulk_insert_from_csv(
                        conn, mapping["table"], columns, data_file_values
                    )
                    self._sql_bulk_insert_from_csv(
                        conn, mapping["sf_id_table"], ["sf_id"], data_file_ids
                    )
        self.session.commit()
        if lookup_keys and not mapping["oid_as_pk"]:
            self._convert_lookups_to_id(mapping, lookup_keys)
    def _get_mapping_for_table(self, table):
        """ Returns the first mapping for a table name """
        for mapping in self.mappings.values():
            if mapping["table"] == table:
                return mapping
    def _split_batch_csv(self, data_file, f_values, f_ids):
        """Split rows into (values-without-id, id-only) CSV files.

        Assumes the first CSV column is the Salesforce Id.
        """
        writer_values = unicodecsv.writer(f_values)
        writer_ids = unicodecsv.writer(f_ids)
        for row in unicodecsv.reader(data_file):
            writer_values.writerow(row[1:])
            # NOTE(review): row[:1] is already a list, so [row[:1]] writes a
            # single cell containing the str() of a list -- verify this
            # shouldn't be writerow(row[:1]).
            writer_ids.writerow([row[:1]])
        f_values.seek(0)
        f_ids.seek(0)
        return f_values, f_ids
    def _convert_lookups_to_id(self, mapping, lookup_keys):
        """Rewrite lookup columns from SF ids to local autoincrement ids."""
        for lookup_key in lookup_keys:
            lookup_dict = mapping["lookups"][lookup_key]
            model = self.models[mapping["table"]]
            lookup_mapping = self._get_mapping_for_table(lookup_dict["table"])
            lookup_model = self.models[lookup_mapping["sf_id_table"]]
            key_field = get_lookup_key_field(lookup_dict, lookup_key)
            key_attr = getattr(model, key_field)
            try:
                self.session.query(model).filter(
                    key_attr.isnot(None), key_attr == lookup_model.sf_id
                ).update({key_attr: lookup_model.id}, synchronize_session=False)
            except NotImplementedError:
                # Some databases such as sqlite don't support multitable update
                mappings = []
                for row, lookup_id in self.session.query(model, lookup_model.id).join(
                    lookup_model, key_attr == lookup_model.sf_id
                ):
                    mappings.append({"id": row.id, key_field: lookup_id})
                self.session.bulk_update_mappings(model, mappings)
        self.session.commit()
    def _create_tables(self):
        """Create one local table (plus optional sf_id table) per mapping."""
        for mapping in self.mappings.values():
            self._create_table(mapping)
        self.metadata.create_all()
    def _create_table(self, mapping):
        """Define the table and mapper for one mapping step."""
        model_name = "{}Model".format(mapping["table"])
        mapper_kwargs = {}
        table_kwargs = {}
        self.models[mapping["table"]] = type(model_name, (object,), {})
        # Provide support for legacy mappings which used the OID as the pk but
        # default to using an autoincrementing int pk and a separate sf_id column
        fields = []
        mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
        if mapping["oid_as_pk"]:
            id_column = mapping["fields"]["Id"]
            fields.append(Column(id_column, Unicode(255), primary_key=True))
        else:
            fields.append(Column("id", Integer(), primary_key=True, autoincrement=True))
        for field in self._fields_for_mapping(mapping):
            if mapping["oid_as_pk"] and field["sf"] == "Id":
                continue
            fields.append(Column(field["db"], Unicode(255)))
        if "record_type" in mapping:
            fields.append(Column("record_type", Unicode(255)))
        t = Table(mapping["table"], self.metadata, *fields, **table_kwargs)
        if t.exists():
            raise BulkDataException("Table already exists: {}".format(mapping["table"]))
        if not mapping["oid_as_pk"]:
            mapping["sf_id_table"] = mapping["table"] + "_sf_id"
            # If multiple mappings point to the same table, don't recreate the table
            if mapping["sf_id_table"] not in self.models:
                sf_id_model_name = "{}Model".format(mapping["sf_id_table"])
                self.models[mapping["sf_id_table"]] = type(
                    sf_id_model_name, (object,), {}
                )
                sf_id_fields = [
                    Column("id", Integer(), primary_key=True, autoincrement=True),
                    Column("sf_id", Unicode(24)),
                ]
                id_t = Table(mapping["sf_id_table"], self.metadata, *sf_id_fields)
                mapper(self.models[mapping["sf_id_table"]], id_t)
        mapper(self.models[mapping["table"]], t, **mapper_kwargs)
    def _fields_for_mapping(self, mapping):
        """Return [{'sf': ..., 'db': ...}] pairs for fields and lookups."""
        fields = []
        for sf_field, db_field in mapping.get("fields", {}).items():
            fields.append({"sf": sf_field, "db": db_field})
        for sf_field, lookup in mapping.get("lookups", {}).items():
            fields.append(
                {"sf": sf_field, "db": get_lookup_key_field(lookup, sf_field)}
            )
        return fields
    def _drop_sf_id_columns(self):
        """Drop the helper sf_id tables created for non-legacy mappings."""
        for mapping in self.mappings.values():
            if mapping.get("oid_as_pk"):
                continue
            self.metadata.tables[mapping["sf_id_table"]].drop()
    def _sqlite_dump(self):
        """Write the whole database as an SQL script to sql_path."""
        path = self.options["sql_path"]
        if os.path.exists(path):
            os.remove(path)
        with open(path, "w") as f:
            for line in self.session.connection().connection.iterdump():
                f.write(line + "\n")
@contextmanager
def _download_file(uri, bulk_api):
    """Download a bulk API result file and yield it as a seekable file.

    The response is spooled to a temporary file first so the HTTP
    connection is not held open while the consumer reads the data.
    """
    response = requests.get(uri, headers=bulk_api.headers(), stream=True)
    with tempfile.TemporaryFile("w+b") as spool:
        spool.writelines(response.iter_content(chunk_size=None))
        spool.seek(0)
        yield spool
def process_incoming_rows(f, record_type=None):
    """Yield CSV lines from *f*, optionally appending a record type column.

    When *record_type* is truthy, each line is stripped of trailing
    whitespace and ``,<record_type>\\n`` is appended; otherwise lines pass
    through unchanged. A ``str`` record type is UTF-8 encoded first.
    """
    suffix = None
    if record_type:
        encoded = (
            record_type
            if isinstance(record_type, bytes)
            else record_type.encode("utf-8")
        )
        suffix = b"," + encoded + b"\n"
    for line in f:
        yield (line.rstrip() + suffix) if suffix else line
def get_lookup_key_field(lookup, sf_field):
    """Return the local db column name used for a lookup field.

    Uses the mapping's explicit ``key_field`` when present; otherwise
    derives one by snake-casing the Salesforce field name. The fallback
    is only computed when actually needed -- ``dict.get(key, default)``
    would evaluate ``convert_to_snake_case`` eagerly on every call.
    """
    if "key_field" in lookup:
        return lookup["key_field"]
    return convert_to_snake_case(sf_field)
| 39.35723 | 150 | 0.59038 | [
"BSD-3-Clause"
] | davidmreed/CumulusCI | cumulusci/tasks/bulkdata.py | 32,391 | Python |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for diagnostics."""
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import progress_tracker
class Diagnostic(object):
  """Base class for diagnostics.

  Attributes:
    intro: A message to introduce the objectives and tasks of the diagnostic.
    title: The name of the diagnostic.
    checklist: An iterable of checkbase.Check objects to be run by the
      diagnostic.
  """

  _MAX_RETRIES = 5

  def __init__(self, intro, title, checklist):
    """Initializes Diagnostic with necessary attributes.

    Args:
      intro: A message to introduce the objectives and tasks of the diagnostic.
      title: The name of the diagnostic.
      checklist: An iterable of checkbase.Check objects to be run by the
        diagnostic.
    """
    self.intro = intro
    self.title = title
    self.checklist = checklist

  def RunChecks(self):
    """Runs one or more checks, tries fixes, and outputs results.

    Returns:
      True if the diagnostic ultimately passed.
    """
    self._Print(self.intro)
    num_checks_passed = 0
    for check in self.checklist:
      result, fixer = self._RunCheck(check)
      # When prompts are disabled we cannot ask the user to apply fixes, so
      # skip the fix/recheck loop -- but the initial result must still count
      # toward the summary.  (Previously this `continue`d past the tally, so
      # passing checks were reported as failures with prompts disabled.)
      if properties.VALUES.core.disable_prompts.GetBool():
        fixer = None

      # If the initial check failed, and a fixer is available try to fix issue
      # and recheck.
      num_retries = 0
      while not result.passed and fixer and num_retries < self._MAX_RETRIES:
        num_retries += 1
        should_check_again = fixer()
        if should_check_again:
          result, fixer = self._RunCheck(check, first_run=False)
        else:
          fixer = None

      if not result.passed and fixer and num_retries == self._MAX_RETRIES:
        log.warn('Unable to fix {0} failure after {1} attempts.'.format(
            self.title, num_retries))

      if result.passed:
        num_checks_passed += 1

    num_checks = len(self.checklist)
    passed = (num_checks_passed == num_checks)
    summary = '{check} ({num_passed}/{num_checks} checks) {passed}.\n'.format(
        check=self.title, num_passed=num_checks_passed, num_checks=num_checks,
        passed='passed' if passed else 'failed')
    self._Print(summary, as_error=not passed)
    return passed

  def _RunCheck(self, check, first_run=True):
    """Runs one check inside a progress tracker and prints its result."""
    with progress_tracker.ProgressTracker('{0} {1}'.format(
        'Checking' if first_run else 'Rechecking', check.issue)):
      result, fixer = check.Check(first_run=first_run)
    self._PrintResult(result)
    return result, fixer

  def _Print(self, message, as_error=False):
    """Prints to status output, or to error output when as_error is True."""
    logger = log.status.Print if not as_error else log.error
    logger(message)

  def _PrintResult(self, result):
    """Prints a single check result, as an error when the check failed."""
    self._Print(result.message, not result.passed)
| 33.207921 | 79 | 0.699463 | [
"Apache-2.0"
] | KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/diagnostics/diagnostic_base.py | 3,354 | Python |
# -----------------------------------------------------------------------------
# Copyright (c) 2020 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
import hypyp.ext.mpl3d.glm as glm
from hypyp.ext.mpl3d.trackball import Trackball
class Camera:
    """
    Interactive trackball camera.

    This camera can be used for static or interactive rendering with mouse
    controlled movements. In this latter case, it is necessary to connect the
    camera to a matplotlib axes using the `connect` method and to provide an
    update function that will be called each time an update is necessary
    relatively to the new transform.

    In any case, the camera transformation is kept in the `Camera.transform`
    variable (a 4x4 matrix combining projection, view and trackball model).
    """

    def __init__(self, mode="perspective", theta=0, phi=0, scale=1):
        """
        mode : str
            camera mode ("ortho" or "perspective")

        theta: float
            angle around z axis (degrees)

        phi: float
            angle around x axis (degrees)

        scale: float
            scale factor
        """
        self.trackball = Trackball(theta, phi)
        self.aperture = 35
        self.aspect = 1
        self.near = 1
        self.far = 100
        self.mode = mode
        self.scale = scale
        self.zoom = 1
        self.zoom_max = 5.0
        self.zoom_min = 0.1
        # Camera sits 3 units back along -z, with a uniform scale.
        self.view = glm.translate(0, 0, -3) @ glm.scale(scale)
        if mode == "ortho":
            self.proj = glm.ortho(-1, +1, -1, +1, self.near, self.far)
        else:
            self.proj = glm.perspective(self.aperture, self.aspect, self.near, self.far)
        self.transform = self.proj @ self.view @ self.trackball.model.T

    def connect(self, axes, update):
        """
        axes : matplotlib.Axes
            Axes where to connect this camera to

        update: function(transform)
            Function to be called with the new transform to update the scene
            (transform is a 4x4 matrix).
        """
        self.figure = axes.get_figure()
        self.axes = axes
        self.update = update
        self.mouse = None
        # BUG FIX: the scroll connection id used to be stored in
        # `self.cidpress` and was then immediately overwritten by the
        # button-press connection below, so `disconnect` could never remove
        # the scroll handler. Keep it under its own attribute.
        self.cidscroll = self.figure.canvas.mpl_connect(
            "scroll_event", self.on_scroll
        )
        self.cidpress = self.figure.canvas.mpl_connect(
            "button_press_event", self.on_press
        )
        self.cidrelease = self.figure.canvas.mpl_connect(
            "button_release_event", self.on_release
        )
        self.cidmotion = self.figure.canvas.mpl_connect(
            "motion_notify_event", self.on_motion
        )

        def format_coord(*args):
            # Show the trackball angles in the status bar instead of data
            # coordinates.
            phi = self.trackball.phi
            theta = self.trackball.theta
            return "Θ : %.1f, ɸ: %.1f" % (theta, phi)

        self.axes.format_coord = format_coord

    def on_scroll(self, event):
        """
        Scroll event for zooming in/out
        """
        if event.inaxes != self.axes:
            return
        # Smaller axis limits == zoomed in, hence the inverted factors.
        if event.button == "up":
            self.zoom = max(0.9 * self.zoom, self.zoom_min)
        elif event.button == "down":
            self.zoom = min(1.1 * self.zoom, self.zoom_max)
        self.axes.set_xlim(-self.zoom, self.zoom)
        self.axes.set_ylim(-self.zoom, self.zoom)
        self.figure.canvas.draw()

    def on_press(self, event):
        """
        Press event to initiate a drag
        """
        if event.inaxes != self.axes:
            return
        self.mouse = event.button, event.xdata, event.ydata

    def on_motion(self, event):
        """
        Motion event to rotate the scene
        """
        if self.mouse is None:
            return
        if event.inaxes != self.axes:
            return
        button, x, y = event.button, event.xdata, event.ydata
        dx, dy = x - self.mouse[1], y - self.mouse[2]
        self.mouse = button, x, y
        self.trackball.drag_to(x, y, dx, dy)
        self.transform = self.proj @ self.view @ self.trackball.model.T
        self.update(self.transform)
        self.figure.canvas.draw()

    def on_release(self, event):
        """
        End of drag event
        """
        self.mouse = None

    def disconnect(self):
        """
        Disconnect camera from the axes
        """
        # Now also removes the scroll handler (see connect).
        self.figure.canvas.mpl_disconnect(self.cidscroll)
        self.figure.canvas.mpl_disconnect(self.cidpress)
        self.figure.canvas.mpl_disconnect(self.cidrelease)
        self.figure.canvas.mpl_disconnect(self.cidmotion)
| 31.3125 | 88 | 0.565314 | [
"BSD-3-Clause"
] | FranckPrts/HyPyP | hypyp/ext/mpl3d/camera.py | 4,511 | Python |
import numpy as np
import os
import pickle
import pytest
import re
import time
import shutil
from copy import deepcopy
from numpy import allclose, isclose
from flare import struc, env, gp
from flare.parameters import Parameters
from flare.mgp import MappedGaussianProcess
from flare.lammps import lammps_calculator
from flare.utils.element_coder import _Z_to_mass, _Z_to_element, _element_to_Z
from flare.ase.calculator import FLARE_Calculator
from flare.ase.atoms import FLARE_Atoms
from ase.calculators.lammpsrun import LAMMPS
from .fake_gp import get_gp, get_random_structure
from .mgp_test import clean, compare_triplet, predict_atom_diag_var
body_list = ["2", "3"]
multi_list = [True, False]
force_block_only = False
curr_path = os.getcwd()
# NOTE(review): pytest marks applied to fixtures are ignored by pytest, so
# this skipif has no effect here — confirm; the lmp skip is enforced by the
# identical mark on test_lmp_predict below.
@pytest.mark.skipif(
    not os.environ.get("lmp", False),
    reason=(
        "lmp not found "
        "in environment: Please install LAMMPS "
        "and set the $lmp env. "
        "variable to point to the executatble."
    ),
)
@pytest.fixture(scope="module")
def all_gp():
    """Module-scoped dict of trained GP models keyed by f"{bodies}{multihyps}"."""
    allgp_dict = {}
    # Fixed seed so the random training structures are reproducible.
    np.random.seed(123)
    for bodies in body_list:
        for multihyps in multi_list:
            gp_model = get_gp(
                bodies,
                "mc",
                multihyps,
                cellabc=[1.5, 1, 2],
                force_only=force_block_only,
                noa=5,
            )
            gp_model.parallel = True
            gp_model.n_cpus = 2

            allgp_dict[f"{bodies}{multihyps}"] = gp_model

    yield allgp_dict
    del allgp_dict
@pytest.fixture(scope="module")
def all_mgp():
    """Module-scoped placeholder dict for MGP models, filled in by test_init."""
    placeholders = {
        f"{bodies}{multihyps}": None
        for bodies in ("2", "3", "2+3")
        for multihyps in (False, True)
    }
    yield placeholders
    del placeholders
@pytest.fixture(scope="module")
def all_lmp():
    """Module-scoped dict of ASE LAMMPS calculators keyed by f"{bodies}{multihyps}"."""
    all_lmp_dict = {}
    species = ["H", "He"]
    specie_symbol_list = " ".join(species)
    # LAMMPS "mass" lines: "<type index> <atomic mass>".
    masses = [
        f"{i} {_Z_to_mass[_element_to_Z[species[i]]]}" for i in range(len(species))
    ]
    parameters = {
        "command": os.environ.get("lmp"),  # set up executable for ASE
        "newton": "off",
        "pair_style": "mgp",
        "mass": masses,
    }

    # set up input params
    # NOTE(review): the same `parameters` dict is reused and mutated
    # (pair_coeff) on every iteration while being handed to each LAMMPS
    # calculator — this relies on LAMMPS copying its input. Confirm.
    for bodies in body_list:
        for multihyps in multi_list:
            # create ASE calc
            label = f"{bodies}{multihyps}"
            files = [f"{label}.mgp"]
            # Enable the two-/three-body terms matching the kernel under test.
            by = "yes" if bodies == "2" else "no"
            ty = "yes" if bodies == "3" else "no"
            parameters["pair_coeff"] = [
                f"* * {label}.mgp {specie_symbol_list} {by} {ty}"
            ]

            lmp_calc = LAMMPS(
                label=label,
                keep_tmp_files=True,
                tmp_dir="./tmp/",
                parameters=parameters,
                files=files,
                specorder=species,
            )
            all_lmp_dict[f"{bodies}{multihyps}"] = lmp_calc

    yield all_lmp_dict
    del all_lmp_dict
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_init(bodies, multihyps, all_mgp, all_gp):
    """Builds a MappedGaussianProcess for each kernel setup and caches it in all_mgp."""
    clean()

    gp_model = all_gp[f"{bodies}{multihyps}"]

    # grid parameters
    grid_params = {}
    if "2" in bodies:
        grid_params["twobody"] = {"grid_num": [160], "lower_bound": [0.02]}
    if "3" in bodies:
        grid_params["threebody"] = {"grid_num": [31, 32, 33], "lower_bound": [0.02] * 3}

    lammps_location = f"{bodies}{multihyps}"
    data = gp_model.training_statistics

    # Prefer the simple variance map; fall back to no variance map for
    # kernel/hyperparameter combinations that do not support it.
    try:
        mgp_model = MappedGaussianProcess(
            grid_params=grid_params,
            unique_species=data["species"],
            n_cpus=1,
            lmp_file_name=lammps_location,
            var_map="simple",
        )
    except Exception:  # BUG FIX: was a bare `except:` that also swallowed SystemExit
        mgp_model = MappedGaussianProcess(
            grid_params=grid_params,
            unique_species=data["species"],
            n_cpus=1,
            lmp_file_name=lammps_location,
            var_map=None,
        )
    all_mgp[f"{bodies}{multihyps}"] = mgp_model
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_build_map(all_gp, all_mgp, bodies, multihyps):
    """Builds the spline maps for the cached MGP model from its trained GP."""
    key = f"{bodies}{multihyps}"
    all_mgp[key].build_map(all_gp[key])
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_write_model(all_mgp, bodies, multihyps):
    """Serializes the mapped model and checks the non-mean-only warning."""
    mgp_model = all_mgp[f"{bodies}{multihyps}"]
    name = f"my_mgp_{bodies}_{multihyps}"
    mgp_model.write_model(name)
    mgp_model.write_model(name, format="pickle")

    # Serializing a model with a PCA variance map into a dict must warn.
    with pytest.warns(Warning):
        mgp_model.var_map = "pca"
        mgp_model.as_dict()

    mgp_model.var_map = "simple"
    mgp_model.as_dict()
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_load_model(all_mgp, bodies, multihyps):
    """Round-trips the mapped model through both on-disk formats."""
    for extension in ("json", "pickle"):
        path = f"my_mgp_{bodies}_{multihyps}.{extension}"
        all_mgp[f"{bodies}{multihyps}"] = MappedGaussianProcess.from_file(path)
        os.remove(path)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_cubic_spline(all_gp, all_mgp, bodies, multihyps):
    """
    Check the analytic spline derivatives against central finite differences.
    """

    mgp_model = all_mgp[f"{bodies}{multihyps}"]
    # Finite-difference step for the central-difference check below.
    delta = 1e-4

    if "3" in bodies:
        body_name = "threebody"
    elif "2" in bodies:
        body_name = "twobody"

    nmap = len(mgp_model.maps[body_name].maps)
    print("nmap", nmap)
    for i in range(nmap):
        # Skip maps whose spline coefficients are identically zero.
        maxvalue = np.max(np.abs(mgp_model.maps[body_name].maps[i].mean.__coeffs__))
        if maxvalue > 0:
            comp_code = mgp_model.maps[body_name].maps[i].species_code

            if "3" in bodies:
                # 3-body: check the gradient component-wise at a fixed point.
                c_pt = np.array([[0.3, 0.4, 0.5]])
                c, cderv = (
                    mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
                )
                cderv = cderv.reshape([-1])

                for j in range(3):
                    a_pt = deepcopy(c_pt)
                    b_pt = deepcopy(c_pt)
                    a_pt[0][j] += delta
                    b_pt[0][j] -= delta
                    a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
                    b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
                    # Central difference: (f(x+h) - f(x-h)) / 2h.
                    num_derv = (a - b) / (2 * delta)
                    print("spline", comp_code, num_derv, cderv[j])
                    assert np.isclose(num_derv, cderv[j], rtol=1e-2)

            elif "2" in bodies:
                # 2-body: check at the midpoint of the map's 1-D bounds.
                center = np.sum(mgp_model.maps[body_name].maps[i].bounds) / 2.0
                a_pt = np.array([[center + delta]])
                b_pt = np.array([[center - delta]])
                c_pt = np.array([[center]])
                a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
                b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
                c, cderv = (
                    mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
                )
                cderv = cderv.reshape([-1])[0]
                num_derv = (a - b) / (2 * delta)
                print("spline", num_derv, cderv)
                assert np.isclose(num_derv, cderv, rtol=1e-2)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_predict(all_gp, all_mgp, bodies, multihyps):
    """
    Compare MGP predictions (energy, forces, variance) against the source GP.
    """

    gp_model = all_gp[f"{bodies}{multihyps}"]
    mgp_model = all_mgp[f"{bodies}{multihyps}"]

    # # debug
    # filename = f'grid_{bodies}_{multihyps}.pickle'
    # with open(filename, 'rb') as f:
    #     mgp_model = pickle.load(f)

    # Build a random test structure and take atom 0's local environment.
    nenv = 6
    cell = 1.0 * np.eye(3)
    cutoffs = gp_model.cutoffs
    unique_species = gp_model.training_statistics["species"]
    struc_test, f = get_random_structure(cell, unique_species, nenv)
    test_envi = env.AtomicEnvironment(
        struc_test, 0, cutoffs, cutoffs_mask=gp_model.hyps_mask
    )

    if "2" in bodies:
        kernel_name = "twobody"
    elif "3" in bodies:
        kernel_name = "threebody"
        # compare_triplet(mgp_model.maps['threebody'], gp_model, test_envi)

    # The map must carry the same hyperparameter mask as the GP.
    assert Parameters.compare_dict(
        gp_model.hyps_mask, mgp_model.maps[kernel_name].hyps_mask
    )

    gp_pred_en, gp_pred_envar = gp_model.predict_local_energy_and_var(test_envi)
    gp_pred = np.array([gp_model.predict(test_envi, d + 1) for d in range(3)]).T
    print("mgp pred")
    mgp_pred = mgp_model.predict(test_envi)

    # check mgp is within 2 meV/A of the gp
    map_str = "energy"
    # NOTE(review): gp_pred_var is assigned but never used below — confirm.
    gp_pred_var = gp_pred_envar
    print("mgp_en, gp_en", mgp_pred[3], gp_pred_en)
    assert np.allclose(mgp_pred[3], gp_pred_en, rtol=2e-3), f"{bodies} body" \
        f" {map_str} mapping is wrong"

    # if multihyps and ('3' in bodies):
    #     pytest.skip()

    print("mgp_pred", mgp_pred[0])
    print("gp_pred", gp_pred[0])
    print("isclose?", mgp_pred[0] - gp_pred[0], gp_pred[0])
    assert np.allclose(
        mgp_pred[0], gp_pred[0], atol=1e-3
    ), f"{bodies} body {map_str} mapping is wrong"

    if mgp_model.var_map == "simple":
        # With the simple variance map, compare the per-atom predictive
        # variance against the GP's diagonal variance for every atom.
        print(bodies, multihyps)
        for i in range(struc_test.nat):
            test_envi = env.AtomicEnvironment(
                struc_test, i, cutoffs, cutoffs_mask=gp_model.hyps_mask
            )
            mgp_pred = mgp_model.predict(test_envi)
            mgp_var = mgp_pred[1]
            gp_var = predict_atom_diag_var(test_envi, gp_model, kernel_name)
            print("mgp_var, gp_var", mgp_var, gp_var)
            assert np.allclose(mgp_var, gp_var, rtol=1e-2)

    print("struc_test positions", struc_test.positions, struc_test.species_labels)
@pytest.mark.skipif(
    not os.environ.get("lmp", False),
    reason=(
        "lmp not found "
        "in environment: Please install LAMMPS "
        "and set the $lmp env. "
        "variable to point to the executatble."
    ),
)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_lmp_predict(all_lmp, all_gp, all_mgp, bodies, multihyps):
    """
    Compare the LAMMPS mgp pair style against the FLARE ASE calculator.
    """

    # pytest.skip()

    prefix = f"{bodies}{multihyps}"

    mgp_model = all_mgp[prefix]
    gp_model = all_gp[prefix]
    lmp_calculator = all_lmp[prefix]
    ase_calculator = FLARE_Calculator(gp_model, mgp_model, par=False, use_mapping=True)

    # create test structure
    np.random.seed(1)
    cell = np.diag(np.array([1, 1, 1])) * 4
    nenv = 10
    unique_species = gp_model.training_statistics["species"]
    cutoffs = gp_model.cutoffs
    struc_test, f = get_random_structure(cell, unique_species, nenv)

    # build ase atom from struc
    # NOTE(review): ase Atoms.set_calculator is deprecated in newer ASE in
    # favor of `atoms.calc = ...` — confirm against the pinned ASE version.
    ase_atoms_flare = struc_test.to_ase_atoms()
    ase_atoms_flare = FLARE_Atoms.from_ase_atoms(ase_atoms_flare)
    ase_atoms_flare.set_calculator(ase_calculator)
    ase_atoms_lmp = deepcopy(struc_test).to_ase_atoms()
    ase_atoms_lmp.set_calculator(lmp_calculator)

    # Restore the working directory even if a calculator changes it and fails.
    try:
        lmp_en = ase_atoms_lmp.get_potential_energy()
        flare_en = ase_atoms_flare.get_potential_energy()

        lmp_stress = ase_atoms_lmp.get_stress()
        flare_stress = ase_atoms_flare.get_stress()

        lmp_forces = ase_atoms_lmp.get_forces()
        flare_forces = ase_atoms_flare.get_forces()
    except Exception as e:
        os.chdir(curr_path)
        print(e)
        raise e

    os.chdir(curr_path)

    # check that lammps agrees with mgp to within 1 meV/A
    print("energy", lmp_en - flare_en, flare_en)
    assert np.isclose(lmp_en, flare_en, atol=1e-3)
    print("force", lmp_forces - flare_forces, flare_forces)
    assert np.isclose(lmp_forces, flare_forces, atol=1e-3).all()
    print("stress", lmp_stress - flare_stress, flare_stress)
    assert np.isclose(lmp_stress, flare_stress, atol=1e-3).all()

    # check the lmp var
    # mgp_std = np.sqrt(mgp_pred[1])
    # print("isclose? diff:", lammps_stds[atom_num]-mgp_std, "mgp value", mgp_std)
    # assert np.isclose(lammps_stds[atom_num], mgp_std, rtol=1e-2)

    clean(prefix=prefix)
| 31.210269 | 91 | 0.617 | [
"MIT"
] | aaronchen0316/flare | tests/test_mgp.py | 12,765 | Python |
# GENERATED VERSION FILE
# TIME: Fri May 21 12:58:40 2021
# Do not edit by hand: this module is rewritten by the build system.
short_version = '0.1.0'
version_info = (0, 1, 0)
__version__ = short_version + '+62e3868'
| 22.833333 | 32 | 0.686131 | [
"Apache-2.0"
] | callzhang/qdtrack | qdtrack/version.py | 137 | Python |
from setuptools import setup, find_packages
import pathlib

# Resolve the directory containing this setup.py so the README can be read
# regardless of the current working directory.
here = pathlib.Path(__file__).parent.resolve()
# Rendered as the PyPI project page (markdown; see
# long_description_content_type below).
long_description = (here / "README.md").read_text(encoding="utf-8")

setup(
    name="conda-store",
    version='0.3.8',
    url="https://github.com/Quansight/conda-store",
    author="Chris Ostrouchov",
    description="A client to interface with conda-store",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    install_requires=[],
    python_requires=">=3.6",
    license="BSD-3-Clause",
    platforms="Linux, Mac OS X, Windows",
    keywords=["conda-store"],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3 :: Only',
    ],
    project_urls={
        "Bug Reports": "https://github.com/quansight/conda-store",
        "Documentation": "https://conda-store.readthedocs.io/",
        "Source": "https://github.com/quansight/conda-store",
    },
)
| 34.128205 | 67 | 0.637115 | [
"BSD-3-Clause"
] | datalayer-externals/conda-store | conda-store/setup.py | 1,331 | Python |
from PLOI.guidance.base_guidance import BaseSearchGuidance
from PLOI.guidance.no_guidance import NoSearchGuidance
from PLOI.guidance.gnn_guidance import GNNSearchGuidance
| 42.75 | 58 | 0.894737 | [
"MIT"
] | tomsilver/ploi | guidance/__init__.py | 171 | Python |
"""
MTGJSON EnumValues Object
"""
import json
import logging
import pathlib
from typing import Any, Dict, List, Union
from ..compiled_classes.mtgjson_all_printings import MtgjsonAllPrintingsObject
from ..consts import OUTPUT_PATH
from ..utils import sort_internal_lists
from .mtgjson_structures import MtgjsonStructuresObject
LOGGER = logging.getLogger(__name__)
class MtgjsonEnumValuesObject:
    """
    MTGJSON EnumValues Object.

    Walks AllPrintings and the compiled deck files, collecting every distinct
    value observed for a fixed set of card/set/deck attributes, plus the
    pre-generated Keywords file, into `attr_value_dict`.
    """

    # Top-level mapping serialized by to_json():
    # {"card"/"set"/"deck"/...: {field: sorted values}, "keywords": [...]}.
    attr_value_dict: Dict[str, Union[Dict[str, List[str]], List[str]]]

    # Fields of AllPrintings objects whose observed values are enumerated.
    set_key_struct = {
        "card": [
            "availability",
            "borderColor",
            "colorIdentity",
            "colorIndicator",
            "colors",
            "duelDeck",
            "frameEffects",
            "frameVersion",
            "layout",
            "promoTypes",
            "rarity",
            "side",
            "subtypes",
            "supertypes",
            "types",
            "watermark",
        ],
        "set": ["type"],
        "foreignData": ["language"],
    }

    # Fields of compiled deck files whose observed values are enumerated.
    deck_key_struct = {"deck": ["type"]}

    def __init__(self) -> None:
        """
        Initializer to build the internal mapping
        """
        self.attr_value_dict = {}

        set_and_cards = self.construct_set_and_card_enums(
            MtgjsonAllPrintingsObject().to_json()
        )
        self.attr_value_dict.update(set_and_cards)

        decks = self.construct_deck_enums(OUTPUT_PATH.joinpath("decks"))
        self.attr_value_dict.update(decks)

        # Load in pre-generated Keywords content
        keywords = OUTPUT_PATH.joinpath(MtgjsonStructuresObject().key_words + ".json")
        if not keywords.is_file():
            # NOTE(review): prefer lazy logging args over an f-string here.
            LOGGER.warning(f"Unable to find {keywords}")
        else:
            with keywords.open(encoding="utf-8") as file:
                content = json.load(file).get("data", {})
                self.attr_value_dict.update({"keywords": content})

    def construct_deck_enums(self, decks_directory: pathlib.Path) -> Dict[str, Any]:
        """
        Given Decks Path, compile enums based on the types found in the files
        :param decks_directory: Path to the decks/ output directory
        :return Sorted list of enum options for each key
        """
        # Initialize {"deck": {field: set()}} collectors.
        type_map: Dict[str, Any] = {}
        for object_name, object_values in self.deck_key_struct.items():
            type_map[object_name] = dict()
            for object_field_name in object_values:
                type_map[object_name][object_field_name] = set()

        for deck in decks_directory.glob("**/*.json"):
            with deck.open(encoding="utf-8") as file:
                content = json.load(file).get("data", {})

            for key in content.keys():
                if key in self.deck_key_struct["deck"]:
                    type_map["deck"][key].add(content[key])

        # sort_internal_lists converts the sets into sorted lists.
        return dict(sort_internal_lists(type_map))

    def construct_set_and_card_enums(
        self, all_printing_content: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Given AllPrintings, compile enums based on the types found in the file
        :param all_printing_content: AllPrintings internally
        :return Sorted list of enum options for each key
        """
        # Initialize {"card"/"set"/"foreignData": {field: set()}} collectors.
        type_map: Dict[str, Any] = {}
        for object_name, object_values in self.set_key_struct.items():
            type_map[object_name] = dict()
            for object_field_name in object_values:
                type_map[object_name][object_field_name] = set()

        for set_contents in all_printing_content.values():
            # Set-level fields (e.g. "type").
            for set_contents_key in set_contents.keys():
                if set_contents_key in self.set_key_struct["set"]:
                    type_map["set"][set_contents_key].add(
                        set_contents.get(set_contents_key)
                    )

            # Card fields of interest plus nested structs (e.g. foreignData).
            match_keys = set(self.set_key_struct["card"]).union(
                set(self.set_key_struct.keys())
            )
            for card in set_contents.get("cards", []) + set_contents.get("tokens", []):
                for card_key in card.keys():
                    if card_key not in match_keys:
                        continue

                    # Get the value when actually needed
                    card_value = card[card_key]

                    # For Dicts, we just enum the keys
                    if isinstance(card_value, dict):
                        for value in card_value.keys():
                            type_map["card"][card_key].add(value)
                        continue

                    # String, Integer, etc can be added as-is
                    if not isinstance(card_value, list):
                        type_map["card"][card_key].add(card_value)
                        continue

                    for single_value in card_value:
                        # Iterating a non-dict is fine
                        if not isinstance(single_value, dict):
                            type_map["card"][card_key].add(single_value)
                            continue

                        # Internal attributes are sometimes added
                        for attribute in self.set_key_struct.get(card_key, []):
                            type_map[card_key][attribute].add(single_value[attribute])

        return dict(sort_internal_lists(type_map))

    def to_json(self) -> Dict[str, Union[Dict[str, List[str]], List[str]]]:
        """
        Support json.dump()
        :return: JSON serialized object
        """
        return self.attr_value_dict
| 35.658065 | 87 | 0.564502 | [
"MIT"
] | 0az/mtgjson | mtgjson5/compiled_classes/mtgjson_enum_values.py | 5,527 | Python |
#!/usr/bin/env python3
import sys
import chpl_platform, overrides, third_party_utils
from utils import error, memoize, warning
@memoize
def get():
    """Returns the effective CHPL_UNWIND setting: 'bundled', 'system' or 'none'."""
    target_platform = chpl_platform.get('target')
    requested = overrides.get('CHPL_UNWIND')

    # Old spelling of the bundled option; keep accepting it with a warning.
    if requested == 'libunwind':
        warning("CHPL_UNWIND=libunwind is deprecated. Use CHPL_UNWIND=bundled.")
        requested = 'bundled'

    # linux64* and darwin* prefixes are mutually exclusive.
    if target_platform.startswith('linux64'):
        if requested in ('bundled', 'system'):
            return requested
    elif target_platform.startswith('darwin'):
        if requested == 'bundled':
            error("Using CHPL_UNWIND=bundled is not supported on Mac OS X."
                  "\nUse CHPL_UNWIND=system instead.", ValueError)
        elif requested == 'system':
            return 'system'
    return 'none'
@memoize
def get_uniq_cfg_path():
    """Returns the unique configuration path for the bundled libunwind build."""
    return third_party_utils.default_uniq_cfg_path()
@memoize
def get_link_args(unwind):
    """Returns the linker arguments for the given CHPL_UNWIND value.

    unwind: 'system' or 'bundled' (other values yield just the -ldl fallback).
    Returns [] on Mac OS X, where unwinding support lives in the C library.
    """
    platform_val = chpl_platform.get('target')
    osx = platform_val.startswith('darwin')
    # Mac OS X supports libunwind in the C library
    # it's not actually a special library.
    if osx:
        return []
    libs = []
    # Get the link arguments (e.g. -lunwind)
    if unwind == 'system':
        # Try using pkg-config to get the libraries to link
        # libunwind with.
        libs = third_party_utils.pkgconfig_get_link_args(
            'libunwind', system=True, static=True)
    elif unwind == 'bundled':
        # the pkg-config file for libunwind is nice, but as of 1.1
        # it doesn't include -lzma when it probably should.
        # So try to get the libraries out of libunwind.la.
        libs = third_party_utils.default_get_link_args(
            'libunwind', libs=['libunwind.la', 'libunwind-x86_64.la'])
    # add -ldl so that we can call dladdr
    if "-ldl" not in libs:
        libs.append("-ldl")
    return libs
def _main():
    """Entry point: prints the computed CHPL_UNWIND value on stdout."""
    sys.stdout.write("{0}\n".format(get()))


if __name__ == '__main__':
    _main()
| 27.210526 | 81 | 0.626692 | [
"ECL-2.0",
"Apache-2.0"
] | ShreyasKhandekar/chapel | util/chplenv/chpl_unwind.py | 2,068 | Python |
#!/usr/bin/env python3
# Copyright 2020 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import json
import os
import re
import shutil
import sys
import tempfile
from subprocess import Popen
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
from tools.shared import WINDOWS, CLANG_CXX, EMCC, PIPE
from tools.shared import run_process
from tools.config import V8_ENGINE
from tests.common import EMRUN, test_file
import clang_native
# Scratch directory for the native and Emscripten build artifacts; removed by
# run_benchmark once results have been collected.
temp_dir = tempfile.mkdtemp()

# The following run at import time; each `.communicate()` result is a
# (stdout, stderr) pair later embedded verbatim in the HTML report.

# System info
system_info = Popen([EMRUN, '--system_info'], stdout=PIPE, stderr=PIPE).communicate()

# Native info
native_info = Popen(['clang', '-v'], stdout=PIPE, stderr=PIPE).communicate()

# Emscripten info
emscripten_info = Popen([EMCC, '-v'], stdout=PIPE, stderr=PIPE).communicate()
def run_benchmark(benchmark_file, results_file, build_args):
  """Builds `benchmark_file` natively and with Emscripten, runs both builds,
  and writes an HTML report comparing scalar vs SSE timings.

  benchmark_file: path to the C++ benchmark source.
  results_file: path of the HTML report to write.
  build_args: extra emcc flags for the Emscripten build (e.g. ['-msse']).
  """
  # Run native build
  out_file = os.path.join(temp_dir, 'benchmark_sse_native')
  if WINDOWS:
    out_file += '.exe'
  cmd = [CLANG_CXX] + clang_native.get_clang_native_args() + [benchmark_file, '-O3', '-o', out_file]
  print('Building native version of the benchmark:')
  print(' '.join(cmd))
  run_process(cmd, env=clang_native.get_clang_native_env())
  native_results = Popen([out_file], stdout=PIPE, stderr=PIPE).communicate()
  print(native_results[0])

  # Run emscripten build
  out_file = os.path.join(temp_dir, 'benchmark_sse_html.js')
  cmd = [EMCC, benchmark_file, '-O3', '-sTOTAL_MEMORY=536870912', '-o', out_file] + build_args
  print('Building Emscripten version of the benchmark:')
  print(' '.join(cmd))
  run_process(cmd)
  cmd = V8_ENGINE + ['--experimental-wasm-simd', os.path.basename(out_file)]
  print(' '.join(cmd))
  old_dir = os.getcwd()
  os.chdir(os.path.dirname(out_file))
  wasm_results = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
  os.chdir(old_dir)
  if not wasm_results:
    raise Exception('Unable to run benchmark in V8!')
  if not wasm_results[0].strip():
    print(wasm_results[1])
    sys.exit(1)
  print(wasm_results[0])

  def strip_comments(text):
    # Remove //-line and /* */ block comments from the V8 output.
    # BUG FIX: re.S was previously passed positionally, which made it the
    # `count` argument of re.sub (re.S == 16, so only the first 16 comments
    # were stripped and DOTALL never applied). Pass it as `flags`.
    return re.sub(r'//.*?\n|/\*.*?\*/', '', text, flags=re.S)

  benchmark_results = strip_comments(wasm_results[0])

  # Strip out unwanted print output.
  benchmark_results = benchmark_results[benchmark_results.find('{'):].strip()
  if '*************************' in benchmark_results:
    benchmark_results = benchmark_results[:benchmark_results.find('*************************')].strip()

  print(benchmark_results)
  shutil.rmtree(temp_dir)

  native_results = json.loads(native_results[0])
  benchmark_results = benchmark_results[benchmark_results.index('{'):benchmark_results.rindex('}') + 1]
  wasm_results = json.loads(benchmark_results)

  # native_workload = native_results['workload']
  # html_workload = wasm_results['workload']

  html = '''<html><head></head><body><h1>SSE JavaScript Benchmark</h1>
        <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js"></script>
        <script src="https://code.highcharts.com/highcharts.js"></script>
        <script src="https://code.highcharts.com/modules/exporting.js"></script><b>System Info:</b><br/>
''' + system_info[0].replace('\n', '<br/>') + '''
<b>Native Clang Compiler:</b><br/>
''' + native_info[1].replace('\n', '<br/>') + '''
<b>Emscripten Compiler:</b><br/>
''' + emscripten_info[0].replace('\n', '<br/>')

  # Group results per chart name for both runs.
  charts_native = {}
  charts_html = {}
  for result in native_results['results']:
    ch = result['chart']
    if ch not in charts_native:
      charts_native[ch] = []
    charts_native[ch] += [result]
  for result in wasm_results['results']:
    ch = result['chart']
    if ch not in charts_html:
      charts_html[ch] = []
    charts_html[ch] += [result]

  def find_result_in_category(results, category):
    # Linear search for the entry with a matching 'category', else None.
    for result in results:
      if result['category'] == category:
        return result
    return None

  def format_comparison(a, b):
    # Render the ratio of two timings as a colored FASTER/SLOWER HTML span.
    if a < b and a != 0:
      return "<span style='color:green;font-weight:bold;'> {:10.2f}".format(b / a) + 'x FASTER</span>'
    elif b != 0:
      return "<span style='color:red;font-weight:bold;'> {:10.2f}".format(a / b) + 'x SLOWER</span>'
    else:
      return "<span style='color:red;font-weight:bold;'> NaN </span>"

  chartNumber = 0

  # Accumulated totals across all categories, for the final overall chart.
  total_time_native_scalar = 0
  total_time_native_simd = 0
  total_time_html_scalar = 0
  total_time_html_simd = 0

  for chart_name in charts_native.keys():
    # Extract data for each chart.
    categories = []
    nativeScalarResults = []
    nativeSimdResults = []
    htmlScalarResults = []
    htmlSimdResults = []
    native_results = charts_native[chart_name]
    wasm_results = charts_html[chart_name]
    textual_results_native = '<p>'
    textual_results_html = '<p>'
    textual_results_html2 = '<p>'
    textual_results_html3 = '<p>'
    for result in native_results:
      categories += ["'" + result['category'] + "'"]
      nsc = result['scalar']
      nsi = result['simd']
      nativeScalarResults += [str(nsc)]
      nativeSimdResults += [str(nsi)]
      html_result = find_result_in_category(wasm_results, result['category'])
      textual_results_native += 'Native ' + result['category'] + ': ' + "{:10.4f}".format(nsc) + 'ns -> ' + "{:10.4f}".format(nsi) + 'ns. '
      textual_results_native += 'Native SSE is ' + format_comparison(nsi, nsc) + ' than native scalar. <br />'
      if html_result is not None:
        hsc = html_result['scalar']
        htmlScalarResults += [str(hsc)]
        hsi = html_result['simd']
        htmlSimdResults += [str(hsi)]
        textual_results_html += 'JS ' + result['category'] + ': ' + "{:10.4f}".format(hsc) + 'ns -> ' + "{:10.4f}".format(hsi) + 'ns. '
        textual_results_html += 'JS SSE is ' + format_comparison(hsi, hsc) + ' than JS scalar. <br />'
        textual_results_html2 += 'JS ' + result['category'] + ': JS scalar is ' + format_comparison(hsc, nsc) + ' than native scalar. <br />'
        textual_results_html3 += 'JS ' + result['category'] + ': JS SSE is ' + format_comparison(hsi, nsi) + ' than native SSE. <br />'
        total_time_native_scalar += nsc
        total_time_native_simd += nsi
        total_time_html_scalar += hsc
        total_time_html_simd += hsi
      else:
        # No matching JS result: chart a sentinel value.
        htmlScalarResults += [str(-1)]
        htmlSimdResults += [str(-1)]

    chartNumber += 1
    html += '<div id="chart' + str(chartNumber) + '" style="width:100%; height:400px; margin-top: 100px;"></div>'
    html += '''<script>$(function () {
    $('#chart''' + str(chartNumber) + '''').highcharts({
      chart: {
        type: 'column'
      },
      title: {
        text: "''' + chart_name + '''"
      },
      subtitle: {
        text: 'Time per operation in nanoseconds'
      },
      xAxis: {
        categories: [''' + ','.join(categories) + '''
        ]
      },
      yAxis: {
        min: 0,
        title: {
          text: 'Time (nanoseconds)'
        }
      },
      tooltip: {
        headerFormat: '<span style="font-size:10px">{point.key}</span><table>',
        pointFormat: '<tr><td style="color:{series.color};padding:0">{series.name}: </td>' +
          '<td style="padding:0"><b>{point.y:.3f} ns</b></td></tr>',
        footerFormat: '</table>',
        shared: true,
        useHTML: true
      },
      plotOptions: {
        column: {
          pointPadding: 0.2,
          borderWidth: 0
        }
      },
      series: [{
        name: 'Native scalar',
        data: [''' + ','.join(nativeScalarResults) + ''']
      }, {
        name: 'Native SSE',
        data: [''' + ','.join(nativeSimdResults) + ''']
      }, {
        name: 'JS scalar',
        data: [''' + ','.join(htmlScalarResults) + ''']
      }, {
        name: 'JS SSE',
        data: [''' + ','.join(htmlSimdResults) + ''']
      }]
    });
  });</script>''' + '<table><tr><td>' + textual_results_native + '</td><td>' + textual_results_html + '</td></tr><tr><td>' + textual_results_html2 + '</td><td>' + textual_results_html3 + '</td></tr></table>'

  # Final overall score

  html += '<div id="overallscore" style="width:100%; height:400px; margin-top: 100px;"></div>'
  html += '''<script>$(function () {
    $('#overallscore').highcharts({
      chart: {
        type: 'column'
      },
      title: {
        text: "Overall Execution Time"
      },
      xAxis: {
        categories: ['Total time normalized to native']
      },
      yAxis: {
        min: 0,
        title: {
          text: 'Relative time'
        }
      },
      tooltip: {
        headerFormat: '<span style="font-size:10px">{point.key}</span><table>',
        pointFormat: '<tr><td style="color:{series.color};padding:0">{series.name}: </td>' +
          '<td style="padding:0"><b>{point.y:.3f}x</b></td></tr>',
        footerFormat: '</table>',
        shared: true,
        useHTML: true
      },
      plotOptions: {
        column: {
          pointPadding: 0.2,
          borderWidth: 0
        }
      },
      series: [{
        name: 'Native scalar',
        data: [''' + str(1.0) + ''']
      }, {
        name: 'Native SSE',
        data: [''' + (str(total_time_native_simd / total_time_native_scalar) if total_time_native_scalar != 0 else 'N/A') + ''']
      }, {
        name: 'JS scalar',
        data: [''' + (str(total_time_html_scalar / total_time_native_scalar) if total_time_native_scalar != 0 else 'N/A') + ''']
      }, {
        name: 'JS SSE',
        data: [''' + (str(total_time_html_simd / total_time_native_scalar) if total_time_native_scalar != 0 else 'N/A') + ''']
      }]
    });
  });</script>'''

  html += '</body></html>'

  # Use a context manager so the report file is flushed and closed promptly
  # (the original `open(...).write(...)` leaked the handle).
  with open(results_file, 'w') as out:
    out.write(html)
  print('Wrote ' + str(len(html)) + ' bytes to file ' + results_file + '.')
if __name__ == '__main__':
  # Dispatch on the requested instruction set; each suite maps to one
  # benchmark source plus the matching -m<isa> compiler flag.
  suite = sys.argv[1].lower() if len(sys.argv) == 2 else None
  if suite in ['sse', 'sse1']:
    run_benchmark(test_file('sse', 'benchmark_sse1.cpp'), 'results_sse1.html', ['-msse'])
  elif suite == 'sse2':
    run_benchmark(test_file('sse', 'benchmark_sse2.cpp'), 'results_sse2.html', ['-msse2'])
  elif suite == 'sse3':
    run_benchmark(test_file('sse', 'benchmark_sse3.cpp'), 'results_sse3.html', ['-msse3'])
  elif suite == 'ssse3':
    run_benchmark(test_file('sse', 'benchmark_ssse3.cpp'), 'results_ssse3.html', ['-mssse3'])
  else:
    # BUG FIX: the usage message omitted the supported 'ssse3' suite.
    raise Exception('Usage: python tests/benchmark_sse.py sse1|sse2|sse3|ssse3')
| 38.581699 | 209 | 0.546586 | [
"MIT"
] | AIEdX/emscripten | tests/benchmark_sse.py | 11,806 | Python |
from django.urls import path, re_path
from .views import *
app_name = 'component'
urlpatterns = [
path('', ComponentListView.as_view(), name='list'),
path('add/', ComponentCreateView.as_view(), name='create'),
re_path(r'^(?P<pk>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/$',
ComponentDetailView.as_view(), name='detail'),
re_path(r'^(?P<pk>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/edit/$',
ComponentUpdateView.as_view(), name='edit'),
re_path(r'^(?P<pk>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/image/upload/$',
ComponentS3ImageUploadView.as_view(), name='upload-image'),
re_path(r'^(?P<pk>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/image/success/$',
ComponentS3ImageUploadSuccessEndpointView.as_view(), name='upload-image-success'),
re_path(r'^(?P<component_id>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/image/(?P<pk>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/delete/$',
ComponentS3ImageDeleteView.as_view(), name='image-delete'),
re_path(r'^(?P<component_id>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/image/(?P<pk>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/title/$',
ComponentS3ImageTitleUpdateView.as_view(), name='image-title-edit'),
]
| 58.458333 | 179 | 0.565217 | [
"MIT"
] | iotile/iotile_cloud | server/apps/component/urls.py | 1,403 | Python |
# coding=utf-8
import json
import urllib
import urllib2
from bs4 import BeautifulSoup
def run(bot, chat_id, user, keyConfig='', message='', totalResults=1):
requestText = str(message)
if requestText == '':
totalSteamGames = int(Get_steam_total())
totalGOGGames = int(Get_GOG_total())
if totalSteamGames is not None and totalGOGGames is not None:
bot.sendMessage(chat_id=chat_id, text='I\'m sorry ' + (user if not user == '' else 'Dave') + \
', there are ' + str(int(totalSteamGames) + int(totalGOGGames)) +
' total games on Steam and GOG combined. Pick one.')
return True
retryCount = 3
appId = ''
while retryCount > 0 and appId == '':
retryCount -= 1
rawSteamSearchResultsMarkup = urllib.urlopen('http://store.steampowered.com/search/?category1=998&term=' + requestText).read()
appId = steam_results_parser(rawSteamSearchResultsMarkup)
if appId:
steamGameLink = 'http://store.steampowered.com/app/' + appId
bypassAgeGate = urllib2.build_opener()
#this bypasses the "mature content - continue/cancel" screen
bypassAgeGate.addheaders.append(('Cookie', 'mature_content=1; path=/; max-age=31536000;expires=Fri, 26 Mar 2027 20:00:00 GMT'))
bypassAgeGate.open(steamGameLink)
#this bypasses the "enter your date of birth" screen
bypassAgeGate.addheaders.append(('Cookie', 'birthtime=0; path=/; max-age=31536000;expires=Fri, 26 Mar 2027 20:00:00 GMT'))
code = bypassAgeGate.open(steamGameLink).read()
if 'id=\"agegate_wizard\"' in code:
gameTitle = steam_age_gate_parser(code)
bot.sendMessage(chat_id=chat_id, text='I\'m sorry ' + (user if not user == '' else 'Dave') + \
', I\'m afraid that \"' + gameTitle + '\" is protected by an age gate.\n' +
steamGameLink)
return False
gameResults = steam_game_parser(code, steamGameLink)
try:
bot.sendMessage(chat_id=chat_id, text=gameResults,
disable_web_page_preview=True, parse_mode='Markdown')
except:
bot.sendMessage(chat_id=chat_id, text=gameResults)
return True
else:
gogSearchData = json.load(urllib.urlopen('http://embed.gog.com/games/ajax/filtered?mediaType=game&search=' + requestText))
appId, price, discount = gog_results_parser(gogSearchData)
if appId:
gogGameLink = 'http://api.gog.com/products/' + str(appId) + '?expand=downloads,expanded_dlcs,description,screenshots,videos,related_products,changelog'
data = json.load(urllib.urlopen(gogGameLink))
gameResults = gog_game_parser(data, price, discount)
bot.sendMessage(chat_id=chat_id, text=gameResults,
disable_web_page_preview=True, parse_mode='Markdown')
else:
bot.sendMessage(chat_id=chat_id, text='I\'m sorry ' + (user if not user == '' else 'Dave') + \
', I\'m afraid I can\'t find the game ' + \
requestText.encode('utf-8'))
def steam_results_parser(rawMarkup):
    """Return the Steam app or bundle id of the first search result.

    Parses the store search-results markup and returns the first result
    row's ``data-ds-appid`` (preferred) or ``data-ds-bundleid`` attribute,
    or '' when no row carries either attribute.
    """
    soup = BeautifulSoup(rawMarkup, 'html.parser')
    # Rows are already ordered by relevance, so the first usable id wins;
    # no need to collect every row's ids as the previous version did.
    for resultRow in soup.findAll('a', attrs={'class':'search_result_row'}):
        if 'data-ds-appid' in resultRow.attrs:
            return resultRow['data-ds-appid']
        if 'data-ds-bundleid' in resultRow.attrs:
            return resultRow['data-ds-bundleid']
    return ''
def Get_steam_total():
    """Scrape the Steam search page and return the total game count.

    Returns the count as a string (e.g. '12345'), or 'uncountable' when
    the pagination element cannot be located in the markup.
    """
    markup = urllib.urlopen('http://store.steampowered.com/search/?category1=998&term=#').read()
    pagination = BeautifulSoup(markup, 'html.parser').find(
        'div', attrs={'class': 'search_pagination_left'})
    if not pagination:
        return 'uncountable'
    # The element reads like "showing 1 - 25 of 12345"; keep only the total.
    return pagination.string.replace('showing 1 - 25 of', '').strip()
def Get_GOG_total():
    """Return GOG's total game count from its search API, or 'uncountable'.

    The count comes back as whatever type the JSON payload carries
    (typically an int).
    """
    payload = json.load(urllib.urlopen('http://embed.gog.com/games/ajax/filtered?mediaType=game&sort=bestselling'))
    if 'totalGamesFound' not in payload:
        return 'uncountable'
    return payload['totalGamesFound']
def steam_age_gate_parser(rawMarkup):
    """Return the stripped <title> text of an age-gated Steam page."""
    title_tag = BeautifulSoup(rawMarkup, 'html.parser').find('title')
    return title_tag.string.strip()
def steam_game_parser(code, link):
    """Build a Markdown summary of a Steam store page.

    :param code: raw HTML of the store page (age gate already bypassed).
    :param link: canonical store URL, embedded in the summary.
    :returns: Markdown string containing, when present in the markup:
        title/price (with discount), description snippet, the link, release
        date, feature list, review summaries and tag list.  Sections after
        the description are only emitted when a title was found.
    """
    soup = BeautifulSoup(code, 'html.parser')
    AllGameDetailsFormatted = ''
    # --- Title plus price (regular, or discounted with percentage) ---
    titleDiv = soup.find('div', attrs={'class':'apphub_AppName'})
    if titleDiv:
        gameTitle = titleDiv.string
        AllGameDetailsFormatted += '*' + gameTitle
        priceDiv = soup.find('div', attrs={'class':'game_purchase_price price'})
        if priceDiv:
            gamePrice = priceDiv.string
            AllGameDetailsFormatted += ' - ' + gamePrice.strip()
        else:
            # No regular price block: the game is probably on sale.
            priceDiv = soup.find('div', attrs={'class':'discount_final_price'})
            if priceDiv:
                gamePrice = priceDiv.string
                AllGameDetailsFormatted += ' - ' + gamePrice.strip()
                discountPercentageDiv = soup.find('div', attrs={'class':'discount_pct'})
                if discountPercentageDiv:
                    percentageDiscountedBy = discountPercentageDiv.string
                    AllGameDetailsFormatted += ' (at ' + percentageDiscountedBy.strip() + ' off)'
        AllGameDetailsFormatted += '*\n'
    else:
        print('Cannot parse title as div with class apphub_AppName from Steam page for ' + link)
    descriptionDiv = soup.find('div', attrs={'class':'game_description_snippet'})
    if descriptionDiv:
        # Underscores would toggle italics in Telegram Markdown, so strip them.
        descriptionSnippet = descriptionDiv.string.replace('\r', '').replace('\n', '').replace('\t', '').replace('_', ' ')
        AllGameDetailsFormatted += descriptionSnippet + '\n'
    # Everything below is only appended when something was parsed above.
    if AllGameDetailsFormatted:
        AllGameDetailsFormatted += link + '\n'
        dateSpan = soup.find('div', attrs={'class':'date'})
        if dateSpan:
            releaseDate = dateSpan.string
            AllGameDetailsFormatted += 'Release Date: ' + releaseDate + '\n'
        # --- Feature list (single-player, multi-player, VR, ...) ---
        featureList = ''
        featureLinks = soup.findAll('a', attrs={'class':'name'})
        if len(featureLinks) > 0:
            for featureLink in featureLinks:
                if featureLink.string != None:
                    featureList += '    ' + featureLink.string.replace('Seated', 'Seated VR') + '\n'
            AllGameDetailsFormatted += 'Features:\n' + featureList
        # --- Review summary rows (recent / overall) ---
        reviewRows = ''
        reviewDivs = soup.findAll('div', attrs={'class':'user_reviews_summary_row'})
        if len(reviewDivs) > 0:
            for reviewRow in reviewDivs:
                reviewSubtitleRawDiv = reviewRow.find('div', attrs={'class':'subtitle column'})
                reviewSubtitleDiv = ''
                if reviewSubtitleRawDiv is not None:
                    reviewSubtitleDiv = reviewSubtitleRawDiv.string
                reviewSummaryRawDiv = reviewRow.find('div', attrs={'class':'summary column'})
                reviewSummaryDiv = ''
                if reviewSummaryRawDiv is not None:
                    reviewSummaryDiv = reviewSummaryRawDiv.string
                if not reviewSummaryDiv:
                    # Fallback: some layouts carry the summary in a span.
                    findReviewSummaryDiv = reviewRow.find('span', attrs={'class':'game_review_summary'})
                    if findReviewSummaryDiv:
                        reviewSummaryDiv = findReviewSummaryDiv.string
                reviewSummaryDiv = reviewSummaryDiv.replace('\r', '').replace('\n', '').replace('\t', '')
                if reviewSummaryDiv != 'No user reviews':
                    reviewRows += '    ' + reviewSubtitleDiv + \
                                  reviewSummaryDiv.replace('-', '')\
                                      .replace(' user reviews', '')\
                                      .replace(' of the ', ' of ') + '\n'
        if reviewRows:
            AllGameDetailsFormatted += 'Reviews:\n' + reviewRows.replace('Recent Reviews:', '')
        if AllGameDetailsFormatted.endswith('\n'):
            # Trim the trailing newline before appending the tag block.
            AllGameDetailsFormatted = AllGameDetailsFormatted[:AllGameDetailsFormatted.rfind('\n')]
        # --- Tag list, rendered as inline code to avoid Markdown clashes ---
        tagList = ''
        tagLinks = soup.findAll('a', attrs={'class':'app_tag'})
        if len(tagLinks) > 0:
            for tagLink in tagLinks:
                tagList += tagLink.string.replace('\r', '').replace('\n', '').replace('\t', '') + ', '
        AllGameDetailsFormatted += '\n' + 'Tags:\n`' + tagList
        if AllGameDetailsFormatted.endswith(', '):
            # Drop the trailing separator left by the loop above.
            AllGameDetailsFormatted = AllGameDetailsFormatted[:AllGameDetailsFormatted.rfind(', ')]
        AllGameDetailsFormatted += '`'
    return AllGameDetailsFormatted
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
    """urllib2 handler that disables redirect following (Python 2 API).

    Instead of chasing a 3xx response, it wraps the response as-is so the
    caller can inspect the status code and Location header itself.
    NOTE(review): this class is not referenced elsewhere in this module --
    possibly kept for external callers; verify before removing.
    """
    def http_error_302(self, req, fp, code, msg, headers):
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        infourl.code = code
        return infourl
    # All other 3xx statuses get the same pass-through treatment.
    http_error_300 = http_error_302
    http_error_301 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
def gog_results_parser(searchData):
    """Return (id, final price, discount %) of the first GOG search hit.

    Falls back to ('', '', '') when there are no results or the first
    result carries no 'id' field.
    """
    products = searchData['products']
    if not products:
        return '', '', ''
    first = products[0]
    if 'id' not in first:
        return '', '', ''
    price = first['price']
    return first['id'], price['finalAmount'], price['discountPercentage']
def gog_game_parser(data, price, discount):
    """Build a Markdown summary line for a GOG product.

    :param data: decoded JSON product object from api.gog.com.
    :param price: final price; 0 or less is rendered as 'free to play'.
    :param discount: discount percentage (a number); shown when > 0.
    :returns: Markdown string '*title - price (at N% off)*\\n' plus the
        cleaned full description on a second line when available.
    :raises Exception: when the product object has no 'title'.
    """
    if 'title' not in data:
        raise Exception('Cannot parse title from gog api object for this game.')
    AllGameDetailsFormatted = '*' + data['title']
    if price > 0:
        AllGameDetailsFormatted += ' - ' + str(price) + '$'
    else:
        AllGameDetailsFormatted += ' - free to play'
    if discount > 0:
        # Bug fix: discountPercentage arrives as a number; concatenating it
        # to a str raised TypeError before str() was added.
        AllGameDetailsFormatted += ' (at ' + str(discount) + '% off)'
    AllGameDetailsFormatted += '*\n'
    if 'description' in data and 'full' in data['description']:
        # Strip literal '*' (would break Markdown) and translate the basic
        # HTML bold markup into Markdown bold.
        descriptionSnippet = data['description']['full']
        AllGameDetailsFormatted += descriptionSnippet\
            .replace('*', '')\
            .replace('<br>', '')\
            .replace('<b>', '*')\
            .replace('</b>', '*') + '\n'
    # TODO: optionally append data['links']['product_card'] and
    # data['release_date'] (previously stubbed out here).
    return AllGameDetailsFormatted
| 46.724138 | 163 | 0.609502 | [
"Apache-2.0"
] | SalamiArmy/TelegramSteamBotForGoogleAppEngine | telegram_commands/getgame.py | 10,840 | Python |
import logging
from typing import Dict, Sequence

from talon import Context, Module, actions

from .user_settings import get_list_from_csv

mod = Module()
ctx = Context()

mod.list("vocabulary", desc="additional vocabulary words")

# Default words that will need to be capitalized.
# DON'T EDIT THIS. Edit settings/words_to_replace.csv instead.
# These defaults and those later in this file are ONLY used when
# auto-creating the corresponding settings/*.csv files. Those csv files
# determine the contents of user.vocabulary and dictate.word_map. Once they
# exist, the contents of the lists/dictionaries below are irrelevant.
_capitalize_defaults = [
    # NB. the lexicon now capitalizes January/February by default, but not the
    # others below. Not sure why.
    "January",
    "February",
    # March omitted because it's a regular word too
    "April",
    # May omitted because it's a regular word too
    "June",
    "July",
    "August",  # technically also an adjective but the month is far more common
    "September",
    "October",
    "November",
    "December",
]

# Default words that need to be remapped.
_word_map_defaults = {
    # E.g:
    # "cash": "cache",
    # This is the opposite ordering to words_to_replace.csv (the latter has the target word first)
}
_word_map_defaults.update({word.lower(): word for word in _capitalize_defaults})

# phrases_to_replace is a spoken form -> written form map, used by our
# implementation of `dictate.replace_words` (at bottom of file) to rewrite words
# and phrases Talon recognized. This does not change the priority with which
# Talon recognizes particular phrases over others.
# NOTE(review): despite its name, get_list_from_csv appears to return a dict
# here (the result is used as a mapping below) -- verify.
phrases_to_replace = get_list_from_csv(
    "words_to_replace.csv",
    headers=("Replacement", "Original"),
    default=_word_map_defaults
)

# "dictate.word_map" is used by Talon's built-in default implementation of
# `dictate.replace_words`, but supports only single-word replacements.
# Multi-word phrases are ignored.
ctx.settings["dictate.word_map"] = phrases_to_replace

# Default words that should be added to Talon's vocabulary.
# Don't edit this. Edit 'additional_vocabulary.csv' instead
_simple_vocab_default = ["nmap", "admin", "Cisco", "Citrix", "VPN", "DNS", "Minecraft"]

# Defaults for different pronunciations of words that need to be added to
# Talon's vocabulary.
_default_vocabulary = {
    "N map": "nmap",
    "under documented": "under-documented",
}
_default_vocabulary.update({word: word for word in _simple_vocab_default})

# "user.vocabulary" is used to explicitly add words/phrases that Talon doesn't
# recognize. Words in user.vocabulary (or other lists and captures) are
# "command-like" and their recognition is prioritized over ordinary words.
ctx.lists["user.vocabulary"] = get_list_from_csv(
    "additional_words.csv",
    headers=("Word(s)", "Spoken Form (If Different)"),
    default=_default_vocabulary,
)
class PhraseReplacer:
    """Utility for replacing phrases by other phrases inside text or word lists.

    Replacing longer phrases has priority.

    Args:
    - phrase_dict: dictionary mapping recognized/spoken forms to written forms
    """

    def __init__(self, phrase_dict: Dict[str, str]):
        # Index phrases by first word, then by the number of subsequent
        # words n_next, so `replace` only inspects candidates that share
        # the first word at the current position.
        phrase_index = dict()
        for spoken_form, written_form in phrase_dict.items():
            words = spoken_form.split()
            if not words:
                # Bug fix: the two implicitly-concatenated literals were
                # previously missing a separating space, producing
                # "...written form<value>, ignored" in the log.
                logging.warning("Found empty spoken form for written form "
                                f"{written_form}, ignored")
                continue
            first_word, n_next = words[0], len(words) - 1
            phrase_index.setdefault(first_word, {}) \
                .setdefault(n_next, {})[tuple(words[1:])] = written_form

        # Sort n_next index so longer phrases have priority.
        self.phrase_index = {
            first_word: list(sorted(same_first_word.items(), key=lambda x: -x[0]))
            for first_word, same_first_word in phrase_index.items()
        }

    def replace(self, input_words: Sequence[str]) -> Sequence[str]:
        """Return a copy of input_words with every known phrase rewritten.

        Longer matches win; unmatched words pass through unchanged.
        """
        input_words = tuple(input_words)  # tuple to ensure hashability of slices
        output_words = []
        first_word_i = 0
        while first_word_i < len(input_words):
            first_word = input_words[first_word_i]
            next_word_i = first_word_i + 1
            # Could this word be the first of a phrase we should replace?
            for n_next, phrases_n_next in self.phrase_index.get(first_word, []):
                # Yes. Perhaps a phrase with n_next subsequent words?
                continuation = input_words[next_word_i : next_word_i + n_next]
                if continuation in phrases_n_next:
                    # Found a match!
                    output_words.append(phrases_n_next[continuation])
                    first_word_i += 1 + n_next
                    break
            else:
                # No match, just add the word to the result
                output_words.append(first_word)
                first_word_i += 1
        return output_words

    # Wrapper used for testing.
    def replace_string(self, text: str) -> str:
        """Split on whitespace, replace phrases, re-join with single spaces."""
        return ' '.join(self.replace(text.split()))
# Unit tests for PhraseReplacer
# NOTE: these asserts execute at import time; a failure prevents this module
# (and therefore this Talon customization) from loading.
rep = PhraseReplacer({
    'this': 'foo',
    'that': 'bar',
    'this is': 'stopping early',
    'this is a test': 'it worked!',
})

assert rep.replace_string('gnork') == 'gnork'
assert rep.replace_string('this') == 'foo'
assert rep.replace_string('this that this') == 'foo bar foo'
assert rep.replace_string('this is a test') == 'it worked!'
assert rep.replace_string('well this is a test really') == 'well it worked! really'
assert rep.replace_string('try this is too') == 'try stopping early too'
assert rep.replace_string('this is a tricky one') == 'stopping early a tricky one'

# The single module-level instance used by the dictate.replace_words override.
phrase_replacer = PhraseReplacer(phrases_to_replace)
@ctx.action_class('dictate')
class OverwrittenActions:
    """Overrides Talon's built-in `dictate.replace_words` action so that
    multi-word phrases (not just single words) get rewritten."""

    def replace_words(words: Sequence[str]) -> Sequence[str]:
        # NOTE: Talon action-class methods are declared without `self`.
        try:
            return phrase_replacer.replace(words)
        except Exception:
            # Previously a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; logging.exception records the
            # traceback so failures are diagnosable.  Fall back to the
            # default implementation for error-robustness.
            logging.exception("phrase replacer failed!")
            return actions.next(words)
| 38.654321 | 98 | 0.677579 | [
"MIT"
] | DylanOpet/knausj_talon | code/vocabulary.py | 6,262 | Python |
# coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import dlxapi
from dlxapi.models.project_attachment_removed_message_payload import ProjectAttachmentRemovedMessagePayload # noqa: E501
from dlxapi.rest import ApiException
class TestProjectAttachmentRemovedMessagePayload(unittest.TestCase):
    """ProjectAttachmentRemovedMessagePayload unit test stubs"""

    def setUp(self):
        # No fixtures required yet; kept for symmetry / future use.
        pass

    def tearDown(self):
        pass

    def testProjectAttachmentRemovedMessagePayload(self):
        """Test ProjectAttachmentRemovedMessagePayload"""
        # FIXME: construct object with mandatory attributes with example values
        # model = dlxapi.models.project_attachment_removed_message_payload.ProjectAttachmentRemovedMessagePayload()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| 26.902439 | 129 | 0.7534 | [
"MIT"
] | dlens/dlxapi | python/test/test_project_attachment_removed_message_payload.py | 1,103 | Python |
#!/usr/bin/env python3
import sys
def gen_freq(freq: float, duration: int):
    """Emit ``duration`` characters alternating between ' ' and '~'.

    A phase accumulator toggles the output character roughly once every
    ``freq`` steps, producing a square-wave-like pattern on stdout.

    :param freq: number of steps between toggles (may be fractional).
    :param duration: total number of characters to emit.
    :returns: the emitted string, so callers and tests can inspect the
        output (previously the function returned None).
    """
    phase = 0.0
    current, other = chr(32), chr(126)  # ' ' and '~'
    emitted = []
    for _ in range(duration):
        if phase >= freq:
            phase -= freq
            current, other = other, current
        phase += 1.0
        # Write and flush per character: the output is presumably consumed
        # live by a downstream reader, so buffering would change timing --
        # TODO confirm.
        sys.stdout.write(current)
        sys.stdout.flush()
        emitted.append(current)
    return ''.join(emitted)
if __name__ == '__main__':
    # `assert` is stripped under `python -O`; validate argv explicitly and
    # exit with a usage message instead of a bare AssertionError.
    if len(sys.argv) != 3:
        raise SystemExit('Usage: {} FREQ DURATION'.format(sys.argv[0]))
    gen_freq(float(sys.argv[1]), int(sys.argv[2]))
| 21 | 50 | 0.528571 | [
"Unlicense"
] | kodo-pp/nplayer | .pake/pkgdir/files/opt/nplayer/gen_freq.py | 420 | Python |
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ... import core as _core
from ... import meta as _meta
# Public API of this generated module.
__all__ = [
    'Event',
    'EventSeries',
]
# NOTE: generated by pulumigen -- prefer changing the generator over
# hand-editing this class.
@pulumi.output_type
class Event(dict):
    """
    Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time.  Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason.  Events should be treated as informative, best-effort, supplemental data.
    """
    def __init__(__self__, *,
                 event_time: str,
                 metadata: '_meta.v1.outputs.ObjectMeta',
                 action: Optional[str] = None,
                 api_version: Optional[str] = None,
                 deprecated_count: Optional[int] = None,
                 deprecated_first_timestamp: Optional[str] = None,
                 deprecated_last_timestamp: Optional[str] = None,
                 deprecated_source: Optional['_core.v1.outputs.EventSource'] = None,
                 kind: Optional[str] = None,
                 note: Optional[str] = None,
                 reason: Optional[str] = None,
                 regarding: Optional['_core.v1.outputs.ObjectReference'] = None,
                 related: Optional['_core.v1.outputs.ObjectReference'] = None,
                 reporting_controller: Optional[str] = None,
                 reporting_instance: Optional[str] = None,
                 series: Optional['outputs.EventSeries'] = None,
                 type: Optional[str] = None):
        """
        Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time.  Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason.  Events should be treated as informative, best-effort, supplemental data.
        :param str event_time: eventTime is the time when this Event was first observed. It is required.
        :param str action: action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.
        :param str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        :param int deprecated_count: deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
        :param str deprecated_first_timestamp: deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
        :param str deprecated_last_timestamp: deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
        :param '_core.v1.EventSourceArgs' deprecated_source: deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
        :param str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        :param str note: note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
        :param str reason: reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.
        :param '_core.v1.ObjectReferenceArgs' regarding: regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
        :param '_core.v1.ObjectReferenceArgs' related: related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
        :param str reporting_controller: reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.
        :param str reporting_instance: reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.
        :param 'EventSeriesArgs' series: series is data about the Event series this event represents or nil if it's a singleton Event.
        :param str type: type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.
        """
        pulumi.set(__self__, "event_time", event_time)
        pulumi.set(__self__, "metadata", metadata)
        if action is not None:
            pulumi.set(__self__, "action", action)
        if api_version is not None:
            # Pinned: any supplied value is replaced by the canonical API
            # group/version for this resource.
            pulumi.set(__self__, "api_version", 'events.k8s.io/v1')
        if deprecated_count is not None:
            pulumi.set(__self__, "deprecated_count", deprecated_count)
        if deprecated_first_timestamp is not None:
            pulumi.set(__self__, "deprecated_first_timestamp", deprecated_first_timestamp)
        if deprecated_last_timestamp is not None:
            pulumi.set(__self__, "deprecated_last_timestamp", deprecated_last_timestamp)
        if deprecated_source is not None:
            pulumi.set(__self__, "deprecated_source", deprecated_source)
        if kind is not None:
            # Pinned to the resource kind, like api_version above.
            pulumi.set(__self__, "kind", 'Event')
        if note is not None:
            pulumi.set(__self__, "note", note)
        if reason is not None:
            pulumi.set(__self__, "reason", reason)
        if regarding is not None:
            pulumi.set(__self__, "regarding", regarding)
        if related is not None:
            pulumi.set(__self__, "related", related)
        if reporting_controller is not None:
            pulumi.set(__self__, "reporting_controller", reporting_controller)
        if reporting_instance is not None:
            pulumi.set(__self__, "reporting_instance", reporting_instance)
        if series is not None:
            pulumi.set(__self__, "series", series)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="eventTime")
    def event_time(self) -> str:
        """
        eventTime is the time when this Event was first observed. It is required.
        """
        return pulumi.get(self, "event_time")

    @property
    @pulumi.getter
    def metadata(self) -> '_meta.v1.outputs.ObjectMeta':
        return pulumi.get(self, "metadata")

    @property
    @pulumi.getter
    def action(self) -> Optional[str]:
        """
        action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.
        """
        return pulumi.get(self, "action")

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return pulumi.get(self, "api_version")

    @property
    @pulumi.getter(name="deprecatedCount")
    def deprecated_count(self) -> Optional[int]:
        """
        deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
        """
        return pulumi.get(self, "deprecated_count")

    @property
    @pulumi.getter(name="deprecatedFirstTimestamp")
    def deprecated_first_timestamp(self) -> Optional[str]:
        """
        deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
        """
        return pulumi.get(self, "deprecated_first_timestamp")

    @property
    @pulumi.getter(name="deprecatedLastTimestamp")
    def deprecated_last_timestamp(self) -> Optional[str]:
        """
        deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
        """
        return pulumi.get(self, "deprecated_last_timestamp")

    @property
    @pulumi.getter(name="deprecatedSource")
    def deprecated_source(self) -> Optional['_core.v1.outputs.EventSource']:
        """
        deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
        """
        return pulumi.get(self, "deprecated_source")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def note(self) -> Optional[str]:
        """
        note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
        """
        return pulumi.get(self, "note")

    @property
    @pulumi.getter
    def reason(self) -> Optional[str]:
        """
        reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.
        """
        return pulumi.get(self, "reason")

    @property
    @pulumi.getter
    def regarding(self) -> Optional['_core.v1.outputs.ObjectReference']:
        """
        regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
        """
        return pulumi.get(self, "regarding")

    @property
    @pulumi.getter
    def related(self) -> Optional['_core.v1.outputs.ObjectReference']:
        """
        related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
        """
        return pulumi.get(self, "related")

    @property
    @pulumi.getter(name="reportingController")
    def reporting_controller(self) -> Optional[str]:
        """
        reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.
        """
        return pulumi.get(self, "reporting_controller")

    @property
    @pulumi.getter(name="reportingInstance")
    def reporting_instance(self) -> Optional[str]:
        """
        reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.
        """
        return pulumi.get(self, "reporting_instance")

    @property
    @pulumi.getter
    def series(self) -> Optional['outputs.EventSeries']:
        """
        series is data about the Event series this event represents or nil if it's a singleton Event.
        """
        return pulumi.get(self, "series")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.
        """
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # Maps camelCase wire names to this class's snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: generated by pulumigen -- prefer changing the generator over
# hand-editing this class.
@pulumi.output_type
class EventSeries(dict):
    """
    EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows how this struct is updated on heartbeats and can guide customized reporter implementations.
    """
    def __init__(__self__, *,
                 count: int,
                 last_observed_time: str):
        """
        EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows how this struct is updated on heartbeats and can guide customized reporter implementations.
        :param int count: count is the number of occurrences in this series up to the last heartbeat time.
        :param str last_observed_time: lastObservedTime is the time when last Event from the series was seen before last heartbeat.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "last_observed_time", last_observed_time)

    @property
    @pulumi.getter
    def count(self) -> int:
        """
        count is the number of occurrences in this series up to the last heartbeat time.
        """
        return pulumi.get(self, "count")

    @property
    @pulumi.getter(name="lastObservedTime")
    def last_observed_time(self) -> str:
        """
        lastObservedTime is the time when last Event from the series was seen before last heartbeat.
        """
        return pulumi.get(self, "last_observed_time")

    def _translate_property(self, prop):
        # Maps camelCase wire names to this class's snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 54.782772 | 454 | 0.694196 | [
"Apache-2.0"
] | sunbing81/pulumi-kubernetes | sdk/python/pulumi_kubernetes/events/v1/outputs.py | 14,627 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-17 21:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('UserManagement', '0028_auto_20170316_1206'),
]
operations = [
migrations.AddField(
model_name='course',
name='institution_abbr',
field=models.CharField(blank=True, max_length=10, null=True),
),
]
| 23.285714 | 73 | 0.633947 | [
"MIT"
] | SkillSmart/ConferenceManagementSystem | UserManagement/migrations/0029_course_institution_abbr.py | 489 | Python |
import math
t = int(raw_input())
for i in range(t) :
n = int(raw_input())
print math.factorial(n)
'''Why using math.factorial() is faster?
beacuse many of the Python libraries are in C or C++ and not it Python.
Hence the speed improves.'''
| 22.727273 | 74 | 0.684 | [
"MIT"
] | tapaswenipathak/Competitive-Programming | CodeChef/FCTRL2.py | 250 | Python |
"""Models for assessing primal feasibility"""
from __future__ import unicode_literals
from .set import ConstraintSet
from ..nomials import Variable, VectorVariable, parse_subs, NomialArray
from ..keydict import KeyDict
from .. import NamedVariables, SignomialsEnabled
class ConstraintsRelaxedEqually(ConstraintSet):
"""Relax constraints the same amount, as in Eqn. 10 of [Boyd2007].
Arguments
---------
constraints : iterable
Constraints which will be relaxed (made easier).
Attributes
----------
relaxvar : Variable
The variable controlling the relaxation. A solved value of 1 means no
relaxation. Higher values indicate the amount by which all constraints
have been made easier: e.g., a value of 1.5 means all constraints were
50 percent easier in the final solution than in the original problem.
[Boyd2007] : "A tutorial on geometric programming", Optim Eng 8:67-122
"""
def __init__(self, constraints):
if not isinstance(constraints, ConstraintSet):
constraints = ConstraintSet(constraints)
substitutions = dict(constraints.substitutions)
relconstraints = []
self.origconstrs = []
with NamedVariables("Relax"):
self.relaxvar = Variable("C")
with SignomialsEnabled():
for constraint in constraints.flat():
self.origconstrs.append(constraint)
relconstraints.append(constraint.relaxed(self.relaxvar))
ConstraintSet.__init__(self, {
"relaxed constraints": relconstraints,
"minimum relaxation": self.relaxvar >= 1}, substitutions)
class ConstraintsRelaxed(ConstraintSet):
    """Relax each constraint by its own factor (Eqn. 11 of [Boyd2007]).

    Arguments
    ---------
    constraints : iterable
        Constraints which will be relaxed (made easier).

    Attributes
    ----------
    relaxvars : Variable
        The variables controlling the relaxation. A solved value of 1 means no
        relaxation was necessary or optimal for a particular constraint.
        Higher values indicate the amount by which that constraint has been
        made easier: e.g., a value of 1.5 means it was made 50 percent easier
        in the final solution than in the original problem.

    [Boyd2007] : "A tutorial on geometric programming", Optim Eng 8:67-122
    """
    def __init__(self, constraints):
        # Wrap plain iterables so .substitutions and .flat() are available.
        if not isinstance(constraints, ConstraintSet):
            constraints = ConstraintSet(constraints)
        substitutions = dict(constraints.substitutions)
        # One relaxation variable per top-level constraint, under "Relax".
        with NamedVariables("Relax"):
            self.relaxvars = VectorVariable(len(constraints), "C")
        self.origconstrs = []
        relconstraints = []
        # relaxed() may produce signomial constraints, hence the context.
        with SignomialsEnabled():
            for idx, constr in enumerate(constraints.flat()):
                self.origconstrs.append(constr)
                relconstraints.append(constr.relaxed(self.relaxvars[idx]))
        ConstraintSet.__init__(
            self,
            {"relaxed constraints": relconstraints,
             "minimum relaxation": self.relaxvars >= 1},
            substitutions)
class ConstantsRelaxed(ConstraintSet):
    """Relax constants in a constraintset.
    Arguments
    ---------
    constraints : iterable
        Constraints which will be relaxed (made easier).
    include_only : set (optional)
        variable names must be in this set to be relaxed
    exclude : set (optional)
        variable names in this set will never be relaxed
    Attributes
    ----------
    relaxvars : Variable
        The variables controlling the relaxation. A solved value of 1 means no
        relaxation was necessary or optimal for a particular constant.
        Higher values indicate the amount by which that constant has been
        made easier: e.g., a value of 1.5 means it was made 50 percent easier
        in the final solution than in the original problem. Of course, this
        can also be determined by looking at the constant's new value directly.
    """
    # pylint:disable=too-many-locals
    def __init__(self, constraints, include_only=None, exclude=None):
        # Normalize to a ConstraintSet so .substitutions/.varkeys exist.
        if not isinstance(constraints, ConstraintSet):
            constraints = ConstraintSet(constraints)
        # Empty frozensets act as "no filter".
        exclude = frozenset(exclude) if exclude else frozenset()
        include_only = frozenset(include_only) if include_only else frozenset()
        substitutions = KeyDict(constraints.substitutions)
        # Split substitutions into fixed constants and linked (callable) ones.
        constants, _, linked = parse_subs(constraints.varkeys, substitutions)
        constrained_varkeys = constraints.constrained_varkeys()
        if linked:
            # Evaluate linked functions against the fixed constants so they
            # can be relaxed like ordinary constants; only keys that actually
            # appear in constraints are kept.
            kdc = KeyDict(constants)
            constants.update({k: f(kdc) for k, f in linked.items()
                              if k in constrained_varkeys})
        self.constants = constants
        relaxvars, self.origvars, relaxation_constraints = [], [], {}
        # Enter/exit the context only to capture the "Relax" lineage tuple;
        # no variables are created inside the `with` body itself.
        with NamedVariables("Relax") as (self.lineage, _):
            pass
        # Maps each shadow "OriginalValues" varkey back to the original key,
        # used by process_result() to re-attribute sensitivities.
        self._unrelaxmap = {}
        for key, value in constants.items():
            if value == 0:
                # A zero constant cannot be relaxed multiplicatively; skip.
                continue
            elif include_only and key.name not in include_only:
                continue
            elif key.name in exclude:
                continue
            # Drop stale gradient info before copying the key's description.
            key.descr.pop("gradients", None)
            descr = key.descr.copy()
            descr.pop("veckey", None)
            # Nest the new variables under this instance's "Relax" lineage.
            descr["lineage"] = descr.pop("lineage", ())+(self.lineage[-1],)
            relaxvardescr = descr.copy()
            # Relaxation factors are dimensionless ratios.
            relaxvardescr["unitrepr"] = "-"
            relaxvar = Variable(**relaxvardescr)
            relaxvars.append(relaxvar)
            # The constant becomes a free variable: remove its substitution
            # and recreate it from the *original* (un-renamed) description.
            del substitutions[key]
            var = Variable(**key.descr)
            self.origvars.append(var)
            # A shadow variable pinned to the original value, distinguished
            # by an extra "OriginalValues" lineage entry.
            unrelaxeddescr = descr.copy()
            unrelaxeddescr["lineage"] += (("OriginalValues", 0),)
            unrelaxed = Variable(**unrelaxeddescr)
            self._unrelaxmap[unrelaxed.key] = key
            substitutions[unrelaxed] = value
            # Bound the freed variable within [original/relaxvar,
            # original*relaxvar], with relaxvar >= 1 (no "negative" relaxing).
            relaxation_constraints[str(key)] = [relaxvar >= 1,
                                                unrelaxed/relaxvar <= var,
                                                var <= unrelaxed*relaxvar]
        self.relaxvars = NomialArray(relaxvars)
        ConstraintSet.__init__(self, {
            "original constraints": constraints,
            "relaxation constraints": relaxation_constraints})
        # Set after __init__ so the pinned "OriginalValues" substitutions
        # (and remaining constants) take effect on the assembled set.
        self.substitutions = substitutions

    def process_result(self, result):
        # Re-attribute sensitivities from the shadow "OriginalValues" keys
        # back to the original constant keys, then drop the shadow entries.
        ConstraintSet.process_result(self, result)
        csenss = result["sensitivities"]["constants"]
        for const, origvar in self._unrelaxmap.items():
            csenss[origvar] = csenss[const]
            del csenss[const]
| 40.391566 | 79 | 0.633557 | [
"MIT"
] | giserh/gpkit | gpkit/constraints/relax.py | 6,705 | Python |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.