ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M) |
---|---|---|
py | 1a5247fec4fe61fe14a12f57f03864aab6e91ab3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Name: PyAnime4K utils
Author: TianZerL
Editor: TianZerL
"""
from pyanime4k import ffmpeg_handler
import contextlib
import os
def migrate_audio_streams(
upscaled_video: str, original_video: str, output_path: str
) -> None:
""" migrate audio streams
Args:
upscaled_video (str): path of upscaled video.
original_video (str): path of original video.
output_path (str): path to output result.
Raises:
FileExistsError: when output path exists and isn't a directory
"""
ffmpeg_handler.migrate_audio_streams(
upscaled_video=upscaled_video,
original_video=original_video,
output_path=output_path,
)
with contextlib.suppress(FileNotFoundError):
os.remove(upscaled_video)
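# Hedged usage sketch (not part of the original module): the file names below are
# illustrative placeholders, not values taken from the project.
if __name__ == "__main__":
    migrate_audio_streams(
        upscaled_video="video_upscaled.mp4",
        original_video="video.mp4",
        output_path="./output",
    )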
|
py | 1a5248489938637fdc7d3a3d5551d3a74ef65f15 | """Experiment data."""
|
py | 1a5248490a77963f006a3f96c43a207bf2992808 | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
scope_path: Union[str, "_models.ItemScopePath"],
*,
scope: Optional[Union[str, "_models.ItemScope"]] = None,
type: Optional[Union[str, "_models.ItemTypeParameter"]] = "none",
include_content: Optional[bool] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/{scopePath}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"scopePath": _SERIALIZER.url("scope_path", scope_path, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if scope is not None:
_query_parameters['scope'] = _SERIALIZER.query("scope", scope, 'str')
if type is not None:
_query_parameters['type'] = _SERIALIZER.query("type", type, 'str')
if include_content is not None:
_query_parameters['includeContent'] = _SERIALIZER.query("include_content", include_content, 'bool')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
scope_path: Union[str, "_models.ItemScopePath"],
*,
id: Optional[str] = None,
name: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/{scopePath}/item") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"scopePath": _SERIALIZER.url("scope_path", scope_path, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if id is not None:
_query_parameters['id'] = _SERIALIZER.query("id", id, 'str')
if name is not None:
_query_parameters['name'] = _SERIALIZER.query("name", name, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_put_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
scope_path: Union[str, "_models.ItemScopePath"],
*,
json: JSONType = None,
content: Any = None,
override_item: Optional[bool] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/{scopePath}/item") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"scopePath": _SERIALIZER.url("scope_path", scope_path, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if override_item is not None:
_query_parameters['overrideItem'] = _SERIALIZER.query("override_item", override_item, 'bool')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
scope_path: Union[str, "_models.ItemScopePath"],
*,
id: Optional[str] = None,
name: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/{scopePath}/item") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"scopePath": _SERIALIZER.url("scope_path", scope_path, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if id is not None:
_query_parameters['id'] = _SERIALIZER.query("id", id, 'str')
if name is not None:
_query_parameters['name'] = _SERIALIZER.query("name", name, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_query_parameters,
**kwargs
)
class AnalyticsItemsOperations(object):
"""AnalyticsItemsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.applicationinsights.v2015_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
scope_path: Union[str, "_models.ItemScopePath"],
scope: Optional[Union[str, "_models.ItemScope"]] = None,
type: Optional[Union[str, "_models.ItemTypeParameter"]] = "none",
include_content: Optional[bool] = None,
**kwargs: Any
) -> List["_models.ApplicationInsightsComponentAnalyticsItem"]:
"""Gets a list of Analytics Items defined within an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:param scope_path: Enum indicating if this item definition is owned by a specific user or is
shared between all users with access to the Application Insights component.
:type scope_path: str or ~azure.mgmt.applicationinsights.v2015_05_01.models.ItemScopePath
:param scope: Enum indicating if this item definition is owned by a specific user or is shared
between all users with access to the Application Insights component. Default value is None.
:type scope: str or ~azure.mgmt.applicationinsights.v2015_05_01.models.ItemScope
:param type: Enum indicating the type of the Analytics item. Default value is "none".
:type type: str or ~azure.mgmt.applicationinsights.v2015_05_01.models.ItemTypeParameter
:param include_content: Flag indicating whether or not to return the content of each applicable
item. If false, only return the item information. Default value is None.
:type include_content: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ApplicationInsightsComponentAnalyticsItem, or the result of cls(response)
:rtype:
list[~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentAnalyticsItem]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationInsightsComponentAnalyticsItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
scope_path=scope_path,
api_version=api_version,
scope=scope,
type=type,
include_content=include_content,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[ApplicationInsightsComponentAnalyticsItem]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/{scopePath}"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
scope_path: Union[str, "_models.ItemScopePath"],
id: Optional[str] = None,
name: Optional[str] = None,
**kwargs: Any
) -> "_models.ApplicationInsightsComponentAnalyticsItem":
"""Gets a specific Analytics Items defined within an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:param scope_path: Enum indicating if this item definition is owned by a specific user or is
shared between all users with access to the Application Insights component.
:type scope_path: str or ~azure.mgmt.applicationinsights.v2015_05_01.models.ItemScopePath
:param id: The Id of a specific item defined in the Application Insights component. Default
value is None.
:type id: str
:param name: The name of a specific item defined in the Application Insights component. Default
value is None.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationInsightsComponentAnalyticsItem, or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentAnalyticsItem
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentAnalyticsItem"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
scope_path=scope_path,
api_version=api_version,
id=id,
name=name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationInsightsComponentAnalyticsItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/{scopePath}/item"} # type: ignore
@distributed_trace
def put(
self,
resource_group_name: str,
resource_name: str,
scope_path: Union[str, "_models.ItemScopePath"],
item_properties: "_models.ApplicationInsightsComponentAnalyticsItem",
override_item: Optional[bool] = None,
**kwargs: Any
) -> "_models.ApplicationInsightsComponentAnalyticsItem":
"""Adds or Updates a specific Analytics Item within an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:param scope_path: Enum indicating if this item definition is owned by a specific user or is
shared between all users with access to the Application Insights component.
:type scope_path: str or ~azure.mgmt.applicationinsights.v2015_05_01.models.ItemScopePath
:param item_properties: Properties that need to be specified to create a new item and add it to
an Application Insights component.
:type item_properties:
~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentAnalyticsItem
:param override_item: Flag indicating whether or not to force save an item. This allows
overriding an item if it already exists. Default value is None.
:type override_item: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationInsightsComponentAnalyticsItem, or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentAnalyticsItem
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentAnalyticsItem"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(item_properties, 'ApplicationInsightsComponentAnalyticsItem')
request = build_put_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
scope_path=scope_path,
api_version=api_version,
content_type=content_type,
json=_json,
override_item=override_item,
template_url=self.put.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationInsightsComponentAnalyticsItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
put.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/{scopePath}/item"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_name: str,
scope_path: Union[str, "_models.ItemScopePath"],
id: Optional[str] = None,
name: Optional[str] = None,
**kwargs: Any
) -> None:
"""Deletes a specific Analytics Items defined within an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:param scope_path: Enum indicating if this item definition is owned by a specific user or is
shared between all users with access to the Application Insights component.
:type scope_path: str or ~azure.mgmt.applicationinsights.v2015_05_01.models.ItemScopePath
:param id: The Id of a specific item defined in the Application Insights component. Default
value is None.
:type id: str
:param name: The name of a specific item defined in the Application Insights component. Default
value is None.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2015-05-01") # type: str
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
scope_path=scope_path,
api_version=api_version,
id=id,
name=name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/{scopePath}/item"} # type: ignore
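# Hedged usage sketch (not part of the generated file): this operation group is normally
# reached through the versioned management client rather than instantiated directly. The
# client import path and the `analytics_items` attribute name are assumptions based on the
# usual AutoRest package layout, and the scope_path value is one of the ItemScopePath choices.
def _example_list_analytics_items(credential, subscription_id, resource_group, component_name):
    from azure.mgmt.applicationinsights.v2015_05_01 import ApplicationInsightsManagementClient
    client = ApplicationInsightsManagementClient(credential, subscription_id)
    return client.analytics_items.list(
        resource_group_name=resource_group,
        resource_name=component_name,
        scope_path="analyticsItems",
    )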
|
py | 1a5248dd1c00732eabd60282f65fc481d0d83709 | from django import forms
from apps.accounts import models as mdl_account
class LoginForm(forms.Form):
username = forms.CharField(label="Username", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
password = forms.CharField(label="Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type':'password'
}))
class RegisterMemberForm(forms.Form):
CHOICE_GENDER = (
("", "---"),
("L", "MALE"),
("F", "FEMALE")
)
username = forms.CharField(label="Username", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
first_name = forms.CharField(label="First Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
last_name = forms.CharField(label="Last Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
member_card = forms.ModelChoiceField(queryset=mdl_account.CardMember.objects.all(), label="Member Card", widget=forms.Select(attrs={
'class': 'form-control'
}))
email = forms.CharField(label="Email", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'email'
}))
gender = forms.CharField(label="Gender", widget=forms.Select(choices=CHOISE_GENDER,
attrs={
'class': 'form-control'
}))
password = forms.CharField(label="Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'password'
}))
password2 = forms.CharField(label="Confirm Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'password'
}))
photo = forms.ImageField(label='Photo')
class CustomerEditForm(forms.Form):
GENDER = (
('m', 'Male'),
('f', "Female")
)
username = forms.CharField(label="Username", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
first_name = forms.CharField(label="First Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
last_name = forms.CharField(label="Last Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
password = forms.CharField(label="Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'password',
'placeholder': 'change your password in here'
}), required=False)
photo = forms.ImageField(label='Photo', required=False)
card_member = forms.ModelChoiceField(label='Card Member', queryset=mdl_account.CardMember.objects.all(), widget=forms.Select(
attrs={
'class': 'form-control'
}
))
gender = forms.CharField(label='Gender', widget=forms.Select(attrs={
'class': 'form-control'
},choices=GENDER))
class CustomersForm(forms.Form):
GENDER = (
('m', 'Male'),
('f', "Female")
)
username = forms.CharField(label="Username", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
first_name = forms.CharField(label="First Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
last_name = forms.CharField(label="Last Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
password = forms.CharField(label="Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'password'
}))
password2 = forms.CharField(label="Confirm Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'password'
}))
card_member = forms.ModelChoiceField(label='Card Member', queryset=mdl_account.CardMember.objects.all(), widget=forms.Select(
attrs={
'class': 'form-control'
}
))
# gender = forms.CharField(label='Gender', widget=forms.ChoiceField(choices=GENDER))
gender = forms.CharField(label='Gender', widget=forms.Select(attrs={
'class': 'form-control'
},choices=GENDER))
photo = forms.ImageField(required=False)
class SalesForm(forms.Form):
username = forms.CharField(label="Username", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
first_name = forms.CharField(label="First Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
last_name = forms.CharField(label="Last Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
password = forms.CharField(label="Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'password'
}))
password2 = forms.CharField(label="Confirm Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'password'
}))
address = forms.CharField(label="Address", widget=forms.TextInput(attrs={
'class': 'form-control',
}))
nik_numb = forms.CharField(label="NIK", widget=forms.TextInput(attrs={
'class': 'form-control',
}))
ktp_image = forms.ImageField()
class SalesEditForm(forms.Form):
username = forms.CharField(label="Username", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
first_name = forms.CharField(label="First Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
last_name = forms.CharField(label="Last Name", widget=forms.TextInput(attrs={
'class': 'form-control'
}))
password = forms.CharField(label="Password", widget=forms.TextInput(attrs={
'class': 'form-control',
'type': 'password',
'placeholder': 'change your password in here'
}), required=False)
address = forms.CharField(label="Address", widget=forms.TextInput(attrs={
'class': 'form-control',
}))
nik_numb = forms.CharField(label="NIK", widget=forms.TextInput(attrs={
'class': 'form-control',
}))
ktp_image = forms.ImageField(required=False)
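# Hedged usage sketch (not part of the original app): a minimal view showing how LoginForm
# might be validated. The view name and the authenticate/login flow are illustrative
# assumptions, not code from this project.
def _example_login_view(request):
    from django.contrib.auth import authenticate, login
    from django.http import HttpResponse
    form = LoginForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        user = authenticate(
            username=form.cleaned_data["username"],
            password=form.cleaned_data["password"],
        )
        if user is not None:
            login(request, user)
            return HttpResponse("logged in")
    return HttpResponse("invalid credentials or form errors", status=400)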
|
py | 1a524a0775baad0ed1f4fe8ca089c415c611cc28 | # 1 - Write a program that asks for two integers and prints their sum
n1 = int(input('First number: ')) # ask the user for the first number
n2 = int(input('Second number: ')) # ask the user for the second number
soma = n1 + n2 # compute the sum and store it in a variable
print(soma) # print the result stored in the soma variable |
py | 1a524a6f3dc6e4f0dadda5d23d0ed98a5a0731bd | from os.path import (
realpath,
join,
)
from typing import List
from hummingbot.core.utils.symbol_fetcher import SymbolFetcher
# Global variables
required_exchanges: List[str] = []
symbol_fetcher = SymbolFetcher.get_instance()
# Global static values
KEYFILE_PREFIX = "key_file_"
KEYFILE_POSTFIX = ".json"
GLOBAL_CONFIG_PATH = "conf/conf_global.yml"
TOKEN_ADDRESSES_FILE_PATH = realpath(join(__file__, "../../wallet/ethereum/erc20_tokens.json"))
DEFAULT_KEY_FILE_PATH = "conf/"
DEFAULT_LOG_FILE_PATH = "logs/"
DEFAULT_ETHEREUM_RPC_URL = "https://mainnet.coinalpha.com/hummingbot-test-node"
TEMPLATE_PATH = realpath(join(__file__, "../../templates/"))
CONF_FILE_PATH = "conf/"
CONF_PREFIX = "conf_"
CONF_POSTFIX = "_strategy"
EXCHANGES = {
"bamboo_relay",
"binance",
"coinbase_pro",
"ddex",
"idex",
"radar_relay",
}
DEXES = {
"bamboo_relay",
"ddex",
"idex",
"radar_relay",
}
STRATEGIES = {
"cross_exchange_market_making",
"arbitrage",
"discovery",
"pure_market_making",
}
EXAMPLE_PAIRS = {
"binance": "ZRXETH",
"ddex": "ZRX-WETH",
"idex": "ETH_ZRX",
"radar_relay": "ZRX-WETH",
"bamboo_relay": "ZRX-WETH",
"coinbase_pro": "ETH-USDC",
}
MAXIMUM_OUTPUT_PANE_LINE_COUNT = 1000
MAXIMUM_LOG_PANE_LINE_COUNT = 1000
# Liquidity Bounties:
LIQUIDITY_BOUNTY_CONFIG_PATH = "conf/conf_liquidity_bounty.yml"
MIN_ETH_STAKED_REQUIREMENT = 0.05
|
py | 1a524b6be8578cda8dc10848f051a0429bd67e58 | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import tensorflow as tf
from dltk.core.modules.base import AbstractModule
class TransposedConvolution(AbstractModule):
"""Tranposed convolution module
This build a 2D or 3D transposed convolution based on the dimensionality of the input
"""
def __init__(self, out_filters, strides=(1, 1, 1), filter_shape=None, use_bias=False, name='conv_transposed'):
"""Constructs a transposed convolution
The kernel shape is defined as 2 * stride for stride > 1
Parameters
----------
out_filters : int
number of output filters
strides : tuple or list, optional
strides used for the transposed convolution
use_bias : bool
flag to toggle whether a bias is added to the output
name : string
name of the module
"""
self.in_shape = None
self.in_filters = None
self.out_filters = out_filters
self.out_shape = None
self.strides = strides
self.use_bias = use_bias
self.filter_shape = filter_shape
self.full_strides =[1,] + list(self.strides) + [1,]
self._rank = len(list(self.strides))
assert 1 < self._rank < 4, 'Transposed convolutions are only supported in 2D and 3D'
super(TransposedConvolution, self).__init__(name=name)
def _get_kernel(self):
"""Builds the kernel for the transposed convolution
Returns
-------
tf.Variable
kernel for the transposed convolution
"""
kernel_shape = tuple(self.up_spatial_shape + [self.out_filters, self.in_filters])
k = tf.get_variable("k", shape=kernel_shape, initializer=tf.uniform_unit_scaling_initializer(),
collections=self.WEIGHT_COLLECTIONS)
return k
def _build(self, inp):
"""Applies a transposed convolution to the input tensor
Parameters
----------
inp : tf.Tensor
input tensor
Returns
-------
tf.Tensor
output of transposed convolution
"""
assert (len(inp.get_shape().as_list()) - 2) == self._rank, \
'The input has {} dimensions but this is a {}D convolution'.format(
len(inp.get_shape().as_list()), self._rank)
self.in_shape = tuple(inp.get_shape().as_list())
if self.in_filters is None:
self.in_filters = self.in_shape[-1]
assert self.in_filters == self.in_shape[-1], 'Convolution was built for different number of channels'
inp_shape = tf.shape(inp)
if self.filter_shape is None:
self.up_spatial_shape = [2 * s if s > 1 else 1 for s in self.strides]
else:
self.up_spatial_shape = self.filter_shape
self.out_shape = [inp_shape[i] * self.full_strides[i] for i in range(len(self.in_shape) - 1)] + [self.out_filters,]
self._k = self._get_kernel()
self.variables.append(self._k)
conv_op = tf.nn.conv3d_transpose
if self._rank == 2:
conv_op = tf.nn.conv2d_transpose
outp = conv_op(inp, self._k, output_shape=self.out_shape, strides=self.full_strides, padding='SAME',
name='conv_transposed')
if self.use_bias:
self._b = tf.get_variable("b", shape=(self.out_filters,), initializer=tf.constant_initializer())
self.variables.append(self._b)
outp += self._b
outp.set_shape([self.in_shape[i] * self.full_strides[i] if isinstance(self.in_shape[i], int) else None
for i in range(len(self.in_shape) - 1)] + [self.out_filters,])
return outp
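# Hedged usage sketch (not part of the original module): it assumes the Sonnet-style call
# convention of dltk's AbstractModule (instantiate the module, then call it on a tensor);
# the placeholder shape below is illustrative.
# x = tf.placeholder(tf.float32, shape=[1, 8, 8, 8, 16])
# upsample = TransposedConvolution(out_filters=8, strides=(2, 2, 2))
# y = upsample(x)  # expected spatial size: twice the input in each dimension, 8 channels
|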
py | 1a524b7fa815f15b9e54c7d994169ffe28591f4a | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from user.models import User
from entities.models import Entity
import ucsv as csv
class Command(BaseCommand):
args = '<members_file>'
help = 'import members from a csv file'
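    # Hedged reference (not part of the original command): the CSV is expected to contain a
    # `username` column; the remaining columns read below are optional, e.g.
    # username,first_name,last_name,email,locality,gender,password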
def handle(self, *args, **options):
file_name = args[0]
f = open(file_name, 'rb')
d = csv.DictReader(f)
for row in d:
username = row['username']
if User.objects.filter(username=username).exists():
print('User %s exists.' % username)
else:
first_name = row.get('first_name', '')
last_name = row.get('last_name', '')
email = row.get('email', '')
locality = row.get('locality', '')
gender = row.get('gender', '')
password = row.get('password', '')
user = User(
username=username,
email=email,
first_name=first_name,
last_name=last_name,
)
user.set_password(password)
user.save()
user.profile.gender = gender
try:
user.profile.locality = Entity.objects.get(id=locality)
except ObjectDoesNotExist:
print('user %s locality id %s does not exist' % (username, locality))
user.profile.save()
|
py | 1a524bc43ed63383a99a8045d801cb436631b354 | """
Name : c14_13_average_price_call.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : [email protected]
[email protected]
"""
import scipy as sp
s0=40. # today stock price
x=40. # exercise price
T=0.5 # maturity in years
r=0.05 # risk-free rate
sigma=0.2 # volatility (annualized)
sp.random.seed(123) # fix a seed here
n_simulation=100 # number of simulations
n_steps=100. # number of steps
#
dt=T/n_steps
call=sp.zeros([n_simulation], dtype=float)
for j in range(0, n_simulation):
sT=s0
total=0
for i in range(0,int(n_steps)):
e=sp.random.normal()
sT*=sp.exp((r-0.5*sigma*sigma)*dt+sigma*e*sp.sqrt(dt))
total+=sT
price_average=total/n_steps
call[j]=max(price_average-x,0)
#
call_price=sp.mean(call)*sp.exp(-r*T)
print('call price based on average price = ', round(call_price,3))
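# Hedged alternative sketch (not part of the book's listing): the same arithmetic-average
# Asian call estimated with a vectorized numpy simulation; the figure differs slightly from
# the loop above because the random draws differ.
import numpy as np
_z = np.random.standard_normal((n_simulation, int(n_steps)))
_paths = s0 * np.exp(np.cumsum((r - 0.5 * sigma * sigma) * dt + sigma * np.sqrt(dt) * _z, axis=1))
_call_vectorized = np.exp(-r * T) * np.maximum(_paths.mean(axis=1) - x, 0).mean()
print('vectorized estimate of the average price call = ', round(_call_vectorized, 3))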
|
py | 1a524c0977c8fdb86f847b135137fdc14a66114b | """MIT License
Copyright (c) 2021 Jacopo Schiavon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
import jax.numpy as jnp
from jax.ops import index_update, index
from typing import NamedTuple, Union
from .linesearch import wolfe_linesearch, LineSearchParameter
class OptimizerParams(NamedTuple):
"""
Parameters for the optimizer.
Arguments:
- maxtime (float, default 100)
maximum run time
- maxiter (int, default 100)
maximum number of iterations
- mingradnorm (float, default 1e-8)
minimum gradient norm
- minstepsize (float, default 1e-16)
minimum length of the stepsize
- maxcostevals (int, default 5000)
maximum number of cost evaluations
- verbosity (int, default 0)
Level of information logged by the solver while it operates,
0 is silent, 1 basic info on status, 2 info per iteration,
3 info per linesearch iteration
- logverbosity (bool, default False)
Whether to produce a log of the optimization
"""
maxtime: Union[float, jnp.ndarray] = 100
maxiter: Union[int, jnp.ndarray] = 500
mingradnorm: Union[float, jnp.ndarray] = 1e-6
minstepsize: Union[float, jnp.ndarray] = 1e-16
maxcostevals: Union[int, jnp.ndarray] = 5000
memory: Union[int, jnp.ndarray] = 4
verbosity: Union[int, jnp.ndarray] = 0
logverbosity: Union[bool, jnp.ndarray] = False
class OptimizerResult(NamedTuple):
"""
Object holding optimization results.
Components:
- name:
name of the optimizer
- success:
True if optimization succeeded.
- status:
integer solver specific return code. 0 means nominal.
- message:
solver specific message that explains status.
- x:
final solution.
- fun:
final function value.
- gr:
final gradient array.
- grnorm:
norm of the gradient.
- nfev:
integer number of function evaluations.
- ngev:
integer number of gradient evaluations.
- nit:
integer number of iterations of the optimization algorithm.
- stepsize:
length of the final stepsize
- time:
time used by the optimization
"""
name: str
success: Union[bool, jnp.ndarray]
status: Union[int, jnp.ndarray]
message: str
x: jnp.ndarray
fun: jnp.ndarray
gr: jnp.ndarray
grnorm: jnp.ndarray
nfev: Union[int, jnp.ndarray]
ngev: Union[int, jnp.ndarray]
nit: Union[int, jnp.ndarray]
stepsize: jnp.ndarray
time: jnp.ndarray
def __str__(self):
"""String representation."""
try:
sz = self.x.size
except AttributeError:
sz = sum(x.size for x in self.x)
return (
"{}.\n---\nSuccess: {} with status {} in {:.3f} s.\n"
"[{}]\n"
" -Iterations {} (cost evaluation: {}, gradient evaluation: {}, "
"time/it: {})\n"
" \t Function value {:.3f}, gradient norm {}, stepsize {},\n"
" \t value of X:\n{}"
).format(
self.name,
self.success, self.status, self.time, self.message,
self.nit, self.nfev, self.ngev, self.time / self.nit,
self.fun, self.grnorm, self.stepsize,
self.x if sz < 50 else '\t... Too big to show...'
)
def pprint(self):
"""Print a concise summary of the result."""
message = "Optimization {}completed (status {}).".format("" if self.success else "not ", self.status)
details = "{} iterations in {:.3f} s".format(self.nit, self.time)
print(message + "\t" + details)
class OptimizerLog(NamedTuple):
"""
Object holding optimization log.
Components:
- name:
name of the optimizer
- fun:
sequence of function value.
- x:
sequence of data points.
- grnorm:
sequence of gradient norm.
- beta:
sequence of computed beta.
- fev:
sequence of function evaluations.
- gev:
sequence of gradient evaluations.
- it:
iterations.
- stepsize:
sequence of length of stepsize.
- time
sequence of times.
"""
name: str = ''
fun: jnp.ndarray = jnp.array([])
x: list = []
grnorm: jnp.ndarray = jnp.array([])
fev: jnp.ndarray = jnp.array([], dtype=int)
gev: jnp.ndarray = jnp.array([], dtype=int)
it: jnp.ndarray = jnp.array([], dtype=int)
stepsize: jnp.ndarray = jnp.array([])
time: jnp.ndarray = jnp.array([])
class RL_BFGS():
"""L-BFGS optimizer."""
Algo = 'Riemannian Limited memory BFGS'
def __init__(self, manifold, **pars):
"""
Riemannian Limited memory BFGS.
Mandatory arguments:
- manifold
A manifold object that defines the operations on the manifold
Optional parameters:
- maxtime (float, default 100)
maximum run time
- maxiter (int, default 100)
maximum number of iterations
- mingradnorm (float, default 1e-8)
minimum gradient norm
- minstepsize (float, default 1e-16)
minimum length of the stepsize
- maxcostevals (int, default 5000)
maximum number of cost evaluations
- verbosity (int, default 0)
Level of information logged by the solver while it operates,
0 is silent, 1 basic info on status, 2 info per iteration
- logverbosity (bool, default False)
Whether to produce a log of the optimization
Optional linesearch parameters:
- ls_maxiter (int, default 10)
maximum number of iterations
- ls_minstepsize (float, default 1e-16)
minimum length of the stepsize
- ls_optimism (float, default 1.2)
optimism of the new step
- ls_initial_step (float, default 1)
initial stepsize before linesearch
- ls_suff_decr (float, default 1e-4)
sufficient decrease parameter
- ls_contraction (float, default 0.5)
contraction factor (must be 0 < c < 1)
- ls_verbosity (int, default 0)
Level of information to be displayed:
< 3 is silent, 3+ basic info
"""
self.man = manifold
self.__name__ = ("{} on {}".format(self.Algo, str(self.man).lower()))
self._parms = OptimizerParams(
**{k: pars[k] for k in pars if k in OptimizerParams._fields}
)
self._ls_pars = LineSearchParameter(
**{k: pars[k] for k in pars if k in LineSearchParameter._fields}
)
if pars.get('ls_verbosity', None) is None:
self._ls_pars = self._ls_pars._replace(
ls_verbosity=max(0, self._parms.verbosity - 3)
)
def __str__(self):
"""Representat the optimizer as a string."""
return self.__name__
def _check_stopping_criterion(self, time0, iters=-1, grnorm=float('inf'), stepsize=float('inf'), costevals=-1):
status = - 1
if grnorm <= self._parms.mingradnorm:
status = 0
elif stepsize <= self._parms.minstepsize:
status = 1
elif iters >= self._parms.maxiter:
status = 2
elif time.time() >= time0 + self._parms.maxtime:
status = 3
elif costevals >= self._parms.maxcostevals:
status = 4
return status
def _compute_descent_direction(self, l, x, gr, gamma):
q = gr
m = self._parms.memory
H0 = gamma * jnp.identity(gr.shape[0])
alpha = jnp.zeros(shape=(l,))
if self._parms.verbosity >= 3:
print('\tm = {}; l = {}'.format(m, l))
for i in jnp.arange(m - l + 1, 0, -1):
alpha = index_update(alpha, i-1, self.rhok[i-1] * self.man.inner(x, self.sk[i-1], q))
q = q - alpha[i-1] * self.yk[i-1]
r = jnp.matmul(H0, q)
for i in jnp.arange(0, l):
beta = self.rhok[i] * self.man.inner(x, self.yk[i], r)
r = r + (alpha[i] - beta) * self.sk[i]
return -r
def solve(self, objective, gradient, x=None, key=None):
"""
Perform optimization using Riemannian limited-memory BFGS with linesearch.
At each iteration this method computes the Riemannian gradient of the
objective, builds a limited-memory approximation of the inverse Hessian from
the stored step and gradient differences, and moves along the resulting
quasi-Newton descent direction.
Arguments:
- objective : callable
The cost function to be optimized
- gradient : callable
The gradient of the cost function
- x : array (None)
Optional parameter. Starting point on the manifold. If none
then a starting point will be randomly generated.
- key: array (None)
Optional parameter, required if x is not provided to randomly
initiate the algorithm
Returns:
- OptimizerResult object
"""
msg = ("status meaning: 0=converged, 1=stepsize too small, "
"2=max iters reached, 3=max time reached, "
"4=max cost evaluations, "
"-1=undefined"
)
if self._parms.verbosity >= 1:
print('Starting {}'.format(self.__name__))
self._costev = 0
self._gradev = 0
def cost(x):
self._costev += 1
return objective(x)
def grad(x):
self._gradev += 1
return self.man.egrad2rgrad(x, gradient(x))
def ls(c_a_g, x, d, f0, df0, g0):
return wolfe_linesearch(c_a_g, x, d, f0, df0, g0, self._ls_pars)
if x is None:
try:
x = self.man.rand(key)
except TypeError:
raise ValueError("Either provide an initial point for"
" the algorithm or a valid random key"
" to perform random initialization")
k = 0
l = 0
gamma = 1.
stepsize = 1.
memorized_shape = (self._parms.memory,) + x.shape
self.sk = jnp.zeros(shape=(memorized_shape))
self.yk = jnp.zeros(shape=(memorized_shape))
self.rhok = jnp.zeros(shape=(self._parms.memory))
f0 = cost(x)
gr = grad(x)
grnorm = self.man.norm(x, gr)
d = - gr
df0 = self.man.inner(x, d, gr)
t_start = time.time()
if self._parms.logverbosity:
logs = OptimizerLog(
name="log of {}".format(self.__name__),
fun=jnp.array([f0]),
x=[x],
grnorm=jnp.array([grnorm]),
fev=jnp.array([self._costev], dtype=int),
gev=jnp.array([self._gradev], dtype=int),
it=jnp.array([k], dtype=int),
stepsize=jnp.array([1.]),
time=jnp.array([time.time() - t_start])
)
while True:
if self._parms.verbosity >= 2:
print('iter: {}\n\tfun value: {:.2f}'.format(k, f0))
print('\tgrad norm: {:.2f}'.format(grnorm))
print('\tdirectional derivative: {:.2f}'.format(df0))
status = self._check_stopping_criterion(
t_start,
k,
grnorm,
stepsize,
self._costev
)
if status >= 0:
break
def cost_and_grad(t):
xnew = self.man.retraction(x, t * d)
fn = cost(xnew)
gn = grad(xnew)
dn = self.man.inner(xnew, - gn, gn)
# dn = -jnp.sqrt(jnp.abs(dn)) if dn < 0 else jnp.sqrt(dn)
return fn, gn, dn
ls_results = ls(cost_and_grad, x, d, f0, df0, gr)
alpha = ls_results.a_k
stepsize = jnp.abs(alpha * df0)
newx = self.man.retraction(x, alpha * d)
newf = ls_results.f_k
newgr = ls_results.g_k
newgrnorm = self.man.norm(newx, newgr)
sk = self.man.vector_transport(x, alpha * d, alpha * d)
yk = newgr - self.man.vector_transport(x, alpha * d, gr)
a = self.man.inner(newx, yk, sk)
b = self.man.norm(newx, sk) ** 2
if ((a / b) >= (grnorm * 1e-4)):
c = self.man.norm(newx, yk) ** 2
rhok = 1 / a
gamma = a / c
if l == self._parms.memory:
self.sk = self.sk[1:]
self.yk = self.yk[1:]
self.rhok = self.rhok[1:]
else:
l += 1
self.sk = index_update(self.sk, index[l, :, :], sk)
self.yk = index_update(self.yk, index[l, :, :], yk)
self.rhok = index_update(self.rhok, l, rhok)
for i in range(l):
self.sk = index_update(self.sk, index[i, :, :], self.man.vector_transport(x, alpha*d, self.sk[i]))
self.yk = index_update(self.yk, index[i, :, :], self.man.vector_transport(x, alpha*d, self.yk[i]))
if self._parms.verbosity >= 2:
print('\talpha: {}'.format(alpha))
print('\tgamma: {}'.format(gamma))
print('\ta / b: {}'.format(a / b))
x = newx
f0 = newf
gr = newgr
grnorm = newgrnorm
k += 1
if l > 0:
d = self._compute_descent_direction(l, x, gr, gamma)
else:
d = - gr
df0 = self.man.inner(x, d, gr)
if self._parms.logverbosity:
logs = logs._replace(
fun=jnp.append(logs.fun, f0),
x=logs.x + [x],
grnorm=jnp.append(logs.grnorm, grnorm),
fev=jnp.append(logs.fev, self._costev),
gev=jnp.append(logs.gev, self._gradev),
it=jnp.append(logs.it, k),
stepsize=jnp.append(logs.stepsize, stepsize),
time=jnp.append(logs.time, time.time() - t_start)
)
result = OptimizerResult(
name=self.__name__,
success=True if status == 0 else False,
status=status,
message=msg,
x=x,
fun=f0,
gr=gr,
grnorm=grnorm,
nfev=self._costev,
ngev=self._gradev,
nit=k,
stepsize=stepsize,
time=(time.time() - t_start)
)
if self._parms.verbosity >= 1:
result.pprint()
if self._parms.logverbosity:
return result, logs
return result
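# Hedged usage sketch (not part of the original module): `manifold` stands for any object
# providing the operations used above (rand, inner, norm, retraction, vector_transport,
# egrad2rgrad); the concrete manifold class and cost function are assumptions.
# opt = RL_BFGS(manifold, maxiter=200, memory=4, verbosity=1)
# result = opt.solve(objective=cost_fn, gradient=jax.grad(cost_fn), key=jax.random.PRNGKey(0))
# result.pprint()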
|
py | 1a524c2a099f89355cb96968bf3c2974c8f94093 | import cv2
from flask import Flask
from scipy.spatial import distance
from extract_car import extract_car
from extract_parking import extract_parking
from extract_rectangle import extract_rectangle
app = Flask(__name__)
available_parking = []  # populated by find_parking(); stays empty until /initialize has run
def find_parking(show_output):
cap = cv2.VideoCapture("http://10.200.9.248:8080/video/mjpeg")
accumulator_free = []
accumulator_occupied = []
global available_parking
while(True):
ret, frame = cap.read()
height, width = frame.shape[:2]
frame = frame[0:height, 0:2*(width//3)]
frame_copy = frame.copy()
res = extract_parking(frame)
res, positions_free = extract_rectangle(frame, res)
res, positions_occupied = extract_car(frame, res)
for acc_free in accumulator_free:
acc_free[1] -= 1
for pos_free in positions_free:
pos_found = False
for acc_free in accumulator_free:
dist = distance.euclidean(pos_free, acc_free[0])
if dist < 10:
acc_free[1] += 2
pos_found = True
break
if not pos_found:
accumulator_free.append([pos_free, 1, False, 'f'])
i = 0
while i < len(accumulator_free):
if accumulator_free[i][1] >= 5:
accumulator_free[i][1] = 5
accumulator_free[i][2] = True
elif accumulator_free[i][1] == 0:
accumulator_free.pop(i)
continue
i += 1
total_spots = 0
for acc_free in accumulator_free:
if acc_free[2]:
cv2.circle(frame_copy, acc_free[0], 30, (0, 200, 0), -1)
total_spots += 1
#######
for acc_free in accumulator_occupied:
acc_free[1] -= 1
for pos_free in positions_occupied:
pos_found = False
for acc_free in accumulator_occupied:
dist = distance.euclidean(pos_free, acc_free[0])
if dist < 10:
acc_free[1] += 2
pos_found = True
break
if not pos_found:
accumulator_occupied.append([pos_free, 1, False, 'o'])
i = 0
while i < len(accumulator_occupied):
if accumulator_occupied[i][1] >= 5:
accumulator_occupied[i][1] = 5
accumulator_occupied[i][2] = True
elif accumulator_occupied[i][1] == 0:
accumulator_occupied.pop(i)
continue
i += 1
for acc_free in accumulator_occupied:
if acc_free[2]:
cv2.circle(frame_copy, acc_free[0], 30, (0, 0, 200), -1)
total_spots += 1
if show_output:
cv2.imshow('frame', frame_copy)
if total_spots == 3:
merged_list = accumulator_free + accumulator_occupied
spots = sorted(merged_list, key=lambda acc: acc[0][1])
spots = sorted(spots, key=lambda acc: acc[0][0])
available_parking = []
for s in range(len(spots)):
if spots[s][-1] == 'f':
available_parking.append(s)
# print(available_parking)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
if show_output:
cv2.destroyAllWindows()
@app.route('/')
def main():
"""Say hello"""
global available_parking
print(available_parking)
return "Hello World: %s" % str(available_parking)
@app.route('/initialize')
def initialize():
    global available_parking
    find_parking(False)
    return "initialized: %s" % str(available_parking)  # a Flask view must return a response
if __name__ == '__main__':
app.run(threaded=True)
|
py | 1a524cbdac517f66b1d961af486db31baea31542 | #!/usr/bin/env python -O
"""
This is the test class for testing Carbon Film resistor module algorithms and models.
"""
# -*- coding: utf-8 -*-
#
# tests.unit.TestFilm.py is part of The RTK Project
#
# All rights reserved.
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(__file__))) + "/rtk", )
import unittest
from nose.plugins.attrib import attr
from hardware.component.resistor.fixed.Film import *
from hardware.component.resistor.variable.Film import *
__author__ = 'Andrew Rowland'
__email__ = '[email protected]'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2015 Andrew "Weibullguy" Rowland'
class TestFilmModel(unittest.TestCase):
"""
Class for testing the Carbon Film resistor data model class.
"""
def setUp(self):
"""
Setup the test fixture for the Carbon Film resistor class.
"""
self.DUT = Film()
@attr(all=True, unit=True)
def test_create(self):
"""
(TestCarbonFilm) __init__ should return a Carbon Film resistor model
"""
self.assertTrue(isinstance(self.DUT, Film))
# Verify Hardware class was properly initialized.
self.assertEqual(self.DUT.revision_id, None)
self.assertEqual(self.DUT.category_id, 0)
# Verify Resistor class was properly initialized.
self.assertEqual(self.DUT.quality, 0)
# Verify the Carbon Film resistor class was properly
# initialized.
self.assertEqual(self.DUT._lst_piR, [1.0, 1.1, 1.6, 2.5])
self.assertEqual(self.DUT._lst_piE, [1.0, 2.0, 8.0, 4.0, 14.0, 4.0,
8.0, 10.0, 18.0, 19.0, 0.2, 10.0,
28.0, 510.0])
self.assertEqual(self.DUT._lst_piQ_count, [0.03, 0.1, 0.3, 1.0, 3.0,
10.0])
self.assertEqual(self.DUT._lst_piQ_stress, [0.03, 0.1, 0.3, 1.0, 5.0,
5.0, 15.0])
self.assertEqual(self.DUT._lambdab_count, [[0.0012, 0.0027, 0.011,
0.0054, 0.020, 0.0063,
0.013, 0.018, 0.033, 0.030,
0.00025, 0.014, 0.044,
0.69],
[0.0012, 0.0027, 0.011,
0.0054, 0.020, 0.0063,
0.013, 0.018, 0.033, 0.030,
0.00025, 0.014, 0.044,
0.69],
[0.0014, 0.0031, 0.013,
0.0061, 0.023, 0.0072,
0.014, 0.021, 0.038, 0.034,
0.00028, 0.016, 0.050,
0.78],
[0.0014, 0.0031, 0.013,
0.0061, 0.023, 0.0072,
0.014, 0.021, 0.038, 0.034,
0.00028, 0.016, 0.050,
0.78]])
self.assertEqual(self.DUT.subcategory, 26)
self.assertEqual(self.DUT.specification, 0)
@attr(all=True, unit=True)
def test_set_attributes(self):
"""
(TestCarbonFilm) set_attributes should return a 0 error code on success
"""
_values = (0, 32, 'Alt Part #', 'Attachments', 'CAGE Code',
'Comp Ref Des', 0.0, 0.0, 0.0, 'Description', 100.0, 0,
0, 'Figure #', 50.0, 'LCN', 1, 0, 10.0, 'Name', 'NSN', 0,
'Page #', 0, 0, 'Part #', 1, 'Ref Des', 1.0, 0,
'Remarks', 0.0, 'Spec #', 0, 30.0, 30.0, 0.0, 2014,
1.0, 155.0, -25.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1, 0.0, '', 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0, 1, 0.0,
0, 0, 0.0, 30.0, 0.0, 358.0,
1.0, 125.0, 0.01, 2.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3, 1)
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 0)
self.assertEqual(self.DUT.specification, 1)
@attr(all=True, unit=True)
def test_get_attributes(self):
"""
(TestResistor) get_attributes should return a tuple of attribute values
"""
_values = (None, None, '', '', '', '', 0.0, 0.0, 0.0, '', 100.0, 0, 0,
'', 50.0, '', 1, 0, 10.0, '', '', 0, '', 0, 0, '', 1, '',
1.0, 0, '', 0.0, '', 0, 30.0, 30.0, 0.0, 2014,
1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1,
0.0, {}, 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0,
0.0, 30.0, 0.0, 30.0,
0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, '', 0)
self.assertEqual(self.DUT.get_attributes(), _values)
@attr(all=True, unit=True)
def test_calculate_217_count(self):
"""
(TestCarbonFilm) calculate_part should return False on success when calculating MIL-HDBK-217F parts count results
"""
self.DUT.quality = 1
self.DUT.environment_active = 5
self.DUT.hazard_rate_type = 1
self.DUT.specification = 2
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piQ')
self.assertEqual(self.DUT.hazard_rate_model['lambdab'], 0.02)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 0.03)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 6.0E-10)
@attr(all=True, unit=True)
def test_calculate_217_stress_insulated(self):
"""
(TestCarbonFilm) calculate_part should return False on success when calculating MIL-HDBK-217F stress results for insulated resistors
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.temperature_active = 30.0
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.resistance = 1.0E4
self.DUT.specification = 1
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.001069402)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 0.03)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 6.4164103E-11)
@attr(all=True, unit=True)
def test_calculate_217_stress_non_insulated(self):
"""
(TestCarbonFilm) calculate_part should return False on success when calculating MIL-HDBK-217F stress results for non-insulated resistors
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.resistance = 3.3E5
self.DUT.specification = 3
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.001818069)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.1)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 0.03)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 1.19992554E-10)
@attr(all=True, unit=True)
def test_calculate_217_stress_mid_resistance(self):
"""
(TestCarbonFilm) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with mid-range resistance
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.resistance = 3.3E6
self.DUT.specification = 3
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.001818069)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.6)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 0.03)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 1.74534624E-10)
@attr(all=True, unit=True)
def test_calculate_217_stress_high_resistance(self):
"""
(TestCarbonFilm) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with high resistance
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.resistance = 3.3E7
self.DUT.specification = 1
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.001069402)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 2.5)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 0.03)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 1.604103E-10)
@attr(all=True, unit=True)
def test_calculate_217_stress_overflow(self):
"""
(TestCarbonFilm) calculate_part should return True when an OverflowError is raised when calculating MIL-HDBK-217F stress results
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 1130.0
self.DUT.rated_power = 0.25
self.DUT.resistance = 1.1E4
self.assertTrue(self.DUT.calculate_part())
class TestFilmPowerPowerModel(unittest.TestCase):
"""
Class for testing the Carbon Film Power resistor data model class.
"""
def setUp(self):
"""
Setup the test fixture for the Carbon Film Power resistor class.
"""
self.DUT = FilmPower()
@attr(all=True, unit=True)
def test_create(self):
"""
(TestCarbonFilmPower) __init__ should return a Carbon Film Power resistor model
"""
self.assertTrue(isinstance(self.DUT, FilmPower))
# Verify Hardware class was properly initialized.
self.assertEqual(self.DUT.revision_id, None)
self.assertEqual(self.DUT.category_id, 0)
# Verify Resistor class was properly initialized.
self.assertEqual(self.DUT.quality, 0)
# Verify the Carbon Film Power resistor class was properly
# initialized.
self.assertEqual(self.DUT._lst_piR, [1.0, 1.2, 1.3, 3.5])
self.assertEqual(self.DUT._lst_piE, [1.0, 2.0, 10.0, 5.0, 17.0, 6.0,
8.0, 14.0, 18.0, 25.0, 0.5, 14.0,
36.0, 660.0])
self.assertEqual(self.DUT._lst_piQ_count, [0.03, 0.1, 0.3, 1.0, 3.0,
10.0])
self.assertEqual(self.DUT._lst_piQ_stress, [1.0, 3.0])
self.assertEqual(self.DUT._lst_lambdab_count, [0.012, 0.025, 0.13,
0.062, 0.21, 0.078,
0.10, 0.19, 0.24, 0.32,
0.0060, 0.18, 0.47,
8.2])
self.assertEqual(self.DUT.subcategory, 27)
@attr(all=True, unit=True)
def test_calculate_217_count(self):
"""
(TestCarbonFilmPower) calculate_part should return False on success when calculating MIL-HDBK-217F parts count results
"""
self.DUT.quality = 1
self.DUT.environment_active = 5
self.DUT.hazard_rate_type = 1
self.DUT.specification = 2
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piQ')
self.assertEqual(self.DUT.hazard_rate_model['lambdab'], 0.21)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 0.03)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 6.3E-9)
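        # Hedged arithmetic check for the parts count form lambdab * piQ
        # (assuming failures per 10^6 hours): 0.21 * 0.03 = 6.3E-03, i.e.
        # 6.3E-09 failures per hour, matching the assertion above.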
@attr(all=True, unit=True)
def test_calculate_217_stress_low_resistance(self):
"""
(TestCarbonFilmPower) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with low resistance range
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.temperature_active = 30.0
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.resistance = 33.0
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.01274247)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 2.548494E-8)
@attr(all=True, unit=True)
def test_calculate_217_stress_mid1_resistance(self):
"""
(TestCarbonFilmPower) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with mid-range resistance
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.resistance = 3300.0
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.01274247)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.2)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 3.0581928E-08)
@attr(all=True, unit=True)
def test_calculate_217_stress_mid2_resistance(self):
"""
(TestCarbonFilmPower) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with mid-range resistance
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.resistance = 3.3E5
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.01274247)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.3)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 3.3130422E-08)
@attr(all=True, unit=True)
def test_calculate_217_stress_high_resistance(self):
"""
(TestCarbonFilmPower) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with high resistance
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.resistance = 3.3E7
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.01274247)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 3.5)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 8.919729E-08)
@attr(all=True, unit=True)
def test_calculate_217_stress_overflow(self):
"""
(TestCarbonFilmPower) calculate_part should return True when an OverflowError is raised when calculating MIL-HDBK-217F stress results
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 1130.0
self.DUT.rated_power = 0.25
self.DUT.resistance = 1.1E4
self.assertTrue(self.DUT.calculate_part())
class TestFilmNetworkModel(unittest.TestCase):
"""
Class for testing the Carbon Film Network resistor data model class.
"""
def setUp(self):
"""
        Set up the test fixture for the Carbon Film Network resistor class.
"""
self.DUT = FilmNetwork()
@attr(all=True, unit=True)
def test_create(self):
"""
(TestCarbonFilmNetwork) __init__ should return a Carbon Film Network resistor model
"""
self.assertTrue(isinstance(self.DUT, FilmNetwork))
# Verify Hardware class was properly initialized.
self.assertEqual(self.DUT.revision_id, None)
self.assertEqual(self.DUT.category_id, 0)
# Verify Resistor class was properly initialized.
self.assertEqual(self.DUT.quality, 0)
# Verify the Carbon FilmNetwork resistor class was properly
# initialized.
self.assertEqual(self.DUT._lst_piE, [1.0, 2.0, 10.0, 5.0, 17.0, 6.0,
8.0, 14.0, 18.0, 25.0, 0.5, 14.0,
36.0, 660.0])
self.assertEqual(self.DUT._lst_piQ_count, [0.03, 0.1, 0.3, 1.0, 3.0,
10.0])
self.assertEqual(self.DUT._lst_piQ_stress, [1.0, 3.0])
self.assertEqual(self.DUT._lst_lambdab_count, [0.0023, 0.0066, 0.031,
0.013, 0.055, 0.022,
0.043, 0.077, 0.15,
0.10, 0.0011, 0.055,
0.15, 1.7])
self.assertEqual(self.DUT.subcategory, 28)
self.assertEqual(self.DUT.n_resistors, 1)
self.assertEqual(self.DUT.piT, 0.0)
self.assertEqual(self.DUT.piNR, 0.0)
@attr(all=True, unit=True)
def test_set_attributes(self):
"""
(TestCarbonFilmNetwork) set_attributes should return a 0 error code on success
"""
_values = (0, 32, 'Alt Part #', 'Attachments', 'CAGE Code',
'Comp Ref Des', 0.0, 0.0, 0.0, 'Description', 100.0, 0,
0, 'Figure #', 50.0, 'LCN', 1, 0, 10.0, 'Name', 'NSN', 0,
'Page #', 0, 0, 'Part #', 1, 'Ref Des', 1.0, 0,
'Remarks', 0.0, 'Spec #', 0, 30.0, 30.0, 0.0, 2014,
1.0, 155.0, -25.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1, 0.0, '', 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0, 1, 0.0,
0, 0, 0.0, 30.0, 0.0, 358.0,
1.0, 125.0, 0.01, 2.0, 1.0, 1.0, 0.1, 8.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3, 8)
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 0)
self.assertEqual(self.DUT.n_resistors, 8)
self.assertEqual(self.DUT.piT, 0.1)
self.assertEqual(self.DUT.piNR, 8.0)
@attr(all=True, unit=True)
def test_set_attributes_missing_index(self):
"""
(TestCarbonFilmNetwork) set_attributes should return a 40 error code with missing inputs
"""
_values = (0, 32, 'Alt Part #', 'Attachments', 'CAGE Code',
'Comp Ref Des', 0.0, 0.0, 0.0, 'Description', 100.0, 0,
0, 'Figure #', 50.0, 'LCN', 1, 0, 10.0, 'Name', 'NSN', 0,
'Page #', 0, 0, 'Part #', 1, 'Ref Des', 1.0, 0,
'Remarks', 0.0, 'Spec #', 0, 30.0, 30.0, 0.0, 2014,
1.0, 155.0, -25.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1, 0.0, '', 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0, 1, 0.0,
0, 0, 0.0, 30.0, 0.0, 358.0,
1.0, 125.0, 0.01, 2.0, 1.0, 1.0, 0.1, 8.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3)
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 40)
@attr(all=True, unit=True)
def test_set_attributes_wrong_type(self):
"""
(TestCarbonFilmNetwork) set_attributes should return a 10 error code with a wrong data type
"""
_values = (0, 32, 'Alt Part #', 'Attachments', 'CAGE Code',
'Comp Ref Des', 0.0, 0.0, 0.0, 'Description', 100.0, 0,
0, 'Figure #', 50.0, 'LCN', 1, 0, 10.0, 'Name', 'NSN', 0,
'Page #', 0, 0, 'Part #', 1, 'Ref Des', 1.0, 0,
'Remarks', 0.0, 'Spec #', 0, 30.0, 30.0, 0.0, 2014,
1.0, 155.0, -25.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1, 0.0, '', 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0, 1, 0.0,
0, 0, 0.0, 30.0, 0.0, 358.0,
1.0, 125.0, 0.01, 2.0, 1.0, 1.0, 0.1, 8.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3, '')
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 10)
@attr(all=True, unit=True)
def test_get_attributes(self):
"""
(TestCarbonFilmNetwork) get_attributes should return a tuple of attribute values
"""
_values = (None, None, '', '', '', '', 0.0, 0.0, 0.0, '', 100.0, 0, 0,
'', 50.0, '', 1, 0, 10.0, '', '', 0, '', 0, 0, '', 1, '',
1.0, 0, '', 0.0, '', 0, 30.0, 30.0, 0.0, 2014,
1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1,
0.0, {}, 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0,
0.0, 30.0, 0.0, 30.0,
0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, '', 1, 0.0, 0.0)
self.assertEqual(self.DUT.get_attributes(), _values)
@attr(all=True, unit=True)
def test_calculate_217_count(self):
"""
(TestCarbonFilmNetwork) calculate_part should return False on success when calculating MIL-HDBK-217F parts count results
"""
self.DUT.quality = 1
self.DUT.environment_active = 5
self.DUT.hazard_rate_type = 1
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piQ')
self.assertEqual(self.DUT.hazard_rate_model['lambdab'], 0.055)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 0.03)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 1.65E-09)
@attr(all=True, unit=True)
def test_calculate_217_stress_case_temp_known(self):
"""
(TestCarbonFilmNetwork) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with case temperature known
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.temperature_active = 30.0
self.DUT.junction_temperature = 30.0
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.n_resistors = 8
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piT * piNR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.00006)
self.assertAlmostEqual(self.DUT.hazard_rate_model['piT'], 1.2518214)
self.assertEqual(self.DUT.hazard_rate_model['piNR'], 8.0)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 1.2017485E-09)
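        # Hedged arithmetic check for lambdab * piT * piNR * piQ * piE
        # (assuming failures per 10^6 hours): 0.00006 * 1.2518214 * 8.0 * 1.0
        # * 2.0 ~= 1.2017485E-03, i.e. 1.2017485E-09 failures per hour,
        # matching the assertion above.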
@attr(all=True, unit=True)
def test_calculate_217_stress_case_temp_unknown(self):
"""
(TestCarbonFilmNetwork) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with case temperature unknown
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.temperature_active = 30.0
self.DUT.junction_temperature = 0.0
self.DUT.operating_power = 0.113
self.DUT.rated_power = 0.25
self.DUT.n_resistors = 8
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piT * piNR * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.00006)
self.assertAlmostEqual(self.DUT.hazard_rate_model['piT'], 3.4542461)
self.assertEqual(self.DUT.hazard_rate_model['piNR'], 8.0)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 2.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 3.3160763E-09)
class TestVarFilmModel(unittest.TestCase):
"""
Class for testing the VarFilm Variable resistor data model class.
"""
def setUp(self):
"""
        Set up the test fixture for the VarFilm Variable resistor class.
"""
self.DUT = VarFilm()
@attr(all=True, unit=True)
def test_create(self):
"""
(TestVarFilm) __init__ should return a VarFilm Variable resistor model
"""
self.assertTrue(isinstance(self.DUT, VarFilm))
# Verify Hardware class was properly initialized.
self.assertEqual(self.DUT.revision_id, None)
self.assertEqual(self.DUT.category_id, 0)
# Verify Resistor class was properly initialized.
self.assertEqual(self.DUT.quality, 0)
# Verify the VarFilm resistor class was properly
# initialized.
self.assertEqual(self.DUT._lst_piE, [1.0, 3.0, 14.0, 7.0, 24.0, 6.0,
12.0, 20.0, 30.0, 39.0, 0.5, 22.0,
57.0, 1000.0])
self.assertEqual(self.DUT._lst_piQ_count, [0.03, 0.1, 0.3, 1.0, 3.0,
10.0])
self.assertEqual(self.DUT._lst_piQ_stress, [2.0, 4.0])
self.assertEqual(self.DUT._lst_lambdab_count, [0.048, 0.16, 0.76, 0.36,
1.3, 0.36, 0.72, 1.4,
2.2, 2.3, 0.024, 1.2,
3.4, 52.0])
self.assertEqual(self.DUT.subcategory, 39)
self.assertEqual(self.DUT.n_taps, 3)
self.assertEqual(self.DUT.specification, 0)
self.assertEqual(self.DUT.piTAPS, 0.0)
self.assertEqual(self.DUT.piV, 0.0)
@attr(all=True, unit=True)
def test_set_attributes(self):
"""
(TestVarFilm) set_attributes should return a 0 error code on success
"""
_values = (0, 32, 'Alt Part #', 'Attachments', 'CAGE Code',
'Comp Ref Des', 0.0, 0.0, 0.0, 'Description', 100.0, 0,
0, 'Figure #', 50.0, 'LCN', 1, 0, 10.0, 'Name', 'NSN', 0,
'Page #', 0, 0, 'Part #', 1, 'Ref Des', 1.0, 0,
'Remarks', 0.0, 'Spec #', 0, 30.0, 30.0, 0.0, 2014,
1.0, 155.0, -25.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1, 0.0, '', 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0, 1, 0.0,
0, 0, 0.0, 30.0, 0.0, 358.0,
1.0, 125.0, 0.01, 2.0, 1.0, 1.0, 0.1, 8.0, 0.75, 0.3, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3, 5, 1)
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 0)
self.assertEqual(self.DUT.n_taps, 5)
self.assertEqual(self.DUT.specification, 1)
self.assertEqual(self.DUT.piTAPS, 0.75)
self.assertEqual(self.DUT.piV, 0.3)
@attr(all=True, unit=True)
def test_set_attributes_missing_index(self):
"""
(TestVarFilm) set_attributes should return a 40 error code with missing inputs
"""
_values = (0, 32, 'Alt Part #', 'Attachments', 'CAGE Code',
'Comp Ref Des', 0.0, 0.0, 0.0, 'Description', 100.0, 0,
0, 'Figure #', 50.0, 'LCN', 1, 0, 10.0, 'Name', 'NSN', 0,
'Page #', 0, 0, 'Part #', 1, 'Ref Des', 1.0, 0,
'Remarks', 0.0, 'Spec #', 0, 30.0, 30.0, 0.0, 2014,
1.0, 155.0, -25.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1, 0.0, '', 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0, 1, 0.0,
0, 0, 0.0, 30.0, 0.0, 358.0,
1.0, 125.0, 0.01, 2.0, 1.0, 1.0, 0.1, 8.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3)
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 40)
@attr(all=True, unit=True)
def test_set_attributes_wrong_type(self):
"""
(TestVarFilm) set_attributes should return a 10 error code with a wrong data type
"""
_values = (0, 32, 'Alt Part #', 'Attachments', 'CAGE Code',
'Comp Ref Des', 0.0, 0.0, 0.0, 'Description', 100.0, 0,
0, 'Figure #', 50.0, 'LCN', 1, 0, 10.0, 'Name', 'NSN', 0,
'Page #', 0, 0, 'Part #', 1, 'Ref Des', 1.0, 0,
'Remarks', 0.0, 'Spec #', 0, 30.0, 30.0, 0.0, 2014,
1.0, 155.0, -25.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1, 0.0, '', 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0, 1, 0.0,
0, 0, 0.0, 30.0, 0.0, 358.0,
1.0, 125.0, 0.01, 2.0, 1.0, 1.0, 0.1, 8.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3, 5, '')
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 10)
@attr(all=True, unit=True)
def test_get_attributes(self):
"""
(TestVarFilm) get_attributes should return a tuple of attribute values
"""
_values = (None, None, '', '', '', '', 0.0, 0.0, 0.0, '', 100.0, 0, 0,
'', 50.0, '', 1, 0, 10.0, '', '', 0, '', 0, 0, '', 1, '',
1.0, 0, '', 0.0, '', 0, 30.0, 30.0, 0.0, 2014,
1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
0.0, 1.0, 1.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1,
0.0, {}, 0.0, 0.0, 0.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0,
0, 0,
0.0, 30.0, 0.0, 30.0,
0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, '', 3, 0, 0.0, 0.0)
self.assertEqual(self.DUT.get_attributes(), _values)
@attr(all=True, unit=True)
def test_calculate_217_count(self):
"""
(TestVarFilm) calculate_part should return False on success when calculating MIL-HDBK-217F parts count results
"""
self.DUT.quality = 1
self.DUT.environment_active = 5
self.DUT.hazard_rate_type = 1
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piQ')
self.assertEqual(self.DUT.hazard_rate_model['lambdab'], 1.3)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 0.03)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 3.9E-08)
@attr(all=True, unit=True)
def test_calculate_217_stress_low_resistance(self):
"""
(TestVarFilm) calculate_part should return False on success when calculating MIL-HDBK-217F stress results for low resistances
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.temperature_active = 30.0
self.DUT.operating_power = 0.075
self.DUT.rated_power = 0.25
self.DUT.rated_voltage = 200.0
self.DUT.resistance = 3.3E3
self.DUT.n_taps = 5
self.DUT.specification = 1
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piTAPS * piR * piV * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.03185164)
self.assertAlmostEqual(self.DUT.hazard_rate_model['piTAPS'], 1.2392136)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piV'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 2.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 3.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 2.3682591E-07)
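        # Hedged arithmetic check for lambdab * piTAPS * piR * piV * piQ * piE
        # (assuming failures per 10^6 hours): 0.03185164 * 1.2392136 * 1.0 *
        # 1.0 * 2.0 * 3.0 ~= 2.3682591E-01, i.e. 2.3682591E-07 failures per
        # hour, matching the assertion above.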
@attr(all=True, unit=True)
def test_calculate_217_stress_mid1_resistance(self):
"""
(TestVarFilm) calculate_part should return False on success when calculating MIL-HDBK-217F stress results for mid-range resistances
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.075
self.DUT.rated_power = 0.25
self.DUT.rated_voltage = 200.0
self.DUT.resistance = 1.3E5
self.DUT.n_taps = 5
self.DUT.specification = 1
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piTAPS * piR * piV * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.03185164)
self.assertAlmostEqual(self.DUT.hazard_rate_model['piTAPS'], 1.2392136)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.2)
self.assertEqual(self.DUT.hazard_rate_model['piV'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 2.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 3.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 2.8419110E-07)
@attr(all=True, unit=True)
def test_calculate_217_stress_mid2_resistance(self):
"""
(TestVarFilm) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with mid-range resistances
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.075
self.DUT.rated_power = 0.25
self.DUT.rated_voltage = 200.0
self.DUT.resistance = 3.3E5
self.DUT.n_taps = 5
self.DUT.specification = 2
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piTAPS * piR * piV * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.03495881)
self.assertAlmostEqual(self.DUT.hazard_rate_model['piTAPS'], 1.2392136)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.4)
self.assertEqual(self.DUT.hazard_rate_model['piV'], 1.0)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 2.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 3.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 3.6390004E-07)
@attr(all=True, unit=True)
def test_calculate_217_stress_high_resistance(self):
"""
(TestVarFilm) calculate_part should return False on success when calculating MIL-HDBK-217F stress results with high resistance
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 0.05
self.DUT.rated_power = 0.25
self.DUT.rated_voltage = 350.0
self.DUT.resistance = 1.6E6
self.DUT.n_taps = 5
self.DUT.specification = 2
self.assertFalse(self.DUT.calculate_part())
self.assertEqual(self.DUT.hazard_rate_model['equation'],
'lambdab * piTAPS * piR * piV * piQ * piE')
self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'],
0.03279464)
self.assertAlmostEqual(self.DUT.hazard_rate_model['piTAPS'], 1.2392136)
self.assertEqual(self.DUT.hazard_rate_model['piR'], 1.8)
self.assertEqual(self.DUT.hazard_rate_model['piV'], 1.05)
self.assertEqual(self.DUT.hazard_rate_model['piQ'], 2.0)
self.assertEqual(self.DUT.hazard_rate_model['piE'], 3.0)
self.assertAlmostEqual(self.DUT.hazard_rate_active, 4.6085265E-07)
@attr(all=True, unit=True)
def test_calculate_217_stress_overflow(self):
"""
(TestVarFilm) calculate_part should return True when an OverflowError is raised when calculating MIL-HDBK-217F stress results
"""
self.DUT.environment_active = 2
self.DUT.hazard_rate_type = 2
self.DUT.quality = 1
self.DUT.operating_power = 1130.0
self.DUT.rated_power = 0.25
self.DUT.resistance = 1.1E6
self.DUT.specification = 1
self.assertTrue(self.DUT.calculate_part())
|
bzl | 1a524d3552e708df13e8b40bfe61befcbab9c285 | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "93183a41b962ce21ea168357172aaf00cdca5bd9"
LLVM_SHA256 = "9f212bca2050e2cffa15aa72aa07d89e108b400d15ca541327a829e3d4108fb9"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
patch_file = "//third_party/llvm:disable_parallelism_in_verifier.patch",
)
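
# Illustrative WORKSPACE wiring (package path and repository name are
# assumptions; adjust to wherever this .bzl file actually lives in the repo):
#
#   load("//third_party/llvm:workspace.bzl", llvm = "repo")
#   llvm("llvm-project")
#
# The macro then fetches the pinned LLVM_COMMIT tarball via tf_http_archive,
# overlays the BUILD files listed in link_files, and applies patch_file.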
|
py | 1a524d9a245f921fcc09416608b6ea7354d1aa2d | from six import BytesIO, StringIO, text_type, string_types
from django.http import HttpResponse
from django.contrib.contenttypes.models import ContentType
try:
from django.db.models.fields.related_descriptors import ManyToManyDescriptor
except ImportError:
# Django 1.8 compat hack.
from django.db.models.fields.related import (
ReverseManyRelatedObjectsDescriptor as ManyToManyDescriptor
)
from django.db.models import Avg, Count, Sum, Max, Min
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font
import csv
import re
from collections import namedtuple
from decimal import Decimal
from numbers import Number
from functools import reduce
import datetime
from .utils import (
get_relation_fields_from_model,
get_properties_from_model,
get_direct_fields_from_model,
get_model_from_path_string,
get_custom_fields_from_model,
)
DisplayField = namedtuple(
"DisplayField",
"path path_verbose field field_verbose aggregate total group choices field_type",
)
def generate_filename(title, ends_with):
title = title.split('.')[0]
    title = title.replace(' ', '_')
title += ('_' + datetime.datetime.now().strftime("%m%d_%H%M"))
if not title.endswith(ends_with):
title += ends_with
return title
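# Illustrative behaviour (timestamp varies with the current date/time):
#   generate_filename('sales report.xlsx', '.csv')
#   -> 'sales_report_0131_1542.csv'
# i.e. the extension is stripped, spaces become underscores, a _MMDD_HHMM
# stamp is appended, and the requested suffix is enforced.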
class DataExportMixin(object):
def build_sheet(self, data, ws, sheet_name='report', header=None, widths=None):
first_row = 1
column_base = 1
ws.title = re.sub(r'\W+', '', sheet_name)[:30]
if header:
for i, header_cell in enumerate(header):
cell = ws.cell(row=first_row, column=i + column_base)
cell.value = header_cell
cell.font = Font(bold=True)
if widths:
ws.column_dimensions[get_column_letter(i + 1)].width = widths[i]
for row in data:
for i in range(len(row)):
item = row[i]
# If item is a regular string
if isinstance(item, str):
# Change it to a unicode string
try:
row[i] = text_type(item)
except UnicodeDecodeError:
row[i] = text_type(item.decode('utf-8', 'ignore'))
elif type(item) is dict:
row[i] = text_type(item)
try:
ws.append(row)
            except ValueError as e:
                ws.append([str(e)])
            except Exception:
                ws.append(['Unknown Error'])
def build_xlsx_response(self, wb, title="report"):
""" Take a workbook and return a xlsx file response """
title = generate_filename(title, '.xlsx')
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
response = HttpResponse(
myfile.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
def build_csv_response(self, wb, title="report"):
""" Take a workbook and return a csv file response """
title = generate_filename(title, '.csv')
myfile = StringIO()
sh = wb.active
c = csv.writer(myfile)
for r in sh.rows:
c.writerow([cell.value for cell in r])
response = HttpResponse(
myfile.getvalue(),
content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
def list_to_workbook(self, data, title='report', header=None, widths=None):
""" Create just a openpxl workbook from a list of data """
wb = Workbook()
title = re.sub(r'\W+', '', title)[:30]
if isinstance(data, dict):
i = 0
for sheet_name, sheet_data in data.items():
if i > 0:
wb.create_sheet()
ws = wb.worksheets[i]
self.build_sheet(
sheet_data, ws, sheet_name=sheet_name, header=header)
i += 1
else:
ws = wb.worksheets[0]
self.build_sheet(data, ws, header=header, widths=widths)
return wb
def list_to_xlsx_file(self, data, title='report', header=None, widths=None):
""" Make 2D list into a xlsx response for download
data can be a 2d array or a dict of 2d arrays
like {'sheet_1': [['A1', 'B1']]}
        returns a BytesIO file
"""
wb = self.list_to_workbook(data, title, header, widths)
if not title.endswith('.xlsx'):
title += '.xlsx'
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
return myfile
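    # Sketch of intended use (variable names are hypothetical):
    #   exporter = DataExportMixin()
    #   xlsx_file = exporter.list_to_xlsx_file({'sheet_1': [['A1', 'B1']]})
    #   with open('report.xlsx', 'wb') as fh:
    #       fh.write(xlsx_file.getvalue())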
def list_to_csv_file(self, data, title='report', header=None, widths=None):
""" Make a list into a csv response for download.
"""
wb = self.list_to_workbook(data, title, header, widths)
if not title.endswith('.csv'):
title += '.csv'
myfile = StringIO()
sh = wb.active
c = csv.writer(myfile)
for r in sh.rows:
c.writerow([cell.value for cell in r])
return myfile
def list_to_xlsx_response(self, data, title='report', header=None,
widths=None):
""" Make 2D list into a xlsx response for download
data can be a 2d array or a dict of 2d arrays
like {'sheet_1': [['A1', 'B1']]}
"""
wb = self.list_to_workbook(data, title, header, widths)
return self.build_xlsx_response(wb, title=title)
def list_to_csv_response(self, data, title='report', header=None,
widths=None):
""" Make 2D list into a csv response for download data.
"""
wb = self.list_to_workbook(data, title, header, widths)
return self.build_csv_response(wb, title=title)
def add_aggregates(self, queryset, display_fields):
agg_funcs = {
'Avg': Avg, 'Min': Min, 'Max': Max, 'Count': Count, 'Sum': Sum
}
for display_field in display_fields:
if display_field.aggregate:
func = agg_funcs[display_field.aggregate]
full_name = display_field.path + display_field.field
queryset = queryset.annotate(func(full_name))
return queryset
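    # Example (field names are hypothetical): a DisplayField with
    # path='orders__', field='total' and aggregate='Sum' results in
    # queryset.annotate(Sum('orders__total')), which report_to_list later
    # reads back through the 'orders__total__sum' key.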
def report_to_list(self, queryset, display_fields, user=None, property_filters=[], preview=False):
""" Create list from a report with all data filtering.
queryset: initial queryset to generate results
display_fields: list of field references or DisplayField models
user: requesting user. If left as None - there will be no permission check
        property_filters: filters applied to property/custom-field values; rows failing any filter are dropped
preview: return only first 50 rows
Returns list, message in case of issues.
"""
model_class = queryset.model
def can_change_or_view(model):
""" Return True iff `user` has either change or view permission
for `model`. """
if user is None:
return True
model_name = model._meta.model_name
app_label = model._meta.app_label
can_change = user.has_perm(app_label + '.change_' + model_name)
can_view = user.has_perm(app_label + '.view_' + model_name)
return can_change or can_view
if not can_change_or_view(model_class):
return [], 'Permission Denied'
if isinstance(display_fields, list):
# Convert list of strings to DisplayField objects.
new_display_fields = []
for display_field in display_fields:
field_list = display_field.split('__')
field = field_list[-1]
path = '__'.join(field_list[:-1])
if path:
path += '__' # Legacy format to append a __ here.
new_model = get_model_from_path_string(model_class, path)
try:
model_field = new_model._meta.get_field_by_name(field)[0]
except:
try:
model_field = new_model._meta.get_field(field)
except:
model_field = None
                choices = model_field.choices if model_field else None
new_display_fields.append(DisplayField(
path, '', field, '', '', None, None, choices, ''
))
display_fields = new_display_fields
# Build group-by field list.
group = [df.path + df.field for df in display_fields if df.group]
# To support group-by with multiple fields, we turn all the other
# fields into aggregations. The default aggregation is `Max`.
if group:
for field in display_fields:
if (not field.group) and (not field.aggregate):
field.aggregate = 'Max'
message = ""
objects = self.add_aggregates(queryset, display_fields)
# Display Values
display_field_paths = []
property_list = {}
custom_list = {}
display_totals = {}
for i, display_field in enumerate(display_fields):
model = get_model_from_path_string(model_class, display_field.path)
if display_field.field_type == "Invalid":
continue
if not model or can_change_or_view(model):
display_field_key = display_field.path + display_field.field
if display_field.field_type == "Property":
property_list[i] = display_field_key
elif display_field.field_type == "Custom Field":
custom_list[i] = display_field_key
elif display_field.aggregate == "Avg":
display_field_key += '__avg'
elif display_field.aggregate == "Max":
display_field_key += '__max'
elif display_field.aggregate == "Min":
display_field_key += '__min'
elif display_field.aggregate == "Count":
display_field_key += '__count'
elif display_field.aggregate == "Sum":
display_field_key += '__sum'
if display_field.field_type not in ('Property', 'Custom Field'):
display_field_paths.append(display_field_key)
if display_field.total:
display_totals[display_field_key] = Decimal(0)
else:
message += 'Error: Permission denied on access to {0}.'.format(
display_field.name
)
def increment_total(display_field_key, val):
""" Increment display total by `val` if given `display_field_key` in
`display_totals`.
"""
if display_field_key in display_totals:
if isinstance(val, bool):
# True: 1, False: 0
display_totals[display_field_key] += Decimal(val)
elif isinstance(val, Number):
display_totals[display_field_key] += Decimal(str(val))
elif val:
display_totals[display_field_key] += Decimal(1)
# Select pk for primary and m2m relations in order to retrieve objects
# for adding properties to report rows. Group-by queries do not support
# Property nor Custom Field filters.
if not group:
display_field_paths.insert(0, 'pk')
m2m_relations = []
for position, property_path in property_list.items():
property_root = property_path.split('__')[0]
root_class = model_class
try:
property_root_class = getattr(root_class, property_root)
except AttributeError: # django-hstore schema compatibility
continue
if type(property_root_class) == ManyToManyDescriptor:
display_field_paths.insert(1, '%s__pk' % property_root)
m2m_relations.append(property_root)
if group:
values = objects.values(*group)
values = self.add_aggregates(values, display_fields)
filtered_report_rows = [
[row[field] for field in display_field_paths]
for row in values
]
for row in filtered_report_rows:
for pos, field in enumerate(display_field_paths):
increment_total(field, row[pos])
else:
filtered_report_rows = []
values_and_properties_list = []
values_list = objects.values_list(*display_field_paths)
for row in values_list:
row = list(row)
values_and_properties_list.append(row[1:])
obj = None # we will get this only if needed for more complex processing
# related_objects
remove_row = False
# filter properties (remove rows with excluded properties)
for property_filter in property_filters:
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
root_relation = property_filter.path.split('__')[0]
if root_relation in m2m_relations:
pk = row[0]
if pk is not None:
# a related object exists
m2m_obj = getattr(obj, root_relation).get(pk=pk)
val = reduce(getattr, [property_filter.field], m2m_obj)
else:
val = None
else:
if property_filter.field_type == 'Custom Field':
for relation in property_filter.path.split('__'):
                                if hasattr(obj, relation):
                                    obj = getattr(obj, relation)
val = obj.get_custom_value(property_filter.field)
else:
val = reduce(getattr, (property_filter.path + property_filter.field).split('__'), obj)
if property_filter.filter_property(val):
remove_row = True
values_and_properties_list.pop()
break
if not remove_row:
for i, field in enumerate(display_field_paths[1:]):
increment_total(field, row[i + 1])
for position, display_property in property_list.items():
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
relations = display_property.split('__')
root_relation = relations[0]
if root_relation in m2m_relations:
pk = row.pop(0)
if pk is not None:
# a related object exists
m2m_obj = getattr(obj, root_relation).get(pk=pk)
val = reduce(getattr, relations[1:], m2m_obj)
else:
val = None
else:
# Could error if a related field doesn't exist
try:
val = reduce(getattr, relations, obj)
except AttributeError:
val = None
values_and_properties_list[-1].insert(position, val)
increment_total(display_property, val)
for position, display_custom in custom_list.items():
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
val = obj.get_custom_value(display_custom)
values_and_properties_list[-1].insert(position, val)
increment_total(display_custom, val)
filtered_report_rows.append(values_and_properties_list[-1])
if preview and len(filtered_report_rows) == 50:
break
# Sort results if requested.
if hasattr(display_fields, 'filter'):
defaults = {
None: text_type,
datetime.date: lambda: datetime.date(datetime.MINYEAR, 1, 1),
datetime.datetime: lambda: datetime.datetime(datetime.MINYEAR, 1, 1),
}
# Order sort fields in reverse order so that ascending, descending
# sort orders work together (based on Python's stable sort). See
# http://stackoverflow.com/questions/6666748/ for details.
sort_fields = display_fields.filter(sort__gt=0).order_by('-sort')
sort_values = sort_fields.values_list('position', 'sort_reverse')
for pos, reverse in sort_values:
column = (row[pos] for row in filtered_report_rows)
type_col = (type(val) for val in column if val is not None)
field_type = next(type_col, None)
default = defaults.get(field_type, field_type)()
filtered_report_rows = sorted(
filtered_report_rows,
key=lambda row: self.sort_helper(row[pos], default),
reverse=reverse,
)
values_and_properties_list = filtered_report_rows
# Build mapping from display field position to choices list.
choice_lists = {}
for df in display_fields:
if df.choices and hasattr(df, 'choices_dict'):
df_choices = df.choices_dict
# Insert blank and None as valid choices.
df_choices[''] = ''
df_choices[None] = ''
choice_lists[df.position] = df_choices
# Build mapping from display field position to format.
display_formats = {}
for df in display_fields:
if hasattr(df, 'display_format') and df.display_format:
display_formats[df.position] = df.display_format
def formatter(value, style):
# Convert value to Decimal to apply numeric formats.
try:
value = Decimal(value)
except Exception:
pass
try:
return style.string.format(value)
except ValueError:
return value
# Iterate rows and convert values by choice lists and field formats.
final_list = []
for row in values_and_properties_list:
row = list(row)
for position, choice_list in choice_lists.items():
try:
row[position] = text_type(choice_list[row[position]])
except Exception:
row[position] = text_type(row[position])
for pos, style in display_formats.items():
row[pos] = formatter(row[pos], style)
final_list.append(row)
values_and_properties_list = final_list
if display_totals:
display_totals_row = []
fields_and_properties = list(display_field_paths[0 if group else 1:])
for position, value in property_list.items():
fields_and_properties.insert(position, value)
for field in fields_and_properties:
display_totals_row.append(display_totals.get(field, ''))
# Add formatting to display totals.
for pos, style in display_formats.items():
display_totals_row[pos] = formatter(display_totals_row[pos], style)
values_and_properties_list.append(
['TOTALS'] + (len(fields_and_properties) - 1) * ['']
)
values_and_properties_list.append(display_totals_row)
return values_and_properties_list, message
def sort_helper(self, value, default):
if value is None:
value = default
if isinstance(value, string_types):
value = value.lower()
return value
class GetFieldsMixin(object):
def get_fields(self, model_class, field_name='', path='', path_verbose=''):
""" Get fields and meta data from a model
:param model_class: A django model class
:param field_name: The field name to get sub fields from
:param path: path of our field in format
            field_name__second_field_name__etc__
:param path_verbose: Human readable version of above
:returns: Returns fields and meta data about such fields
fields: Django model fields
custom_fields: fields from django-custom-field if installed
properties: Any properties the model has
path: Our new path
path_verbose: Our new human readable path
:rtype: dict
"""
fields = get_direct_fields_from_model(model_class)
properties = get_properties_from_model(model_class)
custom_fields = get_custom_fields_from_model(model_class)
app_label = model_class._meta.app_label
model = model_class
if field_name != '':
field = model_class._meta.get_field(field_name)
direct = field.concrete
if path_verbose:
path_verbose += "::"
# TODO: need actual model name to generate choice list (not pluralized field name)
# - maybe store this as a separate value?
if field.many_to_many and hasattr(field, 'm2m_reverse_field_name'):
path_verbose += field.m2m_reverse_field_name()
else:
path_verbose += field.name
path += field_name
path += '__'
if direct:
new_model = field.related_model
path_verbose = new_model.__name__.lower()
else: # Indirect related field
new_model = field.related_model
path_verbose = new_model.__name__.lower()
fields = get_direct_fields_from_model(new_model)
custom_fields = get_custom_fields_from_model(new_model)
properties = get_properties_from_model(new_model)
app_label = new_model._meta.app_label
model = new_model
return {
'fields': fields,
'custom_fields': custom_fields,
'properties': properties,
'path': path,
'path_verbose': path_verbose,
'app_label': app_label,
'model': model,
}
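    # Illustrative call (model names are hypothetical): starting from a Book
    # model, get_fields(Book, 'author') returns the direct fields, properties
    # and custom fields of the related Author model, plus path='author__' and
    # path_verbose='author', ready for the next hop down the relation chain.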
def get_related_fields(self, model_class, field_name, path="", path_verbose=""):
""" Get fields for a given model """
if field_name:
field = model_class._meta.get_field(field_name)
direct = field.concrete
if direct:
try:
related_field = field.remote_field
except AttributeError:
# Needed for Django < 1.9
related_field = field.related
try:
new_model = related_field.parent_model()
except AttributeError:
new_model = related_field.model
else:
# Indirect related field
new_model = field.related_model
if path_verbose:
path_verbose += "::"
path_verbose += field.name
path += field_name
path += '__'
else:
new_model = model_class
new_fields = get_relation_fields_from_model(new_model)
model_ct = ContentType.objects.get_for_model(new_model)
return (new_fields, model_ct, path)
|
py | 1a524ddfd62a8cfe95a4080b1139b0a18a216109 | """
Django settings for tweetme project.
Generated by 'django-admin startproject' using Django 1.10.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves to the project root, i.e. the directory holding manage.py.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=_b*5n+m9osa$5#93m)2-e)v16wzkioq(oyu0k)n4e7fiuojyv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'rest_framework',
'accounts',
'hashtags',
'tweets',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tweetme.urls'
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL ="/"
LOGOUT_REDIRECT_URL = LOGIN_REDIRECT_URL
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tweetme.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'GMT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
# will not be served, long term storage
os.path.join(BASE_DIR, "static-storage"),
]
# will be served
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static-serve")
# STATIC_ROOT = "webapps/abc/static"
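# Deployment note (describes the usual Django workflow, not anything specific
# to this project): `python manage.py collectstatic` copies files from
# STATICFILES_DIRS and each app's static/ directory into STATIC_ROOT, which
# the web server then serves under STATIC_URL.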
CRISPY_TEMPLATE_PACK = 'bootstrap3' |
py | 1a524e36d244d41f114ac6a17775ff906d3d02be | import torch
import torch.nn as nn
from .bap import BAP
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck_bk(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck_bk, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, use_bap=False):
super(Bottleneck, self).__init__()
## add by zengh
self.use_bap = use_bap
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
if self.use_bap:
self.bap = BAP()
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x ## feature map
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
feature_map = out
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.use_bap:
attention = out[:,:32,:,:]
raw_features,pooling_features = self.bap(feature_map,attention)
return attention,raw_features,pooling_features
out = self.conv3(out)
out = self.bn3(out)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None,use_bap = False):
super(ResNet, self).__init__()
self.use_bap = use_bap
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2],use_bap=use_bap)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
self.fc_new = nn.Linear(512*32,num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False, use_bap = False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
# if use_bap:
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer,use_bap=use_bap))
if use_bap:
return nn.Sequential(*layers)
for _ in range(2, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.use_bap:
attention,raw_features,x = x
# print(attention.shape,raw_features.shape,x.shape)
if not self.use_bap:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc_new(x)
if self.use_bap:
return attention,raw_features,x
return x
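    # Note on the use_bap branch: the triple returned above is
    # (attention, raw_features, logits), i.e. the 32-channel attention map cut
    # from layer4, the BAP pooling features, and the fc_new output, so callers
    # unpack it as attention, raw, logits = model(images).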
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
pretrained_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model_dict = model.state_dict()
state_dict = {k:v for k,v in pretrained_dict.items() if k in model_dict.keys()}
# model.load_state_dict(state_dict)
model_dict.update(state_dict)
model.load_state_dict(model_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
if __name__ == '__main__':
net = resnet50(use_bap=True,pretrained=True)
input = torch.Tensor(4,3,224,224)
out = net(input)
# print(net) |
py | 1a524ff782c5f241668a5b5616c5dee44b94e11c | import unittest
import numpy as np
import pysal
#from pysal.spreg.twosls_sp_regimes import GM_Lag_Regimes
from pysal.spreg import utils
#from pysal.spreg.twosls_sp import GM_Lag
from pysal.contrib.handler import Model
from functools import partial
GM_Lag_Regimes = partial(Model, mtype='GM_Lag_Regimes')
GM_Lag = partial(Model, mtype='GM_Lag')
class TestGMLag_Regimes(unittest.TestCase):
def setUp(self):
self.w = pysal.queen_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
self.db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
y = np.array(self.db.by_col("CRIME"))
self.y = np.reshape(y, (49,1))
self.r_var = 'NSA'
self.regimes = self.db.by_col(self.r_var)
def test___init__(self):
#Matches SpaceStat
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("HOVAL"))
self.X = np.array(X).T
reg = GM_Lag_Regimes(self.y, self.X, self.regimes, w=self.w, sig2n_k=True, regime_lag_sep=False, regime_err_sep=False)
betas = np.array([[ 45.14892906],
[ -1.42593383],
[ -0.11501037],
[ 40.99023016],
[ -0.81498302],
[ -0.28391409],
[ 0.4736163 ]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
e_5 = np.array([[ -1.47960519],
[ -7.93748769],
[ -5.88561835],
[-13.37941105],
[ 5.2524303 ]])
np.testing.assert_array_almost_equal(reg.e_pred[0:5], e_5, 7)
h_0 = np.array([[ 0. , 0. , 0. , 1. , 19.531 ,
80.467003 , 0. , 0. , 18.594 , 35.4585005]])
np.testing.assert_array_almost_equal(reg.h[0]*np.eye(10), h_0)
self.assertEqual(reg.k, 7)
self.assertEqual(reg.kstar, 1)
self.assertAlmostEqual(reg.mean_y, 35.128823897959187, 7)
self.assertEqual(reg.n, 49)
self.assertAlmostEqual(reg.pr2, 0.6572182131915739, 7)
self.assertAlmostEqual(reg.pr2_e, 0.5779687278635434, 7)
pfora1a2 = np.array([ -2.15017629, -0.30169328, -0.07603704, -22.06541809,
0.45738058, 0.02805828, 0.39073923])
np.testing.assert_array_almost_equal(reg.pfora1a2[0], pfora1a2, 7)
predy_5 = np.array([[ 13.93216104],
[ 23.46424269],
[ 34.43510955],
[ 44.32473878],
[ 44.39117516]])
np.testing.assert_array_almost_equal(reg.predy[0:5], predy_5, 7)
predy_e_5 = np.array([[ 17.20558519],
[ 26.73924169],
[ 36.51239935],
[ 45.76717105],
[ 45.4790797 ]])
np.testing.assert_array_almost_equal(reg.predy_e[0:5], predy_e_5, 7)
q_5 = np.array([[ 0. , 0. , 18.594 , 35.4585005]])
np.testing.assert_array_almost_equal(reg.q[0]*np.eye(4), q_5)
self.assertEqual(reg.robust, 'unadjusted')
self.assertAlmostEqual(reg.sig2n_k, 109.76462904625834, 7)
self.assertAlmostEqual(reg.sig2n, 94.08396775393571, 7)
self.assertAlmostEqual(reg.sig2, 109.76462904625834, 7)
self.assertAlmostEqual(reg.std_y, 16.732092091229699, 7)
u_5 = np.array([[ 1.79381896],
[ -4.66248869],
[ -3.80832855],
[-11.93697878],
[ 6.34033484]])
np.testing.assert_array_almost_equal(reg.u[0:5], u_5, 7)
self.assertAlmostEqual(reg.utu, 4610.11441994285, 7)
varb = np.array([ 1.23841820e+00, -3.65620114e-02, -1.21919663e-03,
1.00057547e+00, -2.07403182e-02, -1.27232693e-03,
-1.77184084e-02])
np.testing.assert_array_almost_equal(reg.varb[0], varb, 7)
vm = np.array([ 1.35934514e+02, -4.01321561e+00, -1.33824666e-01,
1.09827796e+02, -2.27655334e+00, -1.39656494e-01,
-1.94485452e+00])
np.testing.assert_array_almost_equal(reg.vm[0], vm, 6)
x_0 = np.array([[ 0. , 0. , 0. , 1. , 19.531 ,
80.467003]])
np.testing.assert_array_almost_equal(reg.x[0]*np.eye(6), x_0, 7)
y_5 = np.array([[ 15.72598 ],
[ 18.801754],
[ 30.626781],
[ 32.38776 ],
[ 50.73151 ]])
np.testing.assert_array_almost_equal(reg.y[0:5], y_5, 7)
yend_5 = np.array([[ 24.7142675 ],
[ 26.24684033],
[ 29.411751 ],
[ 34.64647575],
[ 40.4653275 ]])
np.testing.assert_array_almost_equal(reg.yend[0:5]*np.array([[1]]), yend_5, 7)
z_0 = np.array([[ 0. , 0. , 0. , 1. , 19.531 ,
80.467003 , 24.7142675]])
np.testing.assert_array_almost_equal(reg.z[0]*np.eye(7), z_0, 7)
zthhthi = np.array([ 1.00000000e+00, -2.35922393e-16, 5.55111512e-17,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-4.44089210e-16, 2.22044605e-16, 0.00000000e+00,
0.00000000e+00])
np.testing.assert_array_almost_equal(reg.zthhthi[0], zthhthi, 7)
chow_regi = np.array([[ 0.19692667, 0.65721307],
[ 0.5666492 , 0.45159351],
[ 0.45282066, 0.5009985 ]])
np.testing.assert_array_almost_equal(reg.chow.regi, chow_regi, 7)
self.assertAlmostEqual(reg.chow.joint[0], 0.82409867601863462, 7)
def test_init_discbd(self):
#Matches SpaceStat.
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, lag_q=False, w=self.w, sig2n_k=True, regime_lag_sep=False, regime_err_sep=False)
tbetas = np.array([[ 42.7266306 ],
[ -0.15552345],
[ 37.70545276],
[ -0.5341577 ],
[ -0.68305796],
[ -0.37106077],
[ 0.55809516]])
np.testing.assert_array_almost_equal(tbetas, reg.betas)
vm = np.array([ 270.62979422, 3.62539081, 327.89638627, 6.24949355,
-5.25333106, -6.01743515, -4.19290074])
np.testing.assert_array_almost_equal(reg.vm[0], vm, 6)
e_3 = np.array([[-0.33142796],
[-9.51719607],
[-7.86272153]])
np.testing.assert_array_almost_equal(reg.e_pred[0:3], e_3, 7)
u_3 = np.array([[ 4.51839601],
[-5.67363147],
[-5.1927562 ]])
np.testing.assert_array_almost_equal(reg.u[0:3], u_3, 7)
predy_3 = np.array([[ 11.20758399],
[ 24.47538547],
[ 35.8195372 ]])
np.testing.assert_array_almost_equal(reg.predy[0:3], predy_3, 7)
predy_e_3 = np.array([[ 16.05740796],
[ 28.31895007],
[ 38.48950253]])
np.testing.assert_array_almost_equal(reg.predy_e[0:3], predy_e_3, 7)
chow_regi = np.array([[ 0.13130991, 0.71707772],
[ 0.04740966, 0.82763357],
[ 0.15474413, 0.6940423 ]])
np.testing.assert_array_almost_equal(reg.chow.regi, chow_regi, 7)
self.assertAlmostEqual(reg.chow.joint[0], 0.31248100032096549, 7)
def test_lag_q(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, w=self.w, sig2n_k=True, regime_lag_sep=False, regime_err_sep=False)
tbetas = np.array([[ 37.87698329],
[ -0.89426982],
[ 31.4714777 ],
[ -0.71640525],
[ -0.28494432],
[ -0.2294271 ],
[ 0.62996544]])
np.testing.assert_array_almost_equal(tbetas, reg.betas)
vm = np.array([ 128.25714554, -0.38975354, 95.7271044 , -1.8429218 ,
-1.75331978, -0.18240338, -1.67767464])
np.testing.assert_array_almost_equal(reg.vm[0], vm, 6)
chow_regi = np.array([[ 0.43494049, 0.50957463],
[ 0.02089281, 0.88507135],
[ 0.01180501, 0.91347943]])
np.testing.assert_array_almost_equal(reg.chow.regi, chow_regi, 7)
self.assertAlmostEqual(reg.chow.joint[0], 0.54288190938307757, 7)
def test_all_regi(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, w=self.w, regime_lag_sep=False, regime_err_sep=True)
tbetas = np.array([[ 37.87698329, -0.89426982, 31.4714777 , -0.71640525,
-0.28494432, -0.2294271 , 0.62996544]])
np.testing.assert_array_almost_equal(tbetas, reg.betas.T)
vm = np.array([ 70.38291551, -0.64868787, 49.25453215, -0.62851534,
-0.75413453, -0.12674433, -0.97179236])
np.testing.assert_array_almost_equal(reg.vm[0], vm, 6)
e_3 = np.array([[-2.66997799],
[-7.69786264],
[-4.39412782]])
np.testing.assert_array_almost_equal(reg.e_pred[0:3], e_3, 7)
u_3 = np.array([[ 1.13879007],
[-3.76873198],
[-1.89671717]])
np.testing.assert_array_almost_equal(reg.u[0:3], u_3, 7)
predy_3 = np.array([[ 14.58718993],
[ 22.57048598],
[ 32.52349817]])
np.testing.assert_array_almost_equal(reg.predy[0:3], predy_3, 7)
predy_e_3 = np.array([[ 18.39595799],
[ 26.49961664],
[ 35.02090882]])
np.testing.assert_array_almost_equal(reg.predy_e[0:3], predy_e_3, 7)
chow_regi = np.array([[ 0.60091096, 0.43823066],
[ 0.03006744, 0.8623373 ],
[ 0.01943727, 0.88912016]])
np.testing.assert_array_almost_equal(reg.chow.regi, chow_regi, 7)
self.assertAlmostEqual(reg.chow.joint[0], 0.88634854058300516, 7)
def test_all_regi_sig2(self):
        # Artificial:
n = 256
x1 = np.random.uniform(-10,10,(n,1))
x2 = np.random.uniform(1,5,(n,1))
q = x2 + np.random.normal(0,1,(n,1))
x = np.hstack((x1,x2))
y = np.dot(np.hstack((np.ones((n,1)),x)),np.array([[1],[0.5],[2]])) + np.random.normal(0,1,(n,1))
latt = int(np.sqrt(n))
w = pysal.lat2W(latt,latt)
w.transform='r'
        regi = [0]*(n//2) + [1]*(n//2)
        model = GM_Lag_Regimes(y, x1, regi, q=q, yend=x2, w=w, regime_lag_sep=True, regime_err_sep=True)
        w1 = pysal.lat2W(latt//2,latt)
        w1.transform='r'
        model1 = GM_Lag(y[0:(n//2)].reshape((n//2),1), x1[0:(n//2)], yend=x2[0:(n//2)], q=q[0:(n//2)], w=w1)
        model2 = GM_Lag(y[(n//2):n].reshape((n//2),1), x1[(n//2):n], yend=x2[(n//2):n], q=q[(n//2):n], w=w1)
tbetas = np.vstack((model1.betas, model2.betas))
np.testing.assert_array_almost_equal(model.betas,tbetas)
vm = np.hstack((model1.vm.diagonal(),model2.vm.diagonal()))
np.testing.assert_array_almost_equal(model.vm.diagonal(), vm, 6)
#Columbus:
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, w=self.w,regime_lag_sep=True, regime_err_sep = True)
tbetas = np.array([[ 42.35827477],
[ -0.09472413],
[ -0.68794223],
[ 0.54482537],
[ 32.24228762],
[ -0.12304063],
[ -0.46840307],
[ 0.67108156]])
np.testing.assert_array_almost_equal(tbetas, reg.betas)
vm = np.array([ 200.92894859, 4.56244927, -4.85603079, -2.9755413 ,
0. , 0. , 0. , 0. ])
np.testing.assert_array_almost_equal(reg.vm[0], vm, 6)
e_3 = np.array([[ -1.32209547],
[-13.15611199],
[-11.62357696]])
np.testing.assert_array_almost_equal(reg.e_pred[0:3], e_3, 7)
u_3 = np.array([[ 6.99250069],
[-7.5665856 ],
[-7.04753328]])
np.testing.assert_array_almost_equal(reg.u[0:3], u_3, 7)
predy_3 = np.array([[ 8.73347931],
[ 26.3683396 ],
[ 37.67431428]])
np.testing.assert_array_almost_equal(reg.predy[0:3], predy_3, 7)
predy_e_3 = np.array([[ 17.04807547],
[ 31.95786599],
[ 42.25035796]])
np.testing.assert_array_almost_equal(reg.predy_e[0:3], predy_e_3, 7)
chow_regi = np.array([[ 1.51825373e-01, 6.96797034e-01],
[ 3.20105698e-04, 9.85725412e-01],
[ 8.58836996e-02, 7.69476896e-01],
[ 1.01357290e-01, 7.50206873e-01]])
np.testing.assert_array_almost_equal(reg.chow.regi, chow_regi, 7)
self.assertAlmostEqual(reg.chow.joint[0], 0.38417230022512161, 7)
def test_fixed_const(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, w=self.w, constant_regi='one', regime_lag_sep=False, regime_err_sep=False)
tbetas = np.array([[ -0.37658823],
[ -0.9666079 ],
[ 35.5445944 ],
[ -0.45793559],
[ -0.24216904],
[ 0.62500602]])
np.testing.assert_array_almost_equal(tbetas, reg.betas)
vm = np.array([ 1.4183697 , -0.05975784, -0.27161863, -0.62517245, 0.02266177,
0.00312976])
np.testing.assert_array_almost_equal(reg.vm[0], vm, 6)
e_3 = np.array([[ 0.17317815],
[-5.53766328],
[-3.82889307]])
np.testing.assert_array_almost_equal(reg.e_pred[0:3], e_3, 7)
u_3 = np.array([[ 3.10025518],
[-1.83150689],
[-1.49598494]])
np.testing.assert_array_almost_equal(reg.u[0:3], u_3, 7)
predy_3 = np.array([[ 12.62572482],
[ 20.63326089],
[ 32.12276594]])
np.testing.assert_array_almost_equal(reg.predy[0:3], predy_3, 7)
predy_e_3 = np.array([[ 15.55280185],
[ 24.33941728],
[ 34.45567407]])
np.testing.assert_array_almost_equal(reg.predy_e[0:3], predy_e_3, 7)
chow_regi = np.array([[ 1.85767047e-01, 6.66463269e-01],
[ 1.19445012e+01, 5.48089036e-04]])
np.testing.assert_array_almost_equal(reg.chow.regi, chow_regi, 7)
self.assertAlmostEqual(reg.chow.joint[0], 12.017256217621382, 7)
def test_names(self):
y_var = 'CRIME'
x_var = ['INC']
x = np.array([self.db.by_col(name) for name in x_var]).T
yd_var = ['HOVAL']
yd = np.array([self.db.by_col(name) for name in yd_var]).T
q_var = ['DISCBD']
q = np.array([self.db.by_col(name) for name in q_var]).T
r_var = 'NSA'
reg = GM_Lag_Regimes(self.y, x, self.regimes, yend=yd, q=q, w=self.w, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='columbus', name_w='columbus.gal', regime_lag_sep=False, regime_err_sep=False)
betas = np.array([[ 37.87698329],
[ -0.89426982],
[ 31.4714777 ],
[ -0.71640525],
[ -0.28494432],
[ -0.2294271 ],
[ 0.62996544]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([ 109.93469618, -0.33407447, 82.05180377, -1.57964725,
-1.50284553, -0.15634575, -1.43800683])
np.testing.assert_array_almost_equal(reg.vm[0], vm, 6)
chow_regi = np.array([[ 0.50743058, 0.47625326],
[ 0.02437494, 0.87593468],
[ 0.01377251, 0.9065777 ]])
np.testing.assert_array_almost_equal(reg.chow.regi, chow_regi, 7)
self.assertAlmostEqual(reg.chow.joint[0], 0.63336222761359162, 7)
self.assertListEqual(reg.name_x, ['0_CONSTANT', '0_INC', '1_CONSTANT', '1_INC'])
self.assertListEqual(reg.name_yend, ['0_HOVAL', '1_HOVAL', '_Global_W_CRIME'])
self.assertListEqual(reg.name_q, ['0_DISCBD', '0_W_INC', '0_W_DISCBD', '1_DISCBD', '1_W_INC', '1_W_DISCBD'])
self.assertEqual(reg.name_y, y_var)
if __name__ == '__main__':
unittest.main()
|
py | 1a525004c374c45693a4312ff4ffd1a6a5f3acb5 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import math
import platform
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_DATA_TYPES = [dtypes.half, dtypes.float32, dtypes.float64]
# TODO(b/143684500): Eigen to support complex sqrt
if not test_util.IsBuiltWithNvcc() and platform.system() != "Windows" \
and not test.is_built_with_rocm():
_DATA_TYPES += [dtypes.complex64, dtypes.complex128]
_TEST_PARAM_VALUES = [
# learning_rate, rho, momentum, epsilon, centered
[0.05, 0.9, 0.0, 1e-3, True],
[0.05, 0.9, 0.0, 1e-3, False],
[0.1, 0.9, 0.0, 1e-3, True],
[0.01, 0.9, 0.0, 1e-5, True],
[0.01, 0.9, 0.9, 1e-5, True],
]
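# Cross every dtype with every hyper-parameter row above.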
_TESTPARAMS = [
[data_type] + values
for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]
class RMSpropOptimizerTest(test.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,
epsilon, centered):
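    # NumPy reference implementation of one (optionally centered) RMSprop step with optional
    # momentum; the tests below compare the TF optimizer's dense updates against it.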
rms_t = rms * rho + (1 - rho) * g * g
if centered:
mg_t = mg * rho + (1 - rho) * g
denom_t = rms_t - mg_t * mg_t
else:
mg_t = mg
denom_t = rms_t
if momentum > 0.:
mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))
var_t = var - mom_t
else:
mom_t = mom
var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
lr, rho, momentum, epsilon, centered):
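    # Same reference update as above, but applied only at the indices carried by a sparse gradient.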
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue
if centered:
mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue
denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]
else:
denom_t = rms_t[gindex]
if momentum > 0.:
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t +
epsilon)
var_t[gindex] = var[gindex] - mom_t[gindex]
else:
mom_t[gindex] = mom[gindex]
var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
@test_util.run_deprecated_v1
def testDense(self):
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
with test_util.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np, dtype=dtype)
var1 = resource_variable_ops.ResourceVariable(var1_np, dtype=dtype)
grads0 = constant_op.constant(grads0_np, dtype=dtype)
grads1 = constant_op.constant(grads1_np, dtype=dtype)
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
mg1 = opt.get_slot(var1, "mg")
else:
mg0 = None
mg1 = None
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,
momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,
momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testDenseWithLearningRateDecay(self):
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
    # Run 2 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
epsilon, centered)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testDenseWithLearningRateInverseTimeDecay(self):
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
opt = rmsprop.RMSprop(
learning_rate=lr_schedule,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
    # Run 2 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
epsilon, centered)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in _DATA_TYPES:
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = rmsprop.RMSprop(
learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0,
centered=False).minimize(
loss, var_list=[var0])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[0., 1.]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariableCentered(self):
for dtype in _DATA_TYPES:
if test_util.is_xla_enabled() and dtype.is_complex:
self.skipTest("b/143578550")
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
# loss = lambda: pred * pred # pylint: disable=cell-var-from-loop
sgd_op = rmsprop.RMSprop(
learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0,
centered=True).minimize(
loss, var_list=[var0])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testSparse(self):
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
with test_util.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([1]))
grads1_np_indices = np.array([1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([1]))
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
else:
mg0 = None
mg1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
learning_rate, rho, momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
learning_rate, rho, momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testCallableParams(self):
with context.eager_mode():
for dtype in _DATA_TYPES:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
rho = lambda: 0.9
momentum = lambda: 0.0
epsilon = 1.0
opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the rms accumulators were 1. So we should see a normal
# update: v -= grad * learning_rate
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))
]), self.evaluate(var1))
# Step 2: the root mean square accumulators contain the previous update.
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0))
]), self.evaluate(var1))
def testConstructRMSpropWithLR(self):
opt = rmsprop.RMSprop(lr=1.0)
opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)
opt_3 = rmsprop.RMSprop(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = variables.Variable(1.)
v2 = variables.Variable(1.)
opt = rmsprop.RMSprop(1., momentum=0., centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and one unique slot variable for v1 and v2.
self.assertEqual(3, len(set({id(v) for v in opt.variables()})))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and two unique slot variables for v1 and v2.
self.assertEqual(5, len(set({id(v) for v in opt.variables()})))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and three unique slot variables for v1 and v2
self.assertEqual(7, len(set({id(v) for v in opt.variables()})))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
class SlotColocationTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters([True, False])
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testRunMinimizeOnGPUForCPUVariables(self, use_resource):
with ops.device("/device:CPU:0"):
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0],
dtype=dtypes.float32)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
def loss():
return 5 * var0 + 3 * var1
opt = rmsprop.RMSprop(
learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0)
# Fetch params to validate initial values
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step through optimizer on GPU.
# Slot variables are created the first time optimizer is used on some
# variable. This tests that slot variables will be colocated with the base
# variable.
with ops.device("/device:GPU:0"):
# Note that for eager execution, minimize expects a function instead of a
# Tensor.
opt_op = opt.minimize(loss, [var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
    # Validate updated params. All variables should have decreased.
self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)),
msg="updated variables: %s" % self.evaluate(var0))
self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)),
msg="updated variables: %s" % self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
py | 1a52504b5ebec2ca9655f20b17171970be57d49f | from numpy.testing import Tester
test = Tester().test
|
py | 1a525079ce024c05fc5e8a37cdc2fd2de1d64615 | from typing import Dict, List
from ravendb.documents.queries.query import QueryResult
class Explanations:
def __init__(self):
self.explanations: Dict[str, List[str]] = {}
def update(self, query_result: QueryResult) -> None:
self.explanations = query_result.explanations
class ExplanationOptions:
def __init__(self, group_key: str = None):
self.group_key = group_key
|
py | 1a5252bbbb7c9aa2868a4b0fe0b05ab0b715bb71 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ponytone.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | 1a5253038cdabf6b1f6c68825d3ea9261edfea9c |
# coding: utf-8
# In[121]:
import numpy as np
import pandas as pd
from os.path import join as opj
# ### Matching based on Volumes
# * Volume bins
# * 100 - 150
# * 150 - 200
# * 200 - 250
# * 250 - 300
# In[122]:
# ## Create a function to do volumes matching
# In[147]:
# def volumes_matching(volumes_bins, df_demographics, df_TD_phenotype, df_AUT_phenotype):
# # Load demographics file
#
# # demographics_file_path = '/home1/varunk/Autism-Connectome-Analysis-brain_connectivity/notebooks/demographics.csv'
# # phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
# # volumes_bins = np.array([[0,150],[151,200],[201,250],[251,300]])
#
#
# # df_demographics = pd.read_csv(demographics_file_path)
# df_demographics_volumes = df_demographics.as_matrix(['SITE_NAME','VOLUMES']).squeeze()
#
#
#
# # df_phenotype = pd.read_csv(phenotype_file_path)
# # df_phenotype = df_phenotype.sort_values(['SUB_ID'])
#
#
#
# bins_volumes_AUT_data = []
# bins_volumes_TD_data = []
#
# for counter, _bin in enumerate(volumes_bins):
# df_demographics_volumes_selected_bin = df_demographics_volumes[np.where(np.logical_and((df_demographics_volumes[:,1] >= _bin[0]),(df_demographics_volumes[:,1] <= _bin[1])))]
#
#
# selected_AUT = pd.DataFrame()
# selected_TD = pd.DataFrame()
# for site in df_demographics_volumes_selected_bin:
# # print(site[0])
# selected_AUT = pd.concat([selected_AUT,df_AUT_phenotype.loc[(df_AUT_phenotype['SEX'] == 1)
# & (df_AUT_phenotype['DSM_IV_TR'] == 1)
# & (df_AUT_phenotype['SITE_ID'] == site[0])]])
# selected_TD = pd.concat([selected_TD,df_TD_phenotype.loc[(df_TD_phenotype['SEX'] == 1)
# & (df_TD_phenotype['DSM_IV_TR'] == 0)
# & (df_TD_phenotype['SITE_ID'] == site[0])]])
#
# bins_volumes_AUT_data.append(selected_AUT)
# bins_volumes_TD_data.append(selected_TD)
#
# matched_df_TD,matched_df_AUT = matching(volumes_bins, bins_volumes_TD_data, bins_volumes_AUT_data)
# # sub_ids = selected_df_TD.as_matrix(['SUB_ID']).squeeze()
# # matched_df_TD.to_csv('volumes_matched_TD.csv')
# return matched_df_TD,matched_df_AUT
def matching(bins, bins_TD_data, bins_AUT_data, randomize = False):
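    # The smallest TD/AUT (or AUT/TD) ratio across bins sets a global scaling factor; the larger
    # group in every bin is then subsampled (optionally at random) to that factor, and both matched
    # dataframes are returned sorted by SUB_ID.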
# num_bins = 4
print('Original data stats')
print('Range ','TD ','AUT ','Ratio TD/AUT')
ratio = np.zeros((len(bins_TD_data)))
for i in range(len(bins_TD_data)):
ratio[i] = bins_TD_data[i].shape[0]/bins_AUT_data[i].shape[0]
print(bins[i],bins_TD_data[i].shape[0],bins_AUT_data[i].shape[0], ratio[i])
min_ratio = np.min(ratio)
min_index = np.argmin(ratio)
new_TD = np.zeros((len(bins_TD_data)))
new_AUT = np.zeros((len(bins_AUT_data)))
# matched_df_AUT = None
# matched_df_TD = None
if min_ratio < 1:
_ratio = 1.0 / ratio
min_ratio = np.min(_ratio)
print('Ratio = ',min_ratio)
# -------------------------------------------
print('Matched data stats')
print('Range ','TD ','AUT ')
for i in range(len(bins_TD_data)):
new_AUT[i] = np.floor(bins_TD_data[i].shape[0] * min_ratio)
print(bins[i],bins_TD_data[i].shape[0],new_AUT[i])
        # Now loop over all the bins created and select the specific number of subjects randomly from each AUT bin
# AUT_idx_list = []
selected_df_AUT = pd.DataFrame()
selected_df_TD = pd.DataFrame()
for i in range(len(bins_AUT_data)):
idx = np.arange(len(bins_AUT_data[i]))
if randomize == True:
np.random.shuffle(idx)
idx = idx[0:int(new_AUT[i])]
# AUT_idx_list.append(idx)
selected_df_AUT = pd.concat([selected_df_AUT, bins_AUT_data[i].iloc[idx]])
selected_df_TD = pd.concat([selected_df_TD, bins_TD_data[i]])
matched_df_AUT = selected_df_AUT.sort_values(['SUB_ID'])
matched_df_TD = selected_df_TD.sort_values(['SUB_ID'])
return matched_df_TD, matched_df_AUT
# -------------------------------------
print('Matched data stats')
print('Range ','TD ','AUT ')
for i in range(len(bins_TD_data)):
new_TD[i] = np.floor(bins_AUT_data[i].shape[0] * min_ratio)
print(bins[i],new_TD[i],bins_AUT_data[i].shape[0])
# Now loop over all the bins created and select the specific number of subjects randomly from each TD bin
# TD_idx_list = []
selected_df_TD = pd.DataFrame()
selected_df_AUT = pd.DataFrame()
for i in range(len(bins_TD_data)):
idx = np.arange(len(bins_TD_data[i]))
if randomize == True:
np.random.shuffle(idx)
idx = idx[0:int(new_TD[i])]
# TD_idx_list.append(idx)
selected_df_TD = pd.concat([selected_df_TD, bins_TD_data[i].iloc[idx]])
selected_df_AUT = pd.concat([selected_df_AUT, bins_AUT_data[i]])
matched_df_TD = selected_df_TD.sort_values(['SUB_ID'])
matched_df_AUT = selected_df_AUT.sort_values(['SUB_ID'])
return matched_df_TD,matched_df_AUT
# In[150]:
# Usage
# demographics_file_path = '/home1/varunk/Autism-Connectome-Analysis-brain_connectivity/notebooks/demographics.csv'
# phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
#
# df_demographics = pd.read_csv(demographics_file_path)
#
#
# df_phenotype = pd.read_csv(phenotype_file_path)
# df_phenotype = df_phenotype.sort_values(['SUB_ID'])
#
# volumes_bins = np.array([[0,150],[151,200],[201,250],[251,300]])
#
# matched_df_TD,matched_df_AUT = volumes_matching(volumes_bins, df_demographics, df_phenotype, df_phenotype)
# In[151]:
def age_matching(age_bins, df_TD_phenotype, df_AUT_phenotype, base_directory ):
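    # Split both phenotype tables into the given AGE_AT_SCAN ranges, log the bins used, and hand
    # the per-bin subsampling off to matching().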
# age_bins = np.array([[0,9],[9,12],[12,15],[15,18]])
bins_age_AUT_data = []
bins_age_TD_data = []
# for counter, _bin in enumerate(age_bins):
log_path = opj(base_directory,"log.txt")
log = open(log_path, 'a')
log.write("------------- Age Matching with the following bins -------------\n")
log.write("Age Bins: %s \n"%age_bins)
log.flush()
for age in age_bins:
selected_AUT = pd.DataFrame()
selected_TD = pd.DataFrame()
# print(age[0], age[1])
# selected_AUT = pd.concat([selected_AUT,df_AUT_phenotype[(df_AUT_phenotype['SEX'] == 1)
# & (df_AUT_phenotype['DSM_IV_TR'] == 1)
# & (df_AUT_phenotype['AGE_AT_SCAN'] > age[0])
# & (df_AUT_phenotype['AGE_AT_SCAN'] <= age[1]) ]])
# selected_TD = pd.concat([selected_TD,df_TD_phenotype.loc[(df_TD_phenotype['SEX'] == 1)
# & (df_TD_phenotype['DX_GROUP'] == 2)
# & (df_TD_phenotype['AGE_AT_SCAN'] > age[0])
# & (df_TD_phenotype['AGE_AT_SCAN'] <= age[1]) ]])
selected_AUT = pd.concat([selected_AUT,df_AUT_phenotype[(df_AUT_phenotype['AGE_AT_SCAN'] > age[0])
& (df_AUT_phenotype['AGE_AT_SCAN'] <= age[1]) ]])
selected_TD = pd.concat([selected_TD,df_TD_phenotype.loc[(df_TD_phenotype['AGE_AT_SCAN'] > age[0])
& (df_TD_phenotype['AGE_AT_SCAN'] <= age[1]) ]])
bins_age_AUT_data.append(selected_AUT)
bins_age_TD_data.append(selected_TD)
matched_df_TD,matched_df_AUT = matching(age_bins, bins_age_TD_data, bins_age_AUT_data)
# sub_ids = selected_df_TD.as_matrix(['SUB_ID']).squeeze()
# matched_df_TD.to_csv('age_matched_TD.csv')
return matched_df_TD,matched_df_AUT
# In[152]:
# Usage
# demographics_file_path = '/home1/varunk/Autism-Connectome-Analysis-brain_connectivity/notebooks/demographics.csv'
# phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
#
# df_demographics = pd.read_csv(demographics_file_path)
#
#
# df_phenotype = pd.read_csv(phenotype_file_path)
# df_phenotype = df_phenotype.sort_values(['SUB_ID'])
#
# age_bins = np.array([[0,9],[9,12],[12,15],[15,18]])
#
# matched_df_TD,matched_df_AUT = age_matching(age_bins, matched_df_TD, df_phenotype)
# ## TR Matching
# In[153]:
def tr_matching(TR_bins, df_demographics, df_TD_phenotype, df_AUT_phenotype, base_directory ):
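    # Group acquisition sites by their repetition-time (TR) bin, gather the TD/AUT subjects scanned
    # at those sites, and hand the per-bin subsampling off to matching().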
# df_demographics = pd.read_csv(demographics_file_path)
df_demographics_TR = df_demographics.as_matrix(['SITE_NAME','TR']).squeeze()
log_path = opj(base_directory,"log.txt")
log = open(log_path, 'a')
log.write("------------- TR Matching with the following bins -------------\n")
log.write("TR Bins: %s \n"%TR_bins)
log.flush()
# df_phenotype = pd.read_csv(phenotype_file_path)
# df_phenotype = df_phenotype.sort_values(['SUB_ID'])
bins_TR_AUT_data = []
bins_TR_TD_data = []
for counter, _bin in enumerate(TR_bins):
df_demographics_TR_selected_bin = df_demographics_TR[np.where(np.logical_and((df_demographics_TR[:,1] > _bin[0]),(df_demographics_TR[:,1] <= _bin[1])))]
selected_AUT = pd.DataFrame()
selected_TD = pd.DataFrame()
for site in df_demographics_TR_selected_bin:
# print(site[0])
# selected_AUT = pd.concat([selected_AUT,df_AUT_phenotype.loc[(df_AUT_phenotype['SEX'] == 1)
# & (df_AUT_phenotype['DSM_IV_TR'] == 1)
# & (df_AUT_phenotype['SITE_ID'] == site[0])]])
# selected_TD = pd.concat([selected_TD,df_TD_phenotype.loc[(df_TD_phenotype['SEX'] == 1)
# & (df_TD_phenotype['DX_GROUP'] == 2)
# & (df_TD_phenotype['SITE_ID'] == site[0])]])
selected_AUT = pd.concat([selected_AUT,df_AUT_phenotype.loc[(df_AUT_phenotype['SITE_ID'] == site[0])]])
selected_TD = pd.concat([selected_TD,df_TD_phenotype.loc[(df_TD_phenotype['SITE_ID'] == site[0])]])
bins_TR_AUT_data.append(selected_AUT)
bins_TR_TD_data.append(selected_TD)
matched_df_TD, matched_df_AUT = matching(TR_bins, bins_TR_TD_data, bins_TR_AUT_data)
# sub_ids = selected_df_TD.as_matrix(['SUB_ID']).squeeze()
# matched_df_TD.to_csv('TR_matched_TD.csv')
return matched_df_TD, matched_df_AUT
# In[154]:
# usage
# demographics_file_path = '/home1/varunk/Autism-Connectome-Analysis-brain_connectivity/notebooks/demographics.csv'
# phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
# TR_bins = np.array([[0,2],[2,2.5],[2.5,3.0]])
#
#
# df_demographics = pd.read_csv(demographics_file_path)
# df_phenotype = pd.read_csv(phenotype_file_path)
# df_phenotype = df_phenotype.sort_values(['SUB_ID'])
#
# matched_df_TD = tr_matching(TR_bins,df_demographics, matched_df_TD, df_phenotype)
# In[155]:
# Combined Matching Usage
# demographics_file_path = '/home1/varunk/Autism-Connectome-Analysis-brain_connectivity/notebooks/demographics.csv'
# phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
# df_demographics = pd.read_csv(demographics_file_path)
# df_phenotype = pd.read_csv(phenotype_file_path)
# df_phenotype = df_phenotype.sort_values(['SUB_ID'])
#
#
#
# # Volume matching
# print('Volume Matching')
# volumes_bins = np.array([[0,150],[151,200],[201,250],[251,300]])
# matched_df_TD = df_phenotype
# matched_df_AUT = df_phenotype
# matched_df_TD, matched_df_AUT = volumes_matching(volumes_bins, df_demographics, matched_df_TD, matched_df_AUT)
#
# # TR matching
# print('TR Matching')
# TR_bins = np.array([[0,2],[2,2.5],[2.5,3.0]])
# # matched_df_TD = df_phenotype
# # matched_df_AUT = df_phenotype
# matched_df_TD,matched_df_AUT = tr_matching(TR_bins,df_demographics, matched_df_TD, matched_df_AUT)
#
#
# # Age Matching
# print('Age Matching')
# age_bins = np.array([[0,9],[9,12],[12,15],[15,18]])
# # matched_df_TD = df_phenotype
# # matched_df_AUT = df_phenotype
# matched_df_TD,matched_df_AUT = age_matching(age_bins, matched_df_TD, matched_df_AUT)
#
#
#
# matched_df_TD.loc[(matched_df_TD['SEX'] == 1) & (matched_df_TD['DSM_IV_TR'] == 0) & (matched_df_TD['EYE_STATUS_AT_SCAN'] == 2) ]
#
# matched_df_AUT.loc[(matched_df_AUT['SEX'] == 1) & (matched_df_AUT['DSM_IV_TR'] == 1) & (matched_df_AUT['EYE_STATUS_AT_SCAN'] == 2) ]
|
py | 1a525303dc1812de7b56aee7ffa1be68f0c8c97c | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class EtherPhysicalPortRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'moid': 'str',
'selector': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'moid': 'Moid',
'selector': 'Selector'
}
def __init__(self, object_type=None, moid=None, selector=None):
"""
EtherPhysicalPortRef - a model defined in Swagger
"""
self._object_type = None
self._moid = None
self._selector = None
if object_type is not None:
self.object_type = object_type
if moid is not None:
self.moid = moid
if selector is not None:
self.selector = selector
@property
def object_type(self):
"""
Gets the object_type of this EtherPhysicalPortRef.
The Object Type of the referenced REST resource.
:return: The object_type of this EtherPhysicalPortRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this EtherPhysicalPortRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this EtherPhysicalPortRef.
:type: str
"""
self._object_type = object_type
@property
def moid(self):
"""
Gets the moid of this EtherPhysicalPortRef.
The Moid of the referenced REST resource.
:return: The moid of this EtherPhysicalPortRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this EtherPhysicalPortRef.
The Moid of the referenced REST resource.
:param moid: The moid of this EtherPhysicalPortRef.
:type: str
"""
self._moid = moid
@property
def selector(self):
"""
Gets the selector of this EtherPhysicalPortRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this EtherPhysicalPortRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this EtherPhysicalPortRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this EtherPhysicalPortRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, EtherPhysicalPortRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | 1a52531dda1600211bac851e9bab4686ab11b065 | import pickle
import torch
from model import RNN
def read_metadata(metadata_path):
with open(metadata_path, 'rb') as f:
metadata = pickle.load(f)
input_stoi = metadata['input_stoi']
label_itos = metadata['label_itos']
return input_stoi, label_itos
def load_model(model_path, input_stoi):
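    # Rebuild the RNN with the same hyper-parameters used at training time (vocab size, 100-dim
    # embeddings, 256 hidden units, 2 bidirectional layers, dropout 0.5, pad index) before loading
    # the saved weights; the architecture must match the checkpoint exactly.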
model = RNN(
len(set(input_stoi.values())), 100, 256, 1,
2, True, 0.5, input_stoi['<pad>']
)
model.load_state_dict(torch.load(model_path))
model = model.eval()
return model
def predict_sentiment(sentence, model_path, metadata_path):
    print('Fetching Meta-Data')
input_stoi, label_itos = read_metadata(metadata_path)
print('Meta Data Loaded')
model = load_model(model_path, input_stoi)
print('Tokenization')
tokenized = [tok for tok in sentence.split()]
indexed = [input_stoi[t] for t in tokenized]
tensor = torch.LongTensor(indexed)
tensor = tensor.unsqueeze(1)
length_tensor = torch.LongTensor([len(indexed)])
print('Parsing through Model')
prediction = torch.sigmoid(model(tensor, length_tensor))
print('prediction-',prediction)
return label_itos[round(prediction.item())] |
py | 1a525403a0a775d38a0658b8e9214595f90ec805 | import sqlite3
import os
class DbHelper:
connection = None
cursor = None
def __init__(self, path: str):
if not self.check_file(path):
            open(path, 'w+').close()
try:
self.connection = sqlite3.connect(path)
self.cursor = self.connection.cursor()
except sqlite3.Error as e:
            print("An error has occurred while opening the database:", e.args[0])
@staticmethod
def check_file(path: str):
return os.path.isfile(path)
def query(self, query, param):
self.cursor.execute(query, param)
    def getFirstResult(self) -> list:
        return self.cursor.fetchone()
    def queryAll(self, query, param) -> list:
self.cursor.execute(query, param)
return self.cursor.fetchall()
def close(self):
self.connection.close()
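# Example usage (illustrative; the database file and table below are hypothetical):
#     db = DbHelper("app.db")
#     db.query("SELECT name FROM users WHERE id = ?", (1,))
#     first_row = db.getFirstResult()
#     all_rows = db.queryAll("SELECT name FROM users", ())
#     db.close()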
|
py | 1a52543289cb7671a991c47ddd674d6041f13427 | from App.extensions import db
from datetime import datetime
class Posts(db.Model):
__tablename__ = 'posts'
id = db.Column(db.Integer,primary_key=True)
content = db.Column(db.Text)
pid = db.Column(db.Integer,default=0)
path = db.Column(db.String(255),default='0,')
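    # pid and path appear to implement a parent-id plus materialized-path scheme for threaded
    # replies (an assumption based only on the defaults above).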
    # records when the post was created
timestamp = db.Column(db.DateTime,default=datetime.utcnow)
    # foreign key to the user table
uid = db.Column(db.Integer,db.ForeignKey('user.id')) |
py | 1a52545ec70cca1e0cb3b3a826bda96f6bb22000 | import json
import os
from botocore.exceptions import ClientError
from typing import Dict, Any, List
from pprint import pprint
from datetime import datetime, timedelta
import uuid
from collections import namedtuple
from .create_processes_metric_image_util import generate_processes_metrics_image
AlarmStateChangeData = namedtuple('AlarmStateChangeData', [
'period', 'queryDate', 'recentDatapoints', 'startDate', 'statistic', 'threshold', 'version','evaluatedDatapoints'])
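# The alarm name is expected to begin with the EC2 instance id, which is 19 characters long for
# current-format ids ('i-' followed by 17 hex characters); the slice below extracts it.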
INSTANCE_ID = slice(0, 19)
def create_metric_images_urls(alarm_details, metric_names, aws_services, instance_type):
'''
This function generates metric images.
'''
metric_images_urls: Dict[str, str] = {}
try:
alarm_name: str = alarm_details['AlarmName']
instance_id: str = alarm_name[INSTANCE_ID]
metric_alarms_new_state_details: Dict[str, Any] = get_alarms_new_state_data(
alarm_details, aws_services)
for name in metric_names:
image_url = generate_processes_metrics_image(instance_type, instance_id, name, metric_alarms_new_state_details['CPUUtilization'], aws_services) \
if 'procstat' in name else generate_metric_image(instance_id, name, metric_alarms_new_state_details[name], aws_services)
print(f'{name} metric image url of instance {instance_id}.')
print(f'{image_url}')
if image_url is not None:
metric_images_urls[name] = image_url
except (Exception, ClientError) as err:
print(err)
print(
f'Failed to generate {metric_names} metric images of instance {instance_id} because of above err.')
raise err
else:
return metric_images_urls
def get_alarms_new_state_data(alarm_details: Dict[str, Any], aws_services: Dict[str, Any]) -> Dict[str, Any]:
print('Get alarms history.')
cloudwatch_resource = aws_services['cloudwatch_resource']
    child_alarms_details: List[Dict[str, Any]] = alarm_details['TriggeringChildren']
alarm_names: List[str] = []
today = datetime.utcnow()
year, month, day = today.year, today.month, today.day
alarms_new_state: Dict[str, Any] = {}
try:
for alarm in child_alarms_details:
_, _, _, _, _, _, alarm_name = alarm['Arn'].split(':')
alarm_names.append(alarm_name)
print(alarm_names)
for alarm_name in alarm_names:
alarm = cloudwatch_resource.Alarm(alarm_name)
history: Dict[str, Any] = alarm.describe_history(AlarmTypes=[
'MetricAlarm',
],
HistoryItemType='StateUpdate',
#StartDate=datetime(year, month, day),
#EndDate=datetime.utcnow(),
MaxRecords=1,#Get the record of transition from OK to ALARM.
ScanBy='TimestampDescending')
for item in history['AlarmHistoryItems']:
print(item['AlarmName'])
history_data: Dict[str, Any] = json.loads(item['HistoryData'])
print(history_data)
new_state_data: Dict[str, Any] = history_data['newState'][
'stateReasonData'] if history_data['newState']['stateValue'] == 'ALARM' else None
if new_state_data is not None:
alarms_new_state['CPUUtilization' if 'CPUUtilization' in alarm_name else 'CPUCreditBalance'] = {'stateReason': history_data['newState']['stateReason'],
'stateReasonData': AlarmStateChangeData(**new_state_data)}
except Exception as err:
print(err)
print(
f'Failed to retrieve new state data of {alarm_names} from history.')
pprint(alarms_new_state)
return alarms_new_state
def generate_metric_image(instance_id: str, metric_name: str, alarm_new_state: Dict[str, Any], aws_services: Dict[str, Any]) -> str:
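    # Build a CloudWatch metric-widget request for the metric, annotate it with the alarm's threshold and
    # recent datapoints, render it via get_metric_widget_image, upload the JPEG to S3 and return its public URL.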
try:
aws_region: str = os.environ.get('AWS_REGION')
cloudwatch_client = aws_services['cloudwatch_client']
s3_bucket: str = os.environ.get('S3_BUCKET_TO_STORE_GENERATED_IMAGES')
        horizontal_annotation: List[Dict[str, Any]] = []
horizontal_annotation.append({
"color": "#ff6961",
"label": '{}'.format(alarm_new_state['stateReason']),
# "fill": "above",
"value": float('{}'.format(alarm_new_state['stateReasonData'].threshold))
})
for datapoint in alarm_new_state['stateReasonData'].recentDatapoints:
horizontal_annotation.append({
"color": "#ff6961",
"label": datapoint,
# "fill": "above",
"value": float(datapoint)
})
        metric_request: Dict[str, Any] = {
"metrics": [
["AWS/EC2",
f'{metric_name}',
"InstanceId", f'{instance_id}',
{
"stat": '{}'.format(alarm_new_state['stateReasonData'].statistic),
"period": int('{}'.format(alarm_new_state['stateReasonData'].period))
}]
],
"height": 1024,
"width": 1024,
# "timezone": "+1100",
"start": "-PT3H",
"end": "+PT1H",
"liveData": True,
"annotations": {
"horizontal": horizontal_annotation,
"vertical": [
{
"color": "#9467bd",
"label": "start",
# "value":"2018-08-28T15:25:26Z",
# "value": (datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")),
"value": datetime.strptime('{}'.format(alarm_new_state['stateReasonData'].startDate), "%Y-%m-%dT%H:%M:%S.%f+0000").strftime("%Y-%m-%dT%H:%M:%SZ"),
# "fill": "after"
},
{
"color": "#9467bd",
"value": datetime.strptime('{}'.format(alarm_new_state['stateReasonData'].queryDate), "%Y-%m-%dT%H:%M:%S.%f+0000").strftime("%Y-%m-%dT%H:%M:%SZ"),
"label": "end"
}
]
}
}
print(f'{metric_request}')
response = cloudwatch_client.get_metric_widget_image(
MetricWidget=json.dumps(metric_request)
# OutputFormat='string'
)
image_name: str = f'{uuid.uuid4().hex}.jpeg'
upload_image_to_s3(
image_name, response["MetricWidgetImage"], aws_services)
except Exception as err:
print(err)
print('Failed because of above error.')
else:
return f'https://{s3_bucket}.s3-{aws_region}.amazonaws.com/{image_name}'
def upload_image_to_s3(image_name: str, image: bytearray, aws_services: Dict[str, Any]):
try:
s3_resource = aws_services['s3_resource']
s3_bucket: str = os.environ.get('S3_BUCKET_TO_STORE_GENERATED_IMAGES')
bucket = s3_resource.Bucket(f'{s3_bucket}')
bucket.put_object(Key=image_name,
ACL='public-read',
Body=image,
ContentType='image/jpeg'
)
except Exception as err:
print(err)
print('Failed because of above error')
|
py | 1a5256446d2e740013f1ddb6c4943df0de41d99b | """
ASGI config for guitars project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'guitars.settings')
application = get_asgi_application()
|
py | 1a5256a15f8378fed05245408657d18e18638ec6 | import FreeCAD, Part, Mesh
import math
DOC = FreeCAD.activeDocument()
DOC_NAME = "part_output_coil_without_windings"
def clear_doc():
# Clear the active document deleting all the objects
for obj in DOC.Objects:
DOC.removeObject(obj.Name)
def setview():
# Rearrange View
FreeCAD.Gui.SendMsgToActiveView("ViewFit")
FreeCAD.Gui.activeDocument().activeView().viewAxometric()
if DOC is None:
FreeCAD.newDocument(DOC_NAME)
FreeCAD.setActiveDocument(DOC_NAME)
DOC = FreeCAD.activeDocument()
else:
clear_doc()
# EPS= tolerance to use to cut the parts
EPS = 0.10
EPS_C = EPS * -0.5
L_vis_10m = 100
h_equerre = 2.2
h_ecrou_10m = 10
s_rondelle_10m = 2
D = 60
D_aimant = 16
D_winding = 26
D_percage_2_5 = 2.5
D_percage_5 = 5
h_coil = L_vis_10m - h_equerre*2 - s_rondelle_10m*2 - h_ecrou_10m - 5
cylinder_1 = Part.makeCylinder(D/2, h_coil)
cylinder_2 = Part.makeCylinder(D_aimant/2, h_coil)
cylinder_3 = Part.makeCylinder(D/2, h_coil - 2*3)
cylinder_4 = Part.makeCylinder(D_winding/2, h_coil - 2*3)
# cylinder_1 cut by cylinder_2
cylinder_1 = cylinder_1.cut(cylinder_2)
# cylinder_3 cut by cylinder_4
cylinder_3 = cylinder_3.cut(cylinder_4)
# cylinder_1 cut by cylinder_3
cylinder_3_vector = FreeCAD.Vector(0, 0, 3)
cylinder_3.translate(cylinder_3_vector)
cylinder_1 = cylinder_1.cut(cylinder_3)
# cylinder_1 cut by cylinder_5 in several times
degre = 20
for i in range(int(360/degre)):
radius = D_winding/2 - D_percage_2_5
alpha=(i*degre*math.pi)/180
cylinder_5_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
cylinder_5 = Part.makeCylinder(D_percage_2_5/2, h_coil)
cylinder_5.translate(cylinder_5_vector)
cylinder_1 = cylinder_1.cut(cylinder_5)
# cylinder_1 cut by cylinder_5 in several times
degre = 20
for i in range(int(360/degre)):
radius = D_winding/2 + D_percage_5
alpha=(i*degre*math.pi)/180
cylinder_5_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
cylinder_5 = Part.makeCylinder(D_percage_5/2, h_coil)
cylinder_5.translate(cylinder_5_vector)
cylinder_1 = cylinder_1.cut(cylinder_5)
# cylinder_1 cut by cylinder_5 in several times
degre = 20
for i in range(int(360/degre)):
radius = D_winding/2 + D_percage_5*2 + 2
alpha=(i*degre*math.pi)/180
cylinder_5_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
cylinder_5 = Part.makeCylinder(D_percage_5/2, h_coil)
cylinder_5.translate(cylinder_5_vector)
cylinder_1 = cylinder_1.cut(cylinder_5)
Part.show(cylinder_1)
DOC.recompute()
__objs__=[]
__objs__.append(FreeCAD.getDocument("part_output_coil_without_windings").getObject("Shape"))
stl_file = u"part_output_coil_without_windings.stl"
Mesh.export(__objs__, stl_file)
setview()
# Generate PNG files
file = 'part_output_coil_without_windings_'
# Shaded view
Gui.runCommand('Std_DrawStyle',5)
i = 1
Gui.activeDocument().activeView().viewIsometric()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewFront()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewTop()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewRight()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewRear()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewBottom()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewLeft()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
# Wireframe view
Gui.runCommand('Std_DrawStyle',2)
i += 1
Gui.activeDocument().activeView().viewIsometric()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewFront()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewTop()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewRight()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewRear()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewBottom()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
i += 1
Gui.activeDocument().activeView().viewLeft()
Gui.activeDocument().activeView().saveImage(file + str(i) + '.png',1117,388,'Current')
|
py | 1a5256a4ae7dd9bb6a8e3f67c1bf36914412ab38 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from PooledEffect import PooledEffect
from EffectController import EffectController
import os
class HealSparks(PooledEffect, EffectController):
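    # Pooled spark particle effect: a sphere-volume emitter renders additive-blended sprite sparks;
    # the tint is set per use via setEffectColor().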
cardScale = 64.0
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/particleSpark')
self.setDepthWrite(0)
self.setLightOff()
self.setFogOff()
self.setColorScaleOff()
self.effectColor = Vec4(1, 1, 1, 1)
self.f = ParticleEffect.ParticleEffect('HealSparks')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('SphereVolumeEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(64)
self.p0.setBirthRate(0.05)
self.p0.setLitterSize(4)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(0.5)
self.p0.factory.setLifespanSpread(0.25)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
self.p0.renderer.setUserAlpha(1.0)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1, 1, 1, 1))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(0)
self.p0.renderer.setInitialXScale(0.001 * self.cardScale)
self.p0.renderer.setFinalXScale(0.004 * self.cardScale)
self.p0.renderer.setInitialYScale(0.001 * self.cardScale)
self.p0.renderer.setFinalYScale(0.005 * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(0.0)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.p0.emitter.setRadius(1.0)
def createTrack(self, delay=0.0):
self.p0.renderer.setInitialXScale(0.001 * self.cardScale)
self.p0.renderer.setFinalXScale(0.004 * self.cardScale)
self.p0.renderer.setInitialYScale(0.001 * self.cardScale)
self.p0.renderer.setFinalYScale(0.005 * self.cardScale)
self.startEffect = Sequence(Wait(delay), Func(self.p0.clearToInitial), Func(self.p0.softStart), Func(self.f.start, self, self))
self.endEffect = Sequence(Func(self.p0.softStop), Wait(2.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(3.0), self.endEffect)
def setEffectColor(self, color):
self.effectColor = color
self.p0.renderer.getColorInterpolationManager().clearToInitial()
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, self.effectColor * 2.0, self.effectColor, 1)
def play(self, delay=0.0):
self.createTrack(delay)
self.track.start()
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
self.adjustIval = None
return |
py | 1a5257501af9a5317b38cf961d5d99894c7cd74d | from binascii import crc_hqx
from decimal import Decimal
from typing import Optional
def get_payload_format_indicator() -> str:
return "000201"
def get_merchant_account_information(key: str) -> str:
GUI = "0014BR.GOV.BCB.PIX"
string = "01{l:02d}{k}".format(l=len(key), k=key)
result = GUI + string
if len(result) > 99:
raise ValueError("PIX key is too long.")
return "26{l:02d}{r}".format(l=len(result), r=result)
def get_merchant_category_code() -> str:
return '52040000'
def get_transaction_currency() -> str:
return '5303986'
def get_transaction_value(value: Decimal) -> str:
if value <= Decimal('0.00'):
raise ValueError("Only positive decimals allowed.")
string = str(value)
return f"54{'{:02d}'.format(len(string))}{string}"
def get_country_code() -> str:
return '5802BR'
def get_merchant_name(name: str) -> str:
if len(name) > 25:
raise ValueError(
"Recipient name must be less than 25 characters long.")
return f"59{'{:02d}'.format(len(name))}{name}"
def get_merchant_city(city: str) -> str:
if len(city) > 15:
raise ValueError("Max of 15 characters for city name.")
return f"60{'{:02d}'.format(len(city))}{city}"
def get_additional_data_field_template(identifier: Optional[str] = None):
if not identifier:
identifier = '***'
if len(identifier) > 25:
raise ValueError("Only indentifiers with length less than 25 "
"characters are allowed.")
txid = f"05{'{:02d}'.format(len(identifier))}{identifier}"
return f"62{'{:02d}'.format(len(txid))}{txid}"
def get_crc16(payload: str) -> str:
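    # CRC16/CCITT-FALSE (poly 0x1021, seed 0xFFFF) computed over the payload plus the "6304" CRC field
    # header, as the static BR Code (PIX) payload expects.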
checksum = crc_hqx(bytes(payload + '6304', 'ascii'), 0xFFFF)
    return '{:04X}'.format(checksum)  # zero-pad to the 4 hex digits required for the CRC field
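# Illustrative sketch (not part of the original module): how these helpers could be composed into a full
# static BR Code payload. The key, name, city and value below are made-up placeholders.
#
#   payload = (get_payload_format_indicator()
#              + get_merchant_account_information("chave@example.com")
#              + get_merchant_category_code()
#              + get_transaction_currency()
#              + get_transaction_value(Decimal("10.00"))
#              + get_country_code()
#              + get_merchant_name("FULANO DE TAL")
#              + get_merchant_city("SAO PAULO")
#              + get_additional_data_field_template())
#   payload += "6304" + get_crc16(payload)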
|
py | 1a52578b8653f9d388759cb0adacdae036502733 | #!/usr/bin/python
"""
Script to upload images to wikipedia.
The following parameters are supported:
-keep Keep the filename as is
-filename: Target filename without the namespace prefix
-prefix: Add specified prefix to every filename.
-noverify Do not ask for verification of the upload description if one
is given
-abortonwarn: Abort upload on the specified warning type. If no warning type
is specified, aborts on any warning.
-ignorewarn: Ignores specified upload warnings. If no warning type is
specified, ignores all warnings. Use with caution
-chunked: Upload the file in chunks (more overhead, but restartable). If
no value is specified the chunk size is 1 MiB. The value must
be a number which can be preceded by a suffix. The units are:
No suffix: Bytes
'k': Kilobytes (1000 B)
'M': Megabytes (1000000 B)
'Ki': Kibibytes (1024 B)
'Mi': Mebibytes (1024x1024 B)
The suffixes are case insensitive.
-always Don't ask the user anything. This will imply -keep and
-noverify and require that either -abortonwarn or -ignorewarn
is defined for all. It will also require a valid file name and
description. It'll only overwrite files if -ignorewarn includes
the 'exists' warning.
-recursive When the filename is a directory it also uploads the files from
the subdirectories.
-summary: Pick a custom edit summary for the bot.
-descfile: Specify a filename where the description is stored
It is possible to combine -abortonwarn and -ignorewarn so that if the specific
warning is given it won't apply the general one but more specific one. So if it
should ignore specific warnings and abort on the rest it's possible by defining
no warning for -abortonwarn and the specific warnings for -ignorewarn. The
order does not matter. If both are unspecific or a warning is specified by
both, it'll prefer aborting.
If any other arguments are given, the first is either URL, filename or
directory to upload, and the rest is a proposed description to go with the
upload. If none of these are given, the user is asked for the directory, file
or URL to upload. The bot will then upload the image to the wiki.
The script will ask for the location of an image(s), if not given as a
parameter, and for a description.
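
Example (illustrative invocation; the flags are described above):

    python pwb.py upload -keep -noverify image.jpg "A description for the file"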
"""
#
# (C) Pywikibot team, 2003-2020
#
# Distributed under the terms of the MIT license.
#
import codecs
import math
import os
import re
import pywikibot
from pywikibot.bot import suggest_help
from pywikibot.specialbots import UploadRobot
CHUNK_SIZE_REGEX = re.compile(
r'-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$', re.I)
def get_chunk_size(match) -> int:
"""Get chunk size."""
if not match:
pywikibot.error('Chunk size parameter is not valid.')
chunk_size = 0
elif match.group(1): # number was in there
base = float(match.group(1))
if match.group(2): # suffix too
suffix = match.group(2).lower()
if suffix == 'k':
suffix = 1000
elif suffix == 'm':
suffix = 1000000
elif suffix == 'ki':
suffix = 1 << 10
elif suffix == 'mi':
suffix = 1 << 20
else:
suffix = 1
chunk_size = math.trunc(base * suffix)
else:
chunk_size = 1 << 20 # default to 1 MiB
return chunk_size
def main(*args) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
url = ''
description = []
summary = None
keep_filename = False
always = False
use_filename = None
filename_prefix = None
verify_description = True
aborts = set()
ignorewarn = set()
chunk_size = 0
recursive = False
description_file = None
# process all global bot args
# returns a list of non-global args, i.e. args for upload.py
local_args = pywikibot.handle_args(args)
for option in local_args:
arg, _, value = option.partition(':')
if arg == '-always':
keep_filename = True
always = True
verify_description = False
elif arg == '-recursive':
recursive = True
elif arg == '-keep':
keep_filename = True
elif arg == '-filename':
use_filename = value
elif arg == '-prefix':
filename_prefix = value
elif arg == '-summary':
summary = value
elif arg == '-noverify':
verify_description = False
elif arg == '-abortonwarn':
if value and aborts is not True:
aborts.add(value)
else:
aborts = True
elif arg == '-ignorewarn':
if value and ignorewarn is not True:
ignorewarn.add(value)
else:
ignorewarn = True
elif arg == '-chunked':
match = CHUNK_SIZE_REGEX.match(option)
chunk_size = get_chunk_size(match)
elif arg == '-descfile':
description_file = value
elif not url:
url = option
else:
description.append(option)
description = ' '.join(description)
if description_file:
if description:
pywikibot.error('Both a description and a -descfile were '
'provided. Please specify only one of those.')
return
with codecs.open(description_file,
encoding=pywikibot.config.textfile_encoding) as f:
description = f.read().replace('\r\n', '\n')
while not ('://' in url or os.path.exists(url)):
if not url:
error = 'No input filename given.'
else:
error = 'Invalid input filename given.'
if not always:
error += ' Try again.'
if always:
url = None
break
pywikibot.output(error)
url = pywikibot.input('URL, file or directory where files are now:')
if always and (aborts is not True and ignorewarn is not True
or not description or url is None):
additional = ''
missing = []
if url is None:
missing += ['filename']
additional = error + ' '
if description is None:
missing += ['description']
if aborts is not True and ignorewarn is not True:
additional += ('Either -ignorewarn or -abortonwarn must be '
'defined for all codes. ')
additional += 'Unable to run in -always mode'
suggest_help(missing_parameters=missing, additional_text=additional)
return
if os.path.isdir(url):
file_list = []
for directory_info in os.walk(url):
if not recursive:
# Do not visit any subdirectories
directory_info[1][:] = []
for dir_file in directory_info[2]:
file_list.append(os.path.join(directory_info[0], dir_file))
url = file_list
else:
url = [url]
bot = UploadRobot(url, description=description, use_filename=use_filename,
keep_filename=keep_filename,
verify_description=verify_description, aborts=aborts,
ignore_warning=ignorewarn, chunk_size=chunk_size,
always=always, summary=summary,
filename_prefix=filename_prefix)
bot.run()
if __name__ == '__main__':
main()
|
py | 1a525a6acb3436a21449ad8a3c58817757da27cc | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_data.config.ipynb (unless otherwise specified).
__all__ = ['RLE', 'Record', 'Info', 'Annotation', 'BBox', 'from_xyxy_abs', 'from_rle', 'Seg', 'from_polys', 'from_rle']
# Cell
from ..basics import *
# Cell
class RLE:
def __init__(self, v, h, w): store_attr(self, 'v,h,w')
@classmethod
def from_str(cls, s, h, w):
v = np.array(s.split(), dtype=np.uint)
return cls(v, h, w)
def __repr__(self): return str({'shape':(self.h,self.w), 'points':self.v})
# Cell
@patch
def decode(self:RLE):
'From https://www.kaggle.com/julienbeaulieu/imaterialist-detectron2'
mask = np.full(self.h*self.w, 0, dtype=np.uint8)
for i, start_pixel in enumerate(self.v[::2]):
mask[start_pixel: start_pixel+self.v[2*i+1]] = 1
mask = mask.reshape((self.h, self.w), order='F')
return mask
# Cell
@patch
def to_bbox(self:RLE):
'From https://www.kaggle.com/julienbeaulieu/imaterialist-detectron2'
shape = (self.h,self.w)
a = self.v
a = a.reshape((-1, 2)) # an array of (start, length) pairs
a[:,0] -= 1 # `start` is 1-indexed
y0 = a[:,0] % shape[0]
y1 = y0 + a[:,1]
if np.any(y1 > shape[0]):
# got `y` overrun, meaning that there are a pixels in mask on 0 and shape[0] position
y0 = 0
y1 = shape[0]
else:
y0 = np.min(y0)
y1 = np.max(y1)
x0 = a[:,0] // shape[0]
x1 = (a[:,0] + a[:,1]) // shape[0]
x0 = np.min(x0)
x1 = np.max(x1)
if x1 > shape[1]:
# just went out of the image dimensions
raise ValueError("invalid self or image dimensions: x1=%d > shape[1]=%d" % (
x1, shape[1]
))
return x0, y0, x1, y1
# Cell
class Record(Cfg):
def __init__(self, info, annons): self.info,self.annons = info,L(annons)
def to_cfg(self): return {**self.info.to_cfg(),
'annotations':[o.to_cfg() for o in self.annons]}
# Cell
class Info(Cfg):
def __init__(self, id, fn, h, w): store_attr(self, 'fn,id,h,w')
def to_cfg(self): return {'file_name':self.fn,'image_id':self.id,'height':self.h,'width':self.w}
# Cell
class Annotation(Cfg):
def __init__(self, id, bbox, seg, iscrowd=0): store_attr(self, 'id,bbox,seg,iscrowd')
def to_cfg(self): return {**self.bbox.to_cfg(), **self.seg.to_cfg(),
'category_id':self.id, 'iscrowd':self.iscrowd}
# Cell
class BBox(Cfg):
def __init__(self, pts, mode): self.pts,self.mode = list(map(int, pts)),mode
def to_cfg(self): return {'bbox':self.pts, 'bbox_mode':self.mode}
# Cell
@patch_classmethod
def from_xyxy_abs(cls:BBox, pts): return cls(pts, BoxMode.XYXY_ABS)
# Cell
@patch_classmethod
def from_rle(cls:BBox, rle): return cls(rle.to_bbox(), BoxMode.XYXY_ABS)
# Cell
class Seg(Cfg):
def __init__(self, polys): self.polys = polys
def to_cfg(self): return {'segmentation':self.polys}
# Cell
@patch_classmethod
def from_polys(cls:Seg, polys): return cls(polys)
# Cell
@patch_classmethod
def from_rle(cls:Seg, rle):
mask = rle.decode()
conts,_ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
seg = []
for cont in conts:
cont = cont.flatten().tolist()
if len(cont) > 4: seg.append(cont)
return cls(seg) |
py | 1a525bd54d44a74d1c8a0c818dbed7a6ffa89063 | # ------------
# User Instructions
#
# In this problem you will implement SLAM in a 2 dimensional
# world. Please define a function, slam, which takes five
# parameters as input and returns the vector mu. This vector
# should have x, y coordinates interlaced, so for example,
# if there were 2 poses and 2 landmarks, mu would look like:
#
# mu = matrix([[Px0],
# [Py0],
# [Px1],
# [Py1],
# [Lx0],
# [Ly0],
# [Lx1],
# [Ly1]])
#
# data - This is the data that is generated with the included
# make_data function. You can also use test_data to
# make sure your function gives the correct result.
#
# N - The number of time steps.
#
# num_landmarks - The number of landmarks.
#
# motion_noise - The noise associated with motion. The update
# strength for motion should be 1.0 / motion_noise.
#
# measurement_noise - The noise associated with measurement.
# The update strength for measurement should be
# 1.0 / measurement_noise.
#
#
# Enter your code at line 509
# --------------
# Testing
#
# Uncomment the test cases at the bottom of this document.
# Your output should be identical to the given results.
from math import *
import random
# ===============================================================
#
# SLAM in a rectolinear world (we avoid non-linearities)
#
#
# ===============================================================
# ------------------------------------------------
#
# this is the matrix class
# we use it because it makes it easier to collect constraints in GraphSLAM
# and to calculate solutions (albeit inefficiently)
#
class matrix:
# implements basic operations of a matrix class
# ------------
#
# initialization - can be called with an initial matrix
#
def __init__(self, value=[[]]):
self.value = value
self.dimx = len(value)
self.dimy = len(value[0])
if value == [[]]:
self.dimx = 0
# ------------
#
# makes matrix of a certain size and sets each element to zero
#
def zero(self, dimx, dimy):
if dimy == 0:
dimy = dimx
# check if valid dimensions
if dimx < 1 or dimy < 1:
raise ValueError, "Invalid size of matrix"
else:
self.dimx = dimx
self.dimy = dimy
self.value = [[0.0 for row in range(dimy)] for col in range(dimx)]
# ------------
#
# makes matrix of a certain (square) size and turns matrix into identity matrix
#
def identity(self, dim):
# check if valid dimension
if dim < 1:
raise ValueError, "Invalid size of matrix"
else:
self.dimx = dim
self.dimy = dim
self.value = [[0.0 for row in range(dim)] for col in range(dim)]
for i in range(dim):
self.value[i][i] = 1.0
# ------------
#
# prints out values of matrix
#
def show(self, txt=''):
for i in range(len(self.value)):
print txt + '[' + ', '.join('%.3f' % x for x in self.value[i]) + ']'
print ' '
# ------------
#
# defines elmement-wise matrix addition. Both matrices must be of equal dimensions
#
def __add__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimy != other.dimy:
raise ValueError, "Matrices must be of equal dimension to add"
else:
# add if correct dimensions
res = matrix()
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] + other.value[i][j]
return res
# ------------
#
# defines elmement-wise matrix subtraction. Both matrices must be of equal dimensions
#
def __sub__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimy != other.dimy:
raise ValueError, "Matrices must be of equal dimension to subtract"
else:
# subtract if correct dimensions
res = matrix()
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] - other.value[i][j]
return res
# ------------
#
# defines multiplication. Both matrices must be of fitting dimensions
#
def __mul__(self, other):
# check if correct dimensions
if self.dimy != other.dimx:
raise ValueError, "Matrices must be m*n and n*p to multiply"
else:
# multiply if correct dimensions
res = matrix()
res.zero(self.dimx, other.dimy)
for i in range(self.dimx):
for j in range(other.dimy):
for k in range(self.dimy):
res.value[i][j] += self.value[i][k] * other.value[k][j]
return res
# ------------
#
# returns a matrix transpose
#
def transpose(self):
# compute transpose
res = matrix()
res.zero(self.dimy, self.dimx)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[j][i] = self.value[i][j]
return res
# ------------
#
# creates a new matrix from the existing matrix elements.
#
# Example:
# l = matrix([[ 1, 2, 3, 4, 5],
# [ 6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15]])
#
# l.take([0, 2], [0, 2, 3])
#
# results in:
#
# [[1, 3, 4],
# [11, 13, 14]]
#
#
# take is used to remove rows and columns from existing matrices
# list1/list2 define a sequence of rows/columns that shall be taken
# if no list2 is provided, then list2 is set to list1 (good for
# symmetric matrices)
#
def take(self, list1, list2=[]):
if list2 == []:
list2 = list1
if len(list1) > self.dimx or len(list2) > self.dimy:
raise ValueError, "list invalid in take()"
res = matrix()
res.zero(len(list1), len(list2))
for i in range(len(list1)):
for j in range(len(list2)):
res.value[i][j] = self.value[list1[i]][list2[j]]
return res
# ------------
#
# creates a new matrix from the existing matrix elements.
#
# Example:
# l = matrix([[1, 2, 3],
# [4, 5, 6]])
#
# l.expand(3, 5, [0, 2], [0, 2, 3])
#
# results in:
#
# [[1, 0, 2, 3, 0],
# [0, 0, 0, 0, 0],
# [4, 0, 5, 6, 0]]
#
# expand is used to introduce new rows and columns into an existing matrix
# list1/list2 are the new indexes of row/columns in which the matrix
# elements are being mapped. Elements for rows and columns
# that are not listed in list1/list2
# will be initialized by 0.0.
#
def expand(self, dimx, dimy, list1, list2=[]):
if list2 == []:
list2 = list1
if len(list1) > self.dimx or len(list2) > self.dimy:
raise ValueError, "list invalid in expand()"
res = matrix()
res.zero(dimx, dimy)
for i in range(len(list1)):
for j in range(len(list2)):
res.value[list1[i]][list2[j]] = self.value[i][j]
return res
# ------------
#
# Computes the upper triangular Cholesky factorization of
# a positive definite matrix.
# This code is based on http://adorio-research.org/wordpress/?p=4560
#
def Cholesky(self, ztol=1.0e-5):
res = matrix()
res.zero(self.dimx, self.dimx)
for i in range(self.dimx):
S = sum([(res.value[k][i]) ** 2 for k in range(i)])
d = self.value[i][i] - S
if abs(d) < ztol:
res.value[i][i] = 0.0
else:
if d < 0.0:
raise ValueError, "Matrix not positive-definite"
res.value[i][i] = sqrt(d)
for j in range(i + 1, self.dimx):
S = sum([res.value[k][i] * res.value[k][j] for k in range(i)])
if abs(S) < ztol:
S = 0.0
try:
res.value[i][j] = (self.value[i][j] - S) / res.value[i][i]
except:
raise ValueError, "Zero diagonal"
return res
# ------------
#
# Computes inverse of matrix given its Cholesky upper Triangular
# decomposition of matrix.
# This code is based on http://adorio-research.org/wordpress/?p=4560
#
def CholeskyInverse(self):
res = matrix()
res.zero(self.dimx, self.dimx)
# Backward step for inverse.
for j in reversed(range(self.dimx)):
tjj = self.value[j][j]
S = sum([self.value[j][k] * res.value[j][k] for k in range(j + 1, self.dimx)])
res.value[j][j] = 1.0 / tjj ** 2 - S / tjj
for i in reversed(range(j)):
res.value[j][i] = res.value[i][j] = \
-sum([self.value[i][k] * res.value[k][j] for k in \
range(i + 1, self.dimx)]) / self.value[i][i]
return res
# ------------
#
# computes and returns the inverse of a square matrix
#
def inverse(self):
aux = self.Cholesky()
res = aux.CholeskyInverse()
return res
# ------------
#
# prints matrix (needs work!)
#
def __repr__(self):
return repr(self.value)
# ------------------------------------------------
#
# this is the robot class
#
# our robot lives in x-y space, and its motion is
# pointed in a random direction. It moves on a straight line
# until is comes close to a wall at which point it turns
# away from the wall and continues to move.
#
# For measurements, it simply senses the x- and y-distance
# to landmarks. This is different from range and bearing as
# commonly studied in the literature, but this makes it much
# easier to implement the essentials of SLAM without
# cluttered math
#
class robot:
# --------
# init:
# creates robot and initializes location to 0, 0
#
def __init__(self, world_size=100.0, measurement_range=30.0,
motion_noise=1.0, measurement_noise=1.0):
self.measurement_noise = 0.0
self.world_size = world_size
self.measurement_range = measurement_range
self.x = world_size / 2.0
self.y = world_size / 2.0
self.motion_noise = motion_noise
self.measurement_noise = measurement_noise
self.landmarks = []
self.num_landmarks = 0
def rand(self):
return random.random() * 2.0 - 1.0
# --------
#
# make random landmarks located in the world
#
def make_landmarks(self, num_landmarks):
self.landmarks = []
for i in range(num_landmarks):
self.landmarks.append([round(random.random() * self.world_size),
round(random.random() * self.world_size)])
self.num_landmarks = num_landmarks
# --------
#
# move: attempts to move robot by dx, dy. If outside world
# boundary, then the move does nothing and instead returns failure
#
def move(self, dx, dy):
x = self.x + dx + self.rand() * self.motion_noise
y = self.y + dy + self.rand() * self.motion_noise
if x < 0.0 or x > self.world_size or y < 0.0 or y > self.world_size:
return False
else:
self.x = x
self.y = y
return True
# --------
#
# sense: returns x- and y- distances to landmarks within visibility range
# because not all landmarks may be in this range, the list of measurements
# is of variable length. Set measurement_range to -1 if you want all
# landmarks to be visible at all times
#
def sense(self):
Z = []
for i in range(self.num_landmarks):
dx = self.landmarks[i][0] - self.x + self.rand() * self.measurement_noise
dy = self.landmarks[i][1] - self.y + self.rand() * self.measurement_noise
if self.measurement_range < 0.0 or abs(dx) + abs(dy) <= self.measurement_range:
Z.append([i, dx, dy])
return Z
# --------
#
# print robot location
#
def __repr__(self):
return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y)
######################################################
# --------
# this routine makes the robot data
#
def make_data(N, num_landmarks, world_size, measurement_range, motion_noise,
measurement_noise, distance):
complete = False
while not complete:
data = []
# make robot and landmarks
r = robot(world_size, measurement_range, motion_noise, measurement_noise)
r.make_landmarks(num_landmarks)
seen = [False for row in range(num_landmarks)]
# guess an initial motion
orientation = random.random() * 2.0 * pi
dx = cos(orientation) * distance
dy = sin(orientation) * distance
for k in range(N - 1):
# sense
Z = r.sense()
# check off all landmarks that were observed
for i in range(len(Z)):
seen[Z[i][0]] = True
# move
while not r.move(dx, dy):
# if we'd be leaving the robot world, pick instead a new direction
orientation = random.random() * 2.0 * pi
dx = cos(orientation) * distance
dy = sin(orientation) * distance
# memorize data
data.append([Z, [dx, dy]])
# we are done when all landmarks were observed; otherwise re-run
complete = (sum(seen) == num_landmarks)
print ' '
print 'Landmarks: ', r.landmarks
print r
return data
####################################################
# --------------------------------
#
# print the result of SLAM, the robot pose(s) and the landmarks
#
def print_result(N, num_landmarks, result):
print
print 'Estimated Pose(s):'
for i in range(N):
print ' [' + ', '.join('%.3f' % x for x in result.value[2 * i]) + ', ' \
+ ', '.join('%.3f' % x for x in result.value[2 * i + 1]) + ']'
print
print 'Estimated Landmarks:'
for i in range(num_landmarks):
print ' [' + ', '.join('%.3f' % x for x in result.value[2 * (N + i)]) + ', ' \
+ ', '.join('%.3f' % x for x in result.value[2 * (N + i) + 1]) + ']'
# --------------------------------
#
# slam - retains entire path and all landmarks
#
############## ENTER YOUR CODE BELOW HERE ###################
def slam(data, N, num_landmarks, motion_noise, measurement_noise):
#
#
# Add your code here!
#
#
return mu # Make sure you return mu for grading!
############### ENTER YOUR CODE ABOVE HERE ###################
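# --------------------------------
# The function below is NOT the assignment solution and is not called by the grading code;
# it is one possible GraphSLAM sketch (linear information-matrix formulation), added here only
# for illustration. It uses just the matrix class defined above and the module-level world_size.
def slam_sketch(data, N, num_landmarks, motion_noise, measurement_noise):
    dim = 2 * (N + num_landmarks)
    Omega = matrix()
    Omega.zero(dim, dim)
    Xi = matrix()
    Xi.zero(dim, 1)
    # anchor the initial pose at the middle of the world
    for b in range(2):
        Omega.value[b][b] += 1.0
        Xi.value[b][0] += world_size / 2.0
    for k in range(len(data)):
        measurements, motion = data[k]
        x = 2 * k  # index of pose k inside mu
        # measurement constraints: pose k <-> observed landmark
        for lm_id, dx, dy in measurements:
            m = 2 * (N + lm_id)
            for b, delta in enumerate((dx, dy)):
                Omega.value[x + b][x + b] += 1.0 / measurement_noise
                Omega.value[m + b][m + b] += 1.0 / measurement_noise
                Omega.value[x + b][m + b] += -1.0 / measurement_noise
                Omega.value[m + b][x + b] += -1.0 / measurement_noise
                Xi.value[x + b][0] += -delta / measurement_noise
                Xi.value[m + b][0] += delta / measurement_noise
        # motion constraint: pose k <-> pose k+1
        for b, delta in enumerate(motion):
            Omega.value[x + b][x + b] += 1.0 / motion_noise
            Omega.value[x + 2 + b][x + 2 + b] += 1.0 / motion_noise
            Omega.value[x + b][x + 2 + b] += -1.0 / motion_noise
            Omega.value[x + 2 + b][x + b] += -1.0 / motion_noise
            Xi.value[x + b][0] += -delta / motion_noise
            Xi.value[x + 2 + b][0] += delta / motion_noise
    # solve the linear system: mu = Omega^-1 * Xi
    mu = Omega.inverse() * Xi
    return mu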
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#
# Main routines
#
num_landmarks = 5 # number of landmarks
N = 20 # time steps
world_size = 100.0 # size of world
measurement_range = 50.0 # range at which we can sense landmarks
motion_noise = 2.0 # noise in robot motion
measurement_noise = 2.0 # noise in the measurements
distance = 20.0 # distance by which robot (intends to) move each iteratation
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)
result = slam(data, N, num_landmarks, motion_noise, measurement_noise)
print_result(N, num_landmarks, result)
# -------------
# Testing
#
# Uncomment one of the test cases below to compare your results to
# the results shown for Test Case 1 and Test Case 2.
test_data1 = [[[[1, 19.457599255548065, 23.8387362100849], [2, -13.195807561967236, 11.708840328458608],
[3, -30.0954905279171, 15.387879242505843]], [-12.2607279422326, -15.801093326936487]],
[[[2, -0.4659930049620491, 28.088559771215664], [4, -17.866382374890936, -16.384904503932]],
[-12.2607279422326, -15.801093326936487]],
[[[4, -6.202512900833806, -1.823403210274639]], [-12.2607279422326, -15.801093326936487]],
[[[4, 7.412136480918645, 15.388585962142429]], [14.008259661173426, 14.274756084260822]],
[[[4, -7.526138813444998, -0.4563942429717849]], [14.008259661173426, 14.274756084260822]],
[[[2, -6.299793150150058, 29.047830407717623], [4, -21.93551130411791, -13.21956810989039]],
[14.008259661173426, 14.274756084260822]],
[[[1, 15.796300959032276, 30.65769689694247], [2, -18.64370821983482, 17.380022987031367]],
[14.008259661173426, 14.274756084260822]],
[[[1, 0.40311325410337906, 14.169429532679855], [2, -35.069349468466235, 2.4945558982439957]],
[14.008259661173426, 14.274756084260822]],
[[[1, -16.71340983241936, -2.777000269543834]], [-11.006096015782283, 16.699276945166858]],
[[[1, -3.611096830835776, -17.954019226763958]], [-19.693482634035977, 3.488085684573048]],
[[[1, 18.398273354362416, -22.705102332550947]], [-19.693482634035977, 3.488085684573048]],
[[[2, 2.789312482883833, -39.73720193121324]], [12.849049222879723, -15.326510824972983]], [
[[1, 21.26897046581808, -10.121029799040915], [2, -11.917698965880655, -23.17711662602097],
[3, -31.81167947898398, -16.7985673023331]], [12.849049222879723, -15.326510824972983]], [
[[1, 10.48157743234859, 5.692957082575485], [2, -22.31488473554935, -5.389184118551409],
[3, -40.81803984305378, -2.4703329790238118]], [12.849049222879723, -15.326510824972983]], [
[[0, 10.591050242096598, -39.2051798967113], [1, -3.5675572049297553, 22.849456408289125],
[2, -38.39251065320351, 7.288990306029511]], [12.849049222879723, -15.326510824972983]],
[[[0, -3.6225556479370766, -25.58006865235512]], [-7.8874682868419965, -18.379005523261092]],
[[[0, 1.9784503557879374, -6.5025974151499]], [-7.8874682868419965, -18.379005523261092]],
[[[0, 10.050665232782423, 11.026385307998742]], [-17.82919359778298, 9.062000642947142]],
[[[0, 26.526838150174818, -0.22563393232425621], [4, -33.70303936886652, 2.880339841013677]],
[-17.82919359778298, 9.062000642947142]]]
test_data2 = [[[[0, 26.543274387283322, -6.262538160312672], [3, 9.937396825799755, -9.128540360867689]],
[18.92765331253674, -6.460955043986683]], [
[[0, 7.706544739722961, -3.758467215445748], [1, 17.03954411948937, 31.705489938553438],
[3, -11.61731288777497, -6.64964096716416]], [18.92765331253674, -6.460955043986683]], [
[[0, -12.35130507136378, 2.585119104239249], [1, -2.563534536165313, 38.22159657838369],
[3, -26.961236804740935, -0.4802312626141525]], [-11.167066095509824, 16.592065417497455]], [
[[0, 1.4138633151721272, -13.912454837810632], [1, 8.087721200818589, 20.51845934354381],
[3, -17.091723454402302, -16.521500551709707], [4, -7.414211721400232, 38.09191602674439]],
[-11.167066095509824, 16.592065417497455]], [
[[0, 12.886743222179561, -28.703968411636318], [1, 21.660953298391387, 3.4912891084614914],
[3, -6.401401414569506, -32.321583037341625], [4, 5.034079343639034, 23.102207946092893]],
[-11.167066095509824, 16.592065417497455]], [
[[1, 31.126317672358578, -10.036784369535214], [2, -38.70878528420893, 7.4987265861424595],
[4, 17.977218575473767, 6.150889254289742]], [-6.595520680493778, -18.88118393939265]],
[[[1, 41.82460922922086, 7.847527392202475], [3, 15.711709540417502, -30.34633659912818]],
[-6.595520680493778, -18.88118393939265]],
[[[0, 40.18454208294434, -6.710999804403755], [3, 23.019508919299156, -10.12110867290604]],
[-6.595520680493778, -18.88118393939265]],
[[[3, 27.18579315312821, 8.067219022708391]], [-6.595520680493778, -18.88118393939265]],
[[], [11.492663265706092, 16.36822198838621]],
[[[3, 24.57154567653098, 13.461499960708197]], [11.492663265706092, 16.36822198838621]],
[[[0, 31.61945290413707, 0.4272295085799329], [3, 16.97392299158991, -5.274596836133088]],
[11.492663265706092, 16.36822198838621]], [
[[0, 22.407381798735177, -18.03500068379259], [1, 29.642444125196995, 17.3794951934614],
[3, 4.7969752441371645, -21.07505361639969], [4, 14.726069092569372, 32.75999422300078]],
[11.492663265706092, 16.36822198838621]], [
[[0, 10.705527984670137, -34.589764174299596], [1, 18.58772336795603, -0.20109708164787765],
[3, -4.839806195049413, -39.92208742305105], [4, 4.18824810165454, 14.146847823548889]],
[11.492663265706092, 16.36822198838621]],
[[[1, 5.878492140223764, -19.955352450942357], [4, -7.059505455306587, -0.9740849280550585]],
[19.628527845173146, 3.83678180657467]],
[[[1, -11.150789592446378, -22.736641053247872], [4, -28.832815721158255, -3.9462962046291388]],
[-19.841703647091965, 2.5113335861604362]],
[[[1, 8.64427397916182, -20.286336970889053], [4, -5.036917727942285, -6.311739993868336]],
[-5.946642674882207, -19.09548221169787]], [
[[0, 7.151866679283043, -39.56103232616369], [1, 16.01535401373368, -3.780995345194027],
[4, -3.04801331832137, 13.697362774960865]], [-5.946642674882207, -19.09548221169787]], [
[[0, 12.872879480504395, -19.707592098123207], [1, 22.236710716903136, 16.331770792606406],
[3, -4.841206109583004, -21.24604435851242], [4, 4.27111163223552, 32.25309748614184]],
[-5.946642674882207, -19.09548221169787]]]
## Test Case 1
##
## Estimated Pose(s):
## [49.999, 49.999]
## [37.971, 33.650]
## [26.183, 18.153]
## [13.743, 2.114]
## [28.095, 16.781]
## [42.383, 30.900]
## [55.829, 44.494]
## [70.855, 59.697]
## [85.695, 75.540]
## [74.010, 92.431]
## [53.543, 96.451]
## [34.523, 100.078]
## [48.621, 83.951]
## [60.195, 68.105]
## [73.776, 52.932]
## [87.130, 38.536]
## [80.301, 20.506]
## [72.797, 2.943]
## [55.244, 13.253]
## [37.414, 22.315]
##
## Estimated Landmarks:
## [82.954, 13.537]
## [70.493, 74.139]
## [36.738, 61.279]
## [18.696, 66.057]
## [20.633, 16.873]
## Test Case 2
##
## Estimated Pose(s):
## [49.999, 49.999]
## [69.180, 45.664]
## [87.742, 39.702]
## [76.269, 56.309]
## [64.316, 72.174]
## [52.256, 88.151]
## [44.058, 69.399]
## [37.001, 49.916]
## [30.923, 30.953]
## [23.507, 11.417]
## [34.179, 27.131]
## [44.154, 43.844]
## [54.805, 60.919]
## [65.697, 78.544]
## [77.467, 95.624]
## [96.801, 98.819]
## [75.956, 99.969]
## [70.199, 81.179]
## [64.053, 61.721]
## [58.106, 42.626]
##
## Estimated Landmarks:
## [76.778, 42.885]
## [85.064, 77.436]
## [13.546, 95.649]
## [59.448, 39.593]
## [69.262, 94.238]
### Uncomment the following three lines for test case 1 ###
# result = slam(test_data1, 20, 5, 2.0, 2.0)
# print_result(20, 5, result)
# print result
### Uncomment the following three lines for test case 2 ###
# result = slam(test_data2, 20, 5, 2.0, 2.0)
# print_result(20, 5, result)
# print result
|
py | 1a525be227a5834c6d32608d2801bc86b2b0a0f5 | # -*- coding: utf-8 -*-
"""
Module containing utilities to create/manipulate grids.
"""
import logging
import math
from typing import List, Tuple, Union
import geopandas as gpd
import pyproj
import shapely.ops as sh_ops
import shapely.geometry as sh_geom
#-------------------------------------------------------------
# First define/init some general variables/constants
#-------------------------------------------------------------
# Get a logger...
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
#-------------------------------------------------------------
# Grid tile helpers
#-------------------------------------------------------------
def create_grid(
total_bounds: Tuple[float, float, float, float],
nb_columns: int,
nb_rows: int,
crs: Union[pyproj.CRS, str, None]) -> gpd.GeoDataFrame:
xmin, ymin, xmax, ymax = total_bounds
width = (xmax-xmin)/nb_columns
height = (ymax-ymin)/nb_rows
return create_grid3(total_bounds=total_bounds, width=width, height=height, crs=crs)
def create_grid3(
total_bounds: Tuple[float, float, float, float],
width: float,
height: float,
crs: Union[pyproj.CRS, str, None]) -> gpd.GeoDataFrame:
"""
Args:
total_bounds (Tuple[float, float, float, float]): [description]
width (float): [description]
height (float): [description]
crs (Union[pyproj.CRS, str, None]): [description]
number_decimals (int, optional): The number of decimals the coordinates
of the grid will have. Defaults to None, so no rounding.
Returns:
gpd.GeoDataFrame: [description]
"""
xmin, ymin, xmax, ymax = total_bounds
rows = int(math.ceil((ymax-ymin) / height))
cols = int(math.ceil((xmax-xmin) / width))
polygons = []
cell_left = xmin
cell_right = xmin + width
for _ in range(cols):
if cell_left > xmax:
break
cell_top = ymin + height
cell_bottom = ymin
for _ in range(rows):
if cell_bottom > ymax:
break
polygons.append(sh_ops.Polygon([(cell_left, cell_top), (cell_right, cell_top), (cell_right, cell_bottom), (cell_left, cell_bottom)]))
cell_top += height
cell_bottom += height
cell_left += width
cell_right += width
return gpd.GeoDataFrame({'geometry': polygons}, crs=crs)
def create_grid2(
total_bounds: Tuple[float, float, float, float],
nb_squarish_tiles: int,
crs: Union[pyproj.CRS, str, None],
nb_squarish_tiles_max: int = None) -> gpd.GeoDataFrame:
"""
Creates a grid and tries to approximate the number of cells asked as
good as possible with grid cells that as close to square as possible.
Args:
total_bounds (Tuple[float, float, float, float]): bounds of the grid to be created
        nb_squarish_tiles (int): approximate number of tiles wanted
        crs (CRS): the projection to create the grid in
        nb_squarish_tiles_max (int, optional): the maximum number of tiles allowed
Returns:
gpd.GeoDataFrame: geodataframe with the grid
"""
# Check input
if nb_squarish_tiles_max is not None and nb_squarish_tiles_max < 1:
raise Exception("The maximum nb of tiles should be larger than 1")
# If more cells asked, calculate optimal number
xmin, ymin, xmax, ymax = total_bounds
total_width = xmax-xmin
total_height = ymax-ymin
columns_vs_rows = total_width/total_height
nb_rows = max(round(math.sqrt(nb_squarish_tiles/columns_vs_rows)), 1)
# Evade having too many cells (if few cells are asked)
if nb_rows > nb_squarish_tiles:
nb_rows = nb_squarish_tiles
nb_columns = max(round(nb_squarish_tiles/nb_rows), 1)
# If a maximum number of tiles is specified, check it
if nb_squarish_tiles_max is not None:
while((nb_rows * nb_columns) > nb_squarish_tiles_max):
# If the number of cells became larger than the max number of cells,
# increase the number of cells in the direction of the longest side
# of the resulting cells
if(nb_columns > 1
and (nb_rows == 1
or total_width/nb_columns > total_height/nb_rows)):
# Cell width is larger than cell height
nb_columns -= 1
else:
nb_rows -= 1
# Now we know everything to create the grid
return create_grid(
total_bounds=total_bounds,
nb_columns=nb_columns,
nb_rows=nb_rows,
crs=crs)
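# Illustrative usage (hypothetical bounds and CRS):
#   grid_gdf = create_grid2(total_bounds=(0.0, 0.0, 30000.0, 20000.0), nb_squarish_tiles=12, crs="EPSG:31370")
#   # -> GeoDataFrame with roughly 12 near-square tiles covering the bounds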
def split_tiles(
input_tiles: gpd.GeoDataFrame,
nb_tiles_wanted: int) -> gpd.GeoDataFrame:
nb_tiles = len(input_tiles)
if nb_tiles >= nb_tiles_wanted:
return input_tiles
nb_tiles_ratio_target = nb_tiles_wanted / nb_tiles
# Loop over all tiles in the grid
result_tiles = []
for tile in input_tiles.itertuples():
# For this tile, as long as the curr_nb_tiles_ratio_todo is not 1, keep splitting
curr_nb_tiles_ratio_todo = nb_tiles_ratio_target
curr_tiles_being_split = [tile.geometry]
while curr_nb_tiles_ratio_todo > 1:
# Check in how many parts the tiles are split in this iteration
divisor = 0
if round(curr_nb_tiles_ratio_todo) == 3:
divisor = 3
else:
divisor = 2
curr_nb_tiles_ratio_todo /= divisor
# Split all current tiles
tmp_tiles_after_split = []
for tile_to_split in curr_tiles_being_split:
xmin, ymin, xmax, ymax = tile_to_split.bounds
width = abs(xmax-xmin)
height = abs(ymax-ymin)
# Split in 2 or 3...
if divisor == 3:
if width > height:
split_line = sh_geom.LineString([
(xmin+width/3, ymin-1), (xmin+width/3, ymax+1),
(xmin+2*width/3, ymax+1), (xmin+2*width/3, ymin-1)])
else:
split_line = sh_geom.LineString([
(xmin-1, ymin+height/3), (xmax+1, ymin+height/3),
(xmax+1, ymin+2*height/3), (xmin-1, ymin+2*height/3)])
else:
if width > height:
split_line = sh_geom.LineString([(xmin+width/2, ymin-1), (xmin+width/2, ymax+1)])
else:
split_line = sh_geom.LineString([(xmin-1, ymin+height/2), (xmax+1, ymin+height/2)])
tmp_tiles_after_split.extend(sh_ops.split(tile_to_split, split_line))
curr_tiles_being_split = tmp_tiles_after_split
result_tiles.extend(curr_tiles_being_split)
# We should be ready...
return gpd.GeoDataFrame(geometry=result_tiles, crs=input_tiles.crs)
|
py | 1a525c9783b5cedf570b7be92a1238a4b4e538b1 | #!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# wgblog directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "wgblog"))
execute_from_command_line(sys.argv)
|
py | 1a525d306e9f52a6aaf657db4a6ab830226985f6 | import numpy as np
import pytest
import pandas as pd
import pandas.testing as tm
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [13.0, 123.23], "d": [13.0, 123.0], "e": [13.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"]],
{
"c": [13.0, 12.3, 123.23],
"d": [13.0, 233.0, 123.0],
"e": [13.0, 12.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_one_group(
dropna, tuples, outputs, nulls_fixture
):
# GH 3729 this is to test that NA is in one group
df_list = [
["A", "B", 12, 12, 12],
["A", nulls_fixture, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
["A", "B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
mi = mi.set_levels(["A", "B", np.nan], level="b")
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [12.0, 123.23], "d": [12.0, 123.0], "e": [12.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"], [np.nan, "B"]],
{
"c": [12.0, 13.3, 123.23, 1.0],
"d": [12.0, 234.0, 123.0, 1.0],
"e": [12.0, 13.0, 1.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups(
dropna, tuples, outputs, nulls_fixture, nulls_fixture2
):
# GH 3729 this is to test that NA in different groups with different representations
df_list = [
["A", "B", 12, 12, 12],
["A", nulls_fixture, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
[nulls_fixture2, "B", 1, 1, 1.0],
["A", nulls_fixture2, 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]])
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, idx, outputs",
[
(True, ["A", "B"], {"b": [123.23, 13.0], "c": [123.0, 13.0], "d": [1.0, 13.0]}),
(
False,
["A", "B", np.nan],
{
"b": [123.23, 13.0, 12.3],
"c": [123.0, 13.0, 233.0],
"d": [1.0, 13.0, 12.0],
},
),
],
)
def test_groupby_dropna_normal_index_dataframe(dropna, idx, outputs):
# GH 3729
df_list = [
["B", 12, 12, 12],
[None, 12.3, 233.0, 12],
["A", 123.23, 123, 1],
["B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d"])
grouped = df.groupby("a", dropna=dropna).sum()
expected = pd.DataFrame(outputs, index=pd.Index(idx, dtype="object", name="a"))
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, idx, expected",
[
(True, ["a", "a", "b", np.nan], pd.Series([3, 3], index=["a", "b"])),
(
False,
["a", "a", "b", np.nan],
pd.Series([3, 3, 3], index=["a", "b", np.nan]),
),
],
)
def test_groupby_dropna_series_level(dropna, idx, expected):
ser = pd.Series([1, 2, 3, 3], index=idx)
result = ser.groupby(level=0, dropna=dropna).sum()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected",
[
(True, pd.Series([210.0, 350.0], index=["a", "b"], name="Max Speed")),
(
False,
pd.Series([210.0, 350.0, 20.0], index=["a", "b", np.nan], name="Max Speed"),
),
],
)
def test_groupby_dropna_series_by(dropna, expected):
ser = pd.Series(
[390.0, 350.0, 30.0, 20.0],
index=["Falcon", "Falcon", "Parrot", "Parrot"],
name="Max Speed",
)
result = ser.groupby(["a", "b", "a", np.nan], dropna=dropna).mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [13.0, 123.23], "d": [12.0, 123.0], "e": [1.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"]],
{
"c": [13.0, 12.3, 123.23],
"d": [12.0, 233.0, 123.0],
"e": [1.0, 12.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs):
# GH 3729
df_list = [
["A", "B", 12, 12, 12],
["A", None, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
["A", "B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
agg_dict = {"c": sum, "d": max, "e": "min"}
grouped = df.groupby(["a", "b"], dropna=dropna).agg(agg_dict)
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
mi = mi.set_levels(["A", "B", np.nan], level="b")
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"datetime1, datetime2",
[
(pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")),
(pd.Timedelta("-2 days"), pd.Timedelta("-1 days")),
(pd.Period("2020-01-01"), pd.Period("2020-02-01")),
],
)
@pytest.mark.parametrize(
"dropna, values", [(True, [12, 3]), (False, [12, 3, 6],)],
)
def test_groupby_dropna_datetime_like_data(
dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2
):
# 3729
df = pd.DataFrame(
{
"values": [1, 2, 3, 4, 5, 6],
"dt": [
datetime1,
unique_nulls_fixture,
datetime2,
unique_nulls_fixture2,
datetime1,
datetime1,
],
}
)
if dropna:
indexes = [datetime1, datetime2]
else:
indexes = [datetime1, datetime2, np.nan]
grouped = df.groupby("dt", dropna=dropna).agg({"values": sum})
expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt"))
tm.assert_frame_equal(grouped, expected)
|
py | 1a525da1af07e3da9fde07ba960fde3d09f64edd | import argparse
import json
import torch
from scripts.default_config import (get_default_config, imagedata_kwargs,
model_kwargs, merge_from_files_with_base)
import torchreid
from torchreid.utils import collect_env_info, set_random_seed
from ptflops import get_model_complexity_info
def build_datamanager(cfg, classification_classes_filter=None):
return torchreid.data.ImageDataManager(filter_classes=classification_classes_filter, **imagedata_kwargs(cfg))
def reset_config(cfg, args):
if args.root:
cfg.data.root = args.root
if args.custom_roots:
cfg.custom_datasets.roots = args.custom_roots
if args.custom_types:
cfg.custom_datasets.types = args.custom_types
if args.custom_names:
cfg.custom_datasets.names = args.custom_names
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config-file', type=str, default='', required=True,
help='path to config file')
parser.add_argument('--custom-roots', type=str, nargs='+',
help='types or paths to annotation of custom datasets (delimited by space)')
parser.add_argument('--custom-types', type=str, nargs='+',
help='path of custom datasets (delimited by space)')
parser.add_argument('--custom-names', type=str, nargs='+',
help='names of custom datasets (delimited by space)')
parser.add_argument('--root', type=str, default='', help='path to data root')
parser.add_argument('--classes', type=str, nargs='+',
help='name of classes in classification dataset')
parser.add_argument('--out')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
help='Modify config options using the command-line')
args = parser.parse_args()
cfg = get_default_config()
cfg.use_gpu = torch.cuda.is_available()
if args.config_file:
merge_from_files_with_base(cfg, args.config_file)
reset_config(cfg, args)
cfg.merge_from_list(args.opts)
set_random_seed(cfg.train.seed)
print('Show configuration\n{}\n'.format(cfg))
print('Collecting env info ...')
print('** System info **\n{}\n'.format(collect_env_info()))
if cfg.use_gpu:
torch.backends.cudnn.benchmark = True
datamanager = build_datamanager(cfg, args.classes)
num_train_classes = datamanager.num_train_pids
print('Building main model: {}'.format(cfg.model.name))
model = torchreid.models.build_model(**model_kwargs(cfg, num_train_classes))
macs, num_params = get_model_complexity_info(model, (3, cfg.data.height, cfg.data.width),
as_strings=False, verbose=False, print_per_layer_stat=False)
print('Main model complexity: M params={:,} G flops={:,}'.format(num_params / 10**6, macs * 2 / 10**9))
if args.out:
out = list()
out.append({'key': 'size', 'display_name': 'Size', 'value': num_params / 10**6, 'unit': 'Mp'})
out.append({'key': 'complexity', 'display_name': 'Complexity', 'value': 2 * macs / 10**9,
'unit': 'GFLOPs'})
        print('dump to ' + args.out)
with open(args.out, 'w') as write_file:
json.dump(out, write_file, indent=4)
if __name__ == '__main__':
main()
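# Example invocation (illustrative; the script name and config path are assumptions, not
# part of this repository):
#
#     python get_model_complexity.py --config-file configs/model.yaml --out complexity.json
#
# When --out is given, the JSON written above contains two entries of the form
#     [{"key": "size", "display_name": "Size", "value": ..., "unit": "Mp"},
#      {"key": "complexity", "display_name": "Complexity", "value": ..., "unit": "GFLOPs"}]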
|
py | 1a525e5f38c854dcecb9d3255252b281e706a247 | from django.shortcuts import render, HttpResponse
from django.http import JsonResponse
from rest_framework.views import APIView
from smtest import models
from smtest.utils.permission import SVIPPermission
from smtest.utils.throttle import TestThrottle,VisitThrottle
from rest_framework.versioning import QueryParameterVersioning, URLPathVersioning
def md5(username):
import hashlib
import time
ctime = str(time.time())
m = hashlib.md5(bytes(username, encoding='utf-8'))
m.update(bytes(ctime, encoding='utf-8'))
return m.hexdigest()
# Username/password authentication to obtain a token (rest_framework already implements token generation internally)
class AuthView(APIView):
    authentication_classes = []     # no authentication is performed for this view
    permission_classes = []     # no permission checks either (even without authentication, rest framework treats the request as an anonymous user and would still run permission checks)
    throttle_classes = [VisitThrottle,]    # throttling control
def post(self,request,*args,**kwargs):
ret = {'code': 1000, 'msg': None}
try:
username = request._request.POST.get('username')
password = request._request.POST.get('password')
user = models.UserInfo.objects.filter(username=username, password=password).first()
if not user:
ret['code'] = 1001
ret['msg'] = "用户名或密码错误"
return JsonResponse(ret)
            # generate a token
token = md5(username)
models.UserToken.objects.update_or_create(user=user, defaults={'token': token})
ret['token'] = token
except Exception as e:
print(e)
ret['code'] = 1002
ret['msg'] = "未知错误"
return JsonResponse(ret)
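# Illustrative client flow (hypothetical; the URL below depends on this project's urls.py,
# which is not shown here):
#
#     resp = requests.post("http://localhost:8000/api/auth/",
#                          data={"username": "alice", "password": "secret"})
#     token = resp.json()["token"]          # only present when code == 1000
#
# The token is then sent with later requests so the project's authentication class
# (not shown here) can look it up in UserToken and resolve the user.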
class StudentsView(APIView):
def get(self,request,*args,**kwargs):
        self.dispatch  # enters APIView's dispatch
print(request.user, request.auth)
print('get...')
return HttpResponse('GET...')
def post(self,request,*args,**kwargs):
return HttpResponse('POST...')
def put(self,request,*args,**kwargs):
return HttpResponse('PUT...')
def delete(self,request,*args,**kwargs):
return HttpResponse('DELETE...')
class OrderView(APIView):
    # per-view permission classes
permission_classes = [SVIPPermission,]
throttle_classes = [TestThrottle,]
def get(self,request,*args,**kwargs):
return HttpResponse('svip order...') |
py | 1a525f4aca8159b07f228e89b1ba6de74fbb1e4a | import numpy as np
class PriorProbability():
def __init__(self):
"""
This is a simple classifier that only uses prior probability to classify
points. It just looks at the classes for each data point and always predicts
the most common class.
"""
self.most_common_class = None
def fit(self, features, targets):
"""
Implement a classifier that works by prior probability. Takes in features
and targets and fits the features to the targets using prior probability.
Args:
features (np.array): numpy array of size NxF containing features, where N is
number of examples and F is number of features.
targets (np.array): numpy array containing class labels for each of the N
examples.
"""
counts = np.bincount(targets.astype(int))
self.most_common_class = np.argmax(counts)
def predict(self, data):
"""
Takes in features as a numpy array and predicts classes for each point using
the trained model.
Args:
            data (np.array): numpy array of size NxF containing features, where N is
number of examples and F is number of features.
"""
return np.full(data.shape[0], self.most_common_class) |
py | 1a5262188b5d123767e21450b9ed2fd137f04e01 | import torch
from torch.ao.quantization.observer import ObserverBase
class ModelReportObserver(ObserverBase):
r"""This observer is used to record additional information regarding keeping track
of S = average_batch_activation_range/epoch_activation_range.
The purpose of this information is to prepare a report to present to users on whether
Dynamic or Static Quantization is more appropriate for their model given the general
distributions of their data.
* :attr:`num_batches_tracked` specifies number of batches passed through the observer
* :attr:`average_batch_activation_range` defines average across the ranges of each batch passed through
* :attr:`epoch_activation_min` defines the minimum value passed through the observer
* :attr:`epoch_activation_max` defines the maximum value passed through the observer
Note: this tool is meant for FX Graph Mode Quantization
"""
def __init__(self):
super().__init__(torch.qint8)
self.num_batches_tracked = 0
# keep track of the min and mix of the range for average batch and epoch as a whole
self.average_batch_activation_range = torch.tensor(float(0))
self.epoch_activation_min = torch.tensor(float("inf"))
self.epoch_activation_max = torch.tensor(float("-inf"))
def forward(self, x):
x_copy = x.detach() # avoid keeping autograd tape
x_copy = x_copy.to(self.epoch_activation_min.dtype)
min_val_cur, max_val_cur = torch.aminmax(x_copy)
# calculate new epoch range values
epoch_min_val = torch.min(self.epoch_activation_min, min_val_cur)
epoch_max_val = torch.max(self.epoch_activation_max, max_val_cur)
self.epoch_activation_min.copy_(epoch_min_val)
self.epoch_activation_max.copy_(epoch_max_val)
# calculate the average batch activation range
current_batch_range = max_val_cur - min_val_cur
new_range = (
self.average_batch_activation_range * self.num_batches_tracked
+ current_batch_range
) / (self.num_batches_tracked + 1)
self.average_batch_activation_range = new_range
self.num_batches_tracked += 1 # new batch was processed
# return the passed in the value
return x
@torch.jit.export
def get_batch_to_epoch_ratio(self):
epoch_activation_range = self.epoch_activation_max - self.epoch_activation_min
if epoch_activation_range == torch.tensor(float(0)):
raise ValueError("Range for Epoch is 0")
elif epoch_activation_range == torch.tensor(float("inf")):
raise ValueError(
"No data has been run through observer or infinity value present"
)
else:
return self.average_batch_activation_range / epoch_activation_range
@torch.jit.export
def reset_batch_and_epoch_values(self):
# set all the values back to their original defaults for a new epoch
self.num_batches_tracked = 0
self.average_batch_activation_range = torch.tensor(float(0))
self.epoch_activation_min = torch.tensor(float("inf"))
self.epoch_activation_max = torch.tensor(float("-inf"))
@torch.jit.export
def calculate_qparams(self):
raise Exception(
"calculate_qparams should not be called for ModelReportObserver"
)
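# Minimal usage sketch (illustrative only; in practice this observer is inserted by the
# FX Graph Mode model report tooling rather than called by hand):
#
#     obs = ModelReportObserver()
#     for batch in batches:                 # each forward pass updates the running stats
#         obs(batch)
#     s = obs.get_batch_to_epoch_ratio()    # S = average_batch_range / epoch_range
#     obs.reset_batch_and_epoch_values()    # start fresh for the next epoch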
|
py | 1a52623a9a96d489e789a7d9d1093b8ad42d0859 | import difflib
import email.parser
import inspect
import json
import os
import re
import sys
import pytest
from .env import H2Conf
class TestPost:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
TestPost._local_dir = os.path.dirname(inspect.getfile(TestPost))
H2Conf(env).add_vhost_cgi().install()
assert env.apache_restart() == 0
def local_src(self, fname):
return os.path.join(TestPost._local_dir, fname)
# upload and GET again using curl, compare to original content
def curl_upload_and_verify(self, env, fname, options=None):
url = env.mkurl("https", "cgi", "/upload.py")
fpath = os.path.join(env.gen_dir, fname)
r = env.curl_upload(url, fpath, options=options)
assert r.exit_code == 0, f"{r}"
assert 200 <= r.response["status"] < 300
r2 = env.curl_get(r.response["header"]["location"])
assert r2.exit_code == 0
assert r2.response["status"] == 200
with open(self.local_src(fpath), mode='rb') as file:
src = file.read()
assert src == r2.response["body"]
def test_h2_004_01(self, env):
self.curl_upload_and_verify(env, "data-1k", ["-vvv", "--http1.1"])
self.curl_upload_and_verify(env, "data-1k", ["--http2"])
def test_h2_004_02(self, env):
self.curl_upload_and_verify(env, "data-10k", ["--http1.1"])
self.curl_upload_and_verify(env, "data-10k", ["--http2"])
def test_h2_004_03(self, env):
self.curl_upload_and_verify(env, "data-100k", ["--http1.1"])
self.curl_upload_and_verify(env, "data-100k", ["--http2"])
def test_h2_004_04(self, env):
self.curl_upload_and_verify(env, "data-1m", ["--http1.1"])
self.curl_upload_and_verify(env, "data-1m", ["--http2"])
def test_h2_004_05(self, env):
self.curl_upload_and_verify(env, "data-1k", ["-v", "--http1.1", "-H", "Expect: 100-continue"])
self.curl_upload_and_verify(env, "data-1k", ["-v", "--http2", "-H", "Expect: 100-continue"])
@pytest.mark.skipif(True, reason="python3 regresses in chunked inputs to cgi")
def test_h2_004_06(self, env):
self.curl_upload_and_verify(env, "data-1k", ["--http1.1", "-H", "Content-Length: "])
self.curl_upload_and_verify(env, "data-1k", ["--http2", "-H", "Content-Length: "])
@pytest.mark.parametrize("name, value", [
("HTTP2", "on"),
("H2PUSH", "off"),
("H2_PUSHED", ""),
("H2_PUSHED_ON", ""),
("H2_STREAM_ID", "1"),
("H2_STREAM_TAG", r'\d+-1'),
])
def test_h2_004_07(self, env, name, value):
url = env.mkurl("https", "cgi", "/env.py")
r = env.curl_post_value(url, "name", name)
assert r.exit_code == 0
assert r.response["status"] == 200
m = re.match("{0}=(.*)".format(name), r.response["body"].decode('utf-8'))
assert m
assert re.match(value, m.group(1))
# POST some data using nghttp and see it echo'ed properly back
def nghttp_post_and_verify(self, env, fname, options=None):
url = env.mkurl("https", "cgi", "/echo.py")
fpath = os.path.join(env.gen_dir, fname)
r = env.nghttp().upload(url, fpath, options=options)
assert r.exit_code == 0
assert r.response["status"] >= 200 and r.response["status"] < 300
with open(self.local_src(fpath), mode='rb') as file:
src = file.read()
assert 'request-length' in r.response["header"]
assert int(r.response["header"]['request-length']) == len(src)
if len(r.response["body"]) != len(src):
sys.stderr.writelines(difflib.unified_diff(
src.decode().splitlines(True),
r.response["body"].decode().splitlines(True),
fromfile='source',
tofile='response'
))
assert len(r.response["body"]) == len(src)
assert r.response["body"] == src, f"expected '{src}', got '{r.response['body']}'"
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m"
])
def test_h2_004_21(self, env, name):
self.nghttp_post_and_verify(env, name, [])
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m",
])
def test_h2_004_22(self, env, name, repeat):
self.nghttp_post_and_verify(env, name, ["--no-content-length"])
# upload and GET again using nghttp, compare to original content
def nghttp_upload_and_verify(self, env, fname, options=None):
url = env.mkurl("https", "cgi", "/upload.py")
fpath = os.path.join(env.gen_dir, fname)
r = env.nghttp().upload_file(url, fpath, options=options)
assert r.exit_code == 0
assert r.response["status"] >= 200 and r.response["status"] < 300
assert r.response["header"]["location"]
r2 = env.nghttp().get(r.response["header"]["location"])
assert r2.exit_code == 0
assert r2.response["status"] == 200
with open(self.local_src(fpath), mode='rb') as file:
src = file.read()
assert src == r2.response["body"]
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m"
])
def test_h2_004_23(self, env, name, repeat):
self.nghttp_upload_and_verify(env, name, [])
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m"
])
def test_h2_004_24(self, env, name, repeat):
self.nghttp_upload_and_verify(env, name, ["--expect-continue"])
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m"
])
def test_h2_004_25(self, env, name, repeat):
self.nghttp_upload_and_verify(env, name, ["--no-content-length"])
def test_h2_004_30(self, env):
# issue: #203
resource = "data-1k"
full_length = 1000
chunk = 200
self.curl_upload_and_verify(env, resource, ["-v", "--http2"])
logfile = os.path.join(env.server_logs_dir, "test_004_30")
if os.path.isfile(logfile):
os.remove(logfile)
H2Conf(env).add("""
LogFormat "{ \\"request\\": \\"%r\\", \\"status\\": %>s, \\"bytes_resp_B\\": %B, \\"bytes_tx_O\\": %O, \\"bytes_rx_I\\": %I, \\"bytes_rx_tx_S\\": %S }" issue_203
CustomLog logs/test_004_30 issue_203
""").add_vhost_cgi().install()
assert env.apache_restart() == 0
url = env.mkurl("https", "cgi", "/files/{0}".format(resource))
r = env.curl_get(url, 5, options=["--http2"])
assert r.response["status"] == 200
r = env.curl_get(url, 5, options=["--http1.1", "-H", "Range: bytes=0-{0}".format(chunk-1)])
assert 206 == r.response["status"]
assert chunk == len(r.response["body"].decode('utf-8'))
r = env.curl_get(url, 5, options=["--http2", "-H", "Range: bytes=0-{0}".format(chunk-1)])
assert 206 == r.response["status"]
assert chunk == len(r.response["body"].decode('utf-8'))
# now check what response lengths have actually been reported
lines = open(logfile).readlines()
log_h2_full = json.loads(lines[-3])
log_h1 = json.loads(lines[-2])
log_h2 = json.loads(lines[-1])
assert log_h2_full['bytes_rx_I'] > 0
assert log_h2_full['bytes_resp_B'] == full_length
assert log_h2_full['bytes_tx_O'] > full_length
assert log_h1['bytes_rx_I'] > 0 # input bytes received
assert log_h1['bytes_resp_B'] == chunk # response bytes sent (payload)
assert log_h1['bytes_tx_O'] > chunk # output bytes sent
assert log_h2['bytes_rx_I'] > 0
assert log_h2['bytes_resp_B'] == chunk
assert log_h2['bytes_tx_O'] > chunk
def test_h2_004_40(self, env):
# echo content using h2test_module "echo" handler
def post_and_verify(fname, options=None):
url = env.mkurl("https", "cgi", "/h2test/echo")
fpath = os.path.join(env.gen_dir, fname)
r = env.curl_upload(url, fpath, options=options)
assert r.exit_code == 0
assert r.response["status"] >= 200 and r.response["status"] < 300
ct = r.response["header"]["content-type"]
mail_hd = "Content-Type: " + ct + "\r\nMIME-Version: 1.0\r\n\r\n"
mime_msg = mail_hd.encode() + r.response["body"]
# this MIME API is from hell
body = email.parser.BytesParser().parsebytes(mime_msg)
assert body
assert body.is_multipart()
filepart = None
for part in body.walk():
if fname == part.get_filename():
filepart = part
assert filepart
with open(self.local_src(fpath), mode='rb') as file:
src = file.read()
assert src == filepart.get_payload(decode=True)
post_and_verify("data-1k", [])
|
py | 1a5264ef3ad9a55151c01b9951ba39ac0941223a | """
Django settings for django_test project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*tb5175r6s&y^_p$z%z=6gpswk-rcazy9(9k(bhp5nemciovz5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'frontend',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_test.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'django_test',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'USER': 'roman',
'PASSWORD': 'admin',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"), )
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
|
py | 1a5265357630d7d516d2a7a670e6224d6a140779 | """
PyQt App that leverages completed model for image inpainting
"""
import sys
import os
import random
import torch
import argparse
from PIL import Image
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from torchvision.utils import make_grid
from torchvision.utils import save_image
from torchvision import transforms
from partial_conv_net import PartialConvUNet
from places2_train import unnormalize, MEAN, STDDEV
def exceeds_bounds(y):
if y >= 250:
return True
else:
return False
class Drawer(QWidget):
newPoint = pyqtSignal(QPoint)
def __init__(self, image_path, parent=None):
QWidget.__init__(self, parent)
self.path = QPainterPath()
self.image_path = image_path
def paintEvent(self, event):
painter = QPainter(self)
painter.drawPixmap(QRect(0, 0, 256, 256), QPixmap(self.image_path))
painter.setPen(QPen(Qt.black, 12))
painter.drawPath(self.path)
def mousePressEvent(self, event):
if exceeds_bounds(event.pos().y()):
return
self.path.moveTo(event.pos())
self.update()
def mouseMoveEvent(self, event):
if exceeds_bounds(event.pos().y()):
return
self.path.lineTo(event.pos())
self.newPoint.emit(event.pos())
self.update()
def sizeHint(self):
return QSize(256, 256)
def resetPath(self):
self.path = QPainterPath()
self.update()
class InpaintApp(QWidget):
def __init__(self, image_num):
super().__init__()
self.setLayout(QVBoxLayout())
self.title = 'Inpaint Application'
self.width = 276
self.height = 350
self.cwd = os.getcwd()
image_num = str(image_num).zfill(8)
image_path = self.cwd + "/val_256/Places365_val_{}.jpg".format(image_num)
self.save_path = self.cwd + "/test.jpg"
self.open_and_save_img(image_path, self.save_path)
self.drawer = Drawer(self.save_path, self)
self.setWindowTitle(self.title)
self.setGeometry(200, 200, self.width, self.height)
self.layout().addWidget(self.drawer)
self.layout().addWidget(QPushButton("Inpaint!", clicked=self.inpaint))
self.img_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(MEAN, STDDEV)])
self.mask_transform = transforms.ToTensor()
self.device = torch.device("cpu")
model_dict = torch.load(self.cwd + "/model_e1_i56358.pth", map_location="cpu")
model = PartialConvUNet()
model.load_state_dict(model_dict["model"])
model = model.to(self.device)
self.model = model
self.model.eval()
self.show()
def open_and_save_img(self, path, dest):
img = Image.open(path)
img.save(dest)
def inpaint(self):
mask = QImage(256, 256, QImage.Format_RGB32)
mask.fill(qRgb(255, 255, 255))
painter = QPainter()
painter.begin(mask)
painter.setPen(QPen(Qt.black, 12))
painter.drawPath(self.drawer.path)
painter.end()
mask.save("mask.png", "png")
# open image and normalize before forward pass
mask = Image.open(self.cwd + "/mask.png")
mask = self.mask_transform(mask.convert("RGB"))
gt_img = Image.open(self.save_path)
gt_img = self.img_transform(gt_img.convert("RGB"))
img = gt_img * mask
# adds dimension of 1 (batch) to image
img.unsqueeze_(0)
gt_img.unsqueeze_(0)
mask.unsqueeze_(0)
# forward pass
with torch.no_grad():
output = self.model(img.to(self.device), mask.to(self.device))
        # keep the known input pixels, use the network output only inside the holes, then unnormalize for saving
        output = mask * img + (1 - mask) * output
grid = make_grid(unnormalize(output))
save_image(grid, "test.jpg")
self.drawer.resetPath()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--img", type=int, default=1)
args = parser.parse_args()
app = QApplication(sys.argv)
ex = InpaintApp(args.img)
sys.exit(app.exec_()) |
py | 1a526576f5e5611b0fd60ddcec87cbbbc472da4d | # Copyright 2019 Cloudification GmbH
#
# Author: Sergey Kraynev <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
import requests
import designate.tests
from designate import exceptions
from designate import objects
from designate.backend import impl_akamai_v2 as akamai
from designate.tests import fixtures
class AkamaiBackendTestCase(designate.tests.TestCase):
def setUp(self):
super(AkamaiBackendTestCase, self).setUp()
self.zone = objects.Zone(
id='cca7908b-dad4-4c50-adba-fb67d4c556e8',
name='example.com.',
email='[email protected]'
)
self.target = {
'id': '4588652b-50e7-46b9-b688-a9bad40a873e',
'type': 'akamai_v2',
'masters': [
{'host': '192.168.1.1', 'port': 53},
{'host': '192.168.1.2', 'port': 35}
],
'options': [
{'key': 'host', 'value': '192.168.2.3'},
{'key': 'port', 'value': '53'},
{'key': 'akamai_client_secret', 'value': 'client_secret'},
{'key': 'akamai_host', 'value': 'host_value'},
{'key': 'akamai_access_token', 'value': 'access_token'},
{'key': 'akamai_client_token', 'value': 'client_token'},
{'key': 'akamai_contract_id', 'value': 'G-XYW'},
{'key': 'akamai_gid', 'value': '777'}
],
}
def gen_response(self, status_code, reason, json_data=None):
response = requests.models.Response()
response.status_code = status_code
response.reason = reason
response._content = json.dumps(json_data or {}).encode('utf-8')
return response
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_create_zone_missed_contract_id(self, mock_post, mock_auth):
self.target['options'].remove(
{'key': 'akamai_contract_id', 'value': 'G-XYW'})
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
with fixtures.random_seed(0):
self.assertRaisesRegex(
exceptions.Backend,
'contractId is required for zone creation',
backend.create_zone, self.admin_context, self.zone)
mock_post.assert_not_called()
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_create_zone(self, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
with fixtures.random_seed(0):
backend.create_zone(self.admin_context, self.zone)
project_id = self.admin_context.project_id or self.zone.tenant_id
mock_post.assert_called_once_with(
json={
'comment': 'Created by Designate for Tenant %s' % project_id,
'masters': ['192.168.1.1', '192.168.1.2'],
'type': 'secondary', 'zone': u'example.com.'
},
params={
'gid': '777',
'contractId': 'G-XYW'
},
url='https://host_value/config-dns/v2/zones'
)
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_create_zone_duplicate_zone(self, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
mock_post.return_value = self.gen_response(409, 'Conflict')
with fixtures.random_seed(0):
backend.create_zone(self.admin_context, self.zone)
project_id = self.admin_context.project_id or self.zone.tenant_id
mock_post.assert_called_once_with(
json={
'comment': 'Created by Designate for Tenant %s' % project_id,
'masters': ['192.168.1.1', '192.168.1.2'],
'type': 'secondary', 'zone': u'example.com.'
},
params={
'gid': '777',
'contractId': 'G-XYW'
},
url='https://host_value/config-dns/v2/zones'
)
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_create_zone_with_tsig_key(self, mock_post, mock_auth):
self.target['options'].extend([
{'key': 'tsig_key_name', 'value': 'test_key'},
{'key': 'tsig_key_algorithm', 'value': 'hmac-sha512'},
{'key': 'tsig_key_secret', 'value': 'aaaabbbbccc'}
])
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
with fixtures.random_seed(0):
backend.create_zone(self.admin_context, self.zone)
project_id = self.admin_context.project_id or self.zone.tenant_id
mock_post.assert_called_once_with(
json={
'comment': 'Created by Designate for Tenant %s' % project_id,
'masters': ['192.168.1.1', '192.168.1.2'],
'type': 'secondary',
'zone': 'example.com.',
'tsigKey': {
'name': 'test_key',
'algorithm': 'hmac-sha512',
'secret': 'aaaabbbbccc',
}
},
params={
'gid': '777',
'contractId': 'G-XYW'
},
url='https://host_value/config-dns/v2/zones'
)
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_create_zone_raise_error(self, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
json_data = {
'title': 'Missing parameter',
'detail': 'Missed A option'
}
mock_post.return_value = self.gen_response(
400, 'Bad Request', json_data)
with fixtures.random_seed(0):
self.assertRaisesRegex(
exceptions.Backend,
'Zone creation failed due to: Missed A option',
backend.create_zone, self.admin_context, self.zone)
project_id = self.admin_context.project_id or self.zone.tenant_id
mock_post.assert_called_once_with(
json={
'comment': 'Created by Designate for Tenant %s' % project_id,
'masters': ['192.168.1.1', '192.168.1.2'],
'type': 'secondary', 'zone': 'example.com.'
},
params={
'gid': '777',
'contractId': 'G-XYW'
},
url='https://host_value/config-dns/v2/zones'
)
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_force_delete_zone(self, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
mock_post.return_value = self.gen_response(200, 'Success')
with fixtures.random_seed(0):
backend.delete_zone(self.admin_context, self.zone)
mock_post.assert_called_once_with(
json={
'zones': ['example.com.']
},
params={
'force': True
},
url='https://host_value/config-dns/v2/zones/delete-requests'
)
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_force_delete_zone_raise_error(self, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
mock_post.return_value = self.gen_response(
403, 'Bad Request', {'detail': 'Unexpected error'})
with fixtures.random_seed(0):
self.assertRaisesRegex(
exceptions.Backend,
'Zone deletion failed due to: Unexpected error',
backend.delete_zone, self.admin_context, self.zone)
mock_post.assert_called_once_with(
json={
'zones': ['example.com.']
},
params={
'force': True
},
url='https://host_value/config-dns/v2/zones/delete-requests'
)
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_force_delete_zone_raise_error_404(self, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
mock_post.return_value = self.gen_response(
404, 'Bad Request', {'detail': 'Unexpected error'})
with fixtures.random_seed(0):
backend.delete_zone(self.admin_context, self.zone)
mock_post.assert_called_once_with(
json={
'zones': ['example.com.']
},
params={
'force': True
},
url='https://host_value/config-dns/v2/zones/delete-requests'
)
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
@mock.patch.object(akamai.requests.Session, 'get')
def test_soft_delete_zone(self, mock_get, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
mock_post.side_effect = [
# emulate, when Force=True is forbidden
self.gen_response(403, 'Forbidden'),
# emulate request, when Force=False
self.gen_response(200, 'Success', {'requestId': 'nice_id'}),
]
# emulate max 9 failed attempts and 1 success
mock_get.side_effect = 9 * [
self.gen_response(200, 'Success', {'isComplete': False})
] + [
self.gen_response(200, 'Success', {'isComplete': True})
]
with fixtures.random_seed(0), \
mock.patch.object(akamai.time, 'sleep') as mock_sleep:
mock_sleep.return_value = None
backend.delete_zone(self.admin_context, self.zone)
self.assertEqual(10, mock_sleep.call_count)
url = 'https://host_value/config-dns/v2/zones/delete-requests/nice_id'
mock_get.assert_has_calls(9 * [mock.call(url=url)])
mock_post.assert_has_calls([
mock.call(
json={'zones': ['example.com.']},
params={'force': True},
url='https://host_value/config-dns/v2/zones/delete-requests'
),
mock.call(
json={'zones': ['example.com.']},
params={'force': False},
url='https://host_value/config-dns/v2/zones/delete-requests'
)
])
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
@mock.patch.object(akamai.requests.Session, 'get')
def test_soft_delete_zone_failed_after_10_attempts(
self, mock_get, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
mock_post.side_effect = [
# emulate, when Force=True is forbidden
self.gen_response(403, 'Forbidden'),
# emulate request, when Force=False
self.gen_response(200, 'Success', {'requestId': 'nice_id'}),
]
# emulate max 10 failed attempts
mock_get.side_effect = 10 * [
self.gen_response(200, 'Success', {'isComplete': False})
]
with fixtures.random_seed(0), \
mock.patch.object(akamai.time, 'sleep') as mock_sleep:
mock_sleep.return_value = None
self.assertRaisesRegex(
exceptions.Backend,
'Zone was not deleted after 10 attempts',
backend.delete_zone, self.admin_context, self.zone)
self.assertEqual(10, mock_sleep.call_count)
url = 'https://host_value/config-dns/v2/zones/delete-requests/nice_id'
mock_get.assert_has_calls(10 * [mock.call(url=url)])
mock_post.assert_has_calls([
mock.call(
json={'zones': ['example.com.']},
params={'force': True},
url='https://host_value/config-dns/v2/zones/delete-requests'
),
mock.call(
json={'zones': ['example.com.']},
params={'force': False},
url='https://host_value/config-dns/v2/zones/delete-requests'
)
])
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_soft_delete_zone_raise_error(self, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
mock_post.side_effect = [
# emulate, when Force=True is forbidden
self.gen_response(403, 'Forbidden'),
# emulate request, when Force=False
            self.gen_response(409, 'Conflict', {'detail': 'Internal Error'})
]
with fixtures.random_seed(0):
self.assertRaisesRegex(
exceptions.Backend,
                'Zone deletion failed due to: Internal Error',
backend.delete_zone, self.admin_context, self.zone)
mock_post.assert_has_calls([
mock.call(
json={'zones': [u'example.com.']},
params={'force': True},
url='https://host_value/config-dns/v2/zones/delete-requests'
),
mock.call(
json={'zones': [u'example.com.']},
params={'force': False},
url='https://host_value/config-dns/v2/zones/delete-requests'
)
])
@mock.patch.object(akamai, 'edgegrid')
@mock.patch.object(akamai.requests.Session, 'post')
def test_soft_delete_zone_missed_request_id(self, mock_post, mock_auth):
backend = akamai.AkamaiBackend(
objects.PoolTarget.from_dict(self.target)
)
mock_auth.EdgeGridAuth.assert_called_once_with(
access_token='access_token',
client_secret='client_secret',
client_token='client_token'
)
mock_post.side_effect = [
# emulate, when Force=True is forbidden
self.gen_response(403, 'Forbidden'),
# emulate request, when Force=False
self.gen_response(200, 'Success')
]
with fixtures.random_seed(0):
self.assertRaisesRegex(
exceptions.Backend,
'Zone deletion failed due to: requestId missed in response',
backend.delete_zone, self.admin_context, self.zone)
mock_post.assert_has_calls([
mock.call(
json={'zones': [u'example.com.']},
params={'force': True},
url='https://host_value/config-dns/v2/zones/delete-requests'
),
mock.call(
json={'zones': [u'example.com.']},
params={'force': False},
url='https://host_value/config-dns/v2/zones/delete-requests'
)
])
|
py | 1a5265ed085987cd39728024b220fae7a3fde044 | import ctypes
import os
from casual.xatmi.xatmi import tpalloc, tpfree, tperrno, tperrnostring, \
X_OCTET, CASUAL_BUFFER_BINARY_TYPE, CASUAL_BUFFER_BINARY_SUBTYPE, \
CASUAL_BUFFER_JSON_TYPE, CASUAL_BUFFER_JSON_SUBTYPE, \
CASUAL_BUFFER_YAML_TYPE, CASUAL_BUFFER_YAML_SUBTYPE, \
CASUAL_BUFFER_XML_TYPE, CASUAL_BUFFER_XML_SUBTYPE
from casual.server.exception import BufferError
BufferTypeMap = {
'x_octet': (X_OCTET, 0),
'binary': (CASUAL_BUFFER_BINARY_TYPE, CASUAL_BUFFER_BINARY_SUBTYPE),
'json': (CASUAL_BUFFER_JSON_TYPE, CASUAL_BUFFER_JSON_SUBTYPE),
'yaml': (CASUAL_BUFFER_YAML_TYPE, CASUAL_BUFFER_YAML_SUBTYPE),
'xml': (CASUAL_BUFFER_XML_TYPE, CASUAL_BUFFER_XML_SUBTYPE)
}
def x_octet():
return BufferTypeMap['x_octet']
def binary():
return BufferTypeMap['binary']
def json():
return BufferTypeMap['json']
def yaml():
return BufferTypeMap['yaml']
def xml():
return BufferTypeMap['xml']
def _convert( data):
try:
data = data.encode()
is_bytes = False
except (UnicodeDecodeError, AttributeError):
is_bytes = True
return is_bytes, data
class Buffer(object):
def __init__(self, buffertype, subtype, data=None):
if data:
self.is_bytes, data = _convert( data)
self.size = ctypes.c_long(len(data))
self.holder = tpalloc(buffertype, subtype, self.size)
if self.holder:
self.set(data)
else:
raise BufferError(tperrnostring(tperrno()))
else:
self.size = ctypes.c_long(1024)
self.holder = tpalloc(buffertype, subtype, self.size)
def set(self, data):
ctypes.memmove(self.holder, data, len(data))
def raw(self):
return self.holder
def data(self):
return self.holder[0:self.size.value]
def __del__(self):
if self.holder and tpfree:
tpfree( self.holder)
#
# Supported buffer type
#
class JsonBuffer(Buffer):
def __init__(self, data = None):
buffertype, subtype = json()
try:
super().__init__(buffertype, subtype, data)
except TypeError:
super( JsonBuffer, self).__init__(buffertype, subtype, data)
#
# Supported buffer type
#
class XmlBuffer(Buffer):
def __init__(self, data = None):
buffertype, subtype = xml()
try:
super().__init__(buffertype, subtype, data)
except TypeError:
super( XmlBuffer, self).__init__(buffertype, subtype, data)
def create_buffer(buffer):
theType=type(buffer)
if theType is XmlBuffer:
return XmlBuffer()
elif theType is JsonBuffer:
return JsonBuffer()
else:
raise BufferError("Unknown buffer type")
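# Usage sketch (illustrative; assumes the casual XATMI runtime and its Python bindings
# are available at run time):
#
#     buf = JsonBuffer('{"name": "value"}')   # tpalloc's a JSON buffer and copies the payload
#     raw = buf.raw()                         # ctypes pointer to hand to the XATMI calls
#     payload = buf.data()                    # payload bytes, bounded by buf.size
#
#     reply = create_buffer(buf)              # empty buffer of the same kind, e.g. for replies
#
# The underlying storage is released in Buffer.__del__ via tpfree.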
|
py | 1a5265eef973edfa14624f63b0c90aeab56d5a2e | # Coffee Machine Program Requirements
# 1. Prompt user by asking “What would you like? (espresso/latte/cappuccino):”
# a. Check the user’s input to decide what to do next.
# b. The prompt should show every time action has completed, e.g. once the drink is
# dispensed. The prompt should show again to serve the next customer.
# 2. Turn off the Coffee Machine by entering “off” to the prompt.
# a. For maintainers of the coffee machine, they can use “off” as the secret word to turn off
# the machine. Your code should end execution when this happens.
# 3. Print report.
# a. When the user enters “report” to the prompt, a report should be generated that shows
# the current resource values. e.g.
# Water: 100ml
# Milk: 50ml
# Coffee: 76g
# Money: $2.5
# 4. Check resources sufficient?
# a. When the user chooses a drink, the program should check if there are enough
# resources to make that drink.
# b. E.g. if Latte requires 200ml water but there is only 100ml left in the machine. It should
# not continue to make the drink but print: “Sorry there is not enough water.”
# c. The same should happen if another resource is depleted, e.g. milk or coffee.
# 5. Process coins.
# a. If there are sufficient resources to make the drink selected, then the program should
# prompt the user to insert coins.
# b. Remember that quarters = $0.25, dimes = $0.10, nickels = $0.05, pennies = $0.01
# c. Calculate the monetary value of the coins inserted. E.g. 1 quarter, 2 dimes, 1 nickel, 2
# pennies = 0.25 + 0.1 x 2 + 0.05 + 0.01 x 2 = $0.52
# 6. Check transaction successful?
# a. Check that the user has inserted enough money to purchase the drink they selected.
# E.g Latte cost $2.50, but they only inserted $0.52 then after counting the coins the
# program should say “Sorry that's not enough money. Money refunded.”.
# b. But if the user has inserted enough money, then the cost of the drink gets added to the
# machine as the profit and this will be reflected the next time “report” is triggered. E.g.
# Water: 100ml
# Milk: 50ml
# Coffee: 76g
# Money: $2.5
# c. If the user has inserted too much money, the machine should offer change.
# E.g. “Here is $2.45 dollars in change.” The change should be rounded to 2 decimal
# places.
# 7. Make Coffee.
# a. If the transaction is successful and there are enough resources to make the drink the
# user selected, then the ingredients to make the drink should be deducted from the
# coffee machine resources.
# E.g. report before purchasing latte:
# Water: 300ml
# Milk: 200ml
# Coffee: 100g
# Money: $0
# Report after purchasing latte:
# Water: 100ml
# Milk: 50ml
# Coffee: 76g
# Money: $2.5
# b. Once all resources have been deducted, tell the user “Here is your latte. Enjoy!”. If
# latte was their choice of drink.
|
py | 1a5266353ebb9a55636962afcb07fbf86e50eea3 | from time import sleep
import picamera
WAIT_TIME = 10
with picamera.PiCamera() as camera:
camera.resolution = (1024, 768)
for filename in camera.capture_continuous('/home/pi/timelapse/img{timestamp:%D-%m-%y_%H-%M-%S}.jpg'):
sleep(WAIT_TIME)
|
py | 1a52667bf629990decf8513b88aee2c68998d482 | #
# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
#
# OpenArkCompiler is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#
from api import *
SUPO0 = {
"compile": [
C2ast(
clang="${OUT_ROOT}/tools/bin/clang",
include_path=[
"${OUT_ROOT}/${MAPLE_BUILD_TYPE}/lib/include",
"${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include",
"${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include",
"../lib"
],
option="--target=aarch64 -U __SIZEOF_INT128__",
infile="${APP}.c",
outfile="${APP}.ast"
),
Mplfe(
hir2mpl="${OUT_ROOT}/${MAPLE_BUILD_TYPE}/bin/hir2mpl",
infile="${APP}.ast",
outfile="${APP}.mpl"
),
Maple(
maple="${OUT_ROOT}/${MAPLE_BUILD_TYPE}/bin/maple",
run=["mplcg"],
option={
"mplcg": "--quiet"
},
global_option="",
infile="${APP}.mpl"
),
CLinker(
infile="${APP}.s",
front_option="-O2 -static -L../lib -std=c89 -s",
outfile="${APP}.out",
back_option="-lst -lm"
)
],
"run": [
Shell(
"${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc ${APP}.out > output.log 2>&1"
),
CheckFileEqual(
file1="output.log",
file2="expected.txt"
)
]
}
|
py | 1a526719ec7cb3983a7dd53de64d3657be6550c8 | """react_django_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from django.views.decorators.cache import never_cache
from django.views.decorators.gzip import gzip_page
from django.views.generic import TemplateView
from django.views.static import serve
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^static/(?P<path>.*)$', gzip_page(serve), kwargs=dict(document_root=settings.FRONTEND_STATIC_ROOT)),
url(r'^$', never_cache(gzip_page(TemplateView.as_view(template_name='index.html'))), name='main'),
]
|
py | 1a5268851a2c126c9be7db6c256994c92f5140ad | # -*- coding: utf-8 -*-
import sys
import gc
from hypothesis import given
from hypothesis.extra import numpy as hynp
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
assert_raises_regex,
)
import textwrap
class TestArrayRepr:
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
# one dimensional
x1d = np.array([1, 2]).view(sub)
assert_equal(repr(x1d), 'sub([1, 2])')
# two dimensional
x2d = np.array([[1, 2], [3, 4]]).view(sub)
assert_equal(repr(x2d),
'sub([[1, 2],\n'
' [3, 4]])')
# two dimensional with flexible dtype
xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]], dtype=[('a', '<i4')])"
)
@pytest.mark.xfail(reason="See gh-10544")
def test_object_subclass(self):
class sub(np.ndarray):
def __new__(cls, inp):
obj = np.asarray(inp).view(cls)
return obj
def __getitem__(self, ind):
ret = super().__getitem__(ind)
return sub(ret)
# test that object + subclass is OK:
x = sub([None, None])
assert_equal(repr(x), 'sub([None, None], dtype=object)')
assert_equal(str(x), '[None None]')
x = sub([None, sub([None, None])])
assert_equal(repr(x),
'sub([None, sub([None, None], dtype=object)], dtype=object)')
assert_equal(str(x), '[None sub([None, None], dtype=object)]')
def test_0d_object_subclass(self):
# make sure that subclasses which return 0ds instead
# of scalars don't cause infinite recursion in str
class sub(np.ndarray):
def __new__(cls, inp):
obj = np.asarray(inp).view(cls)
return obj
def __getitem__(self, ind):
ret = super().__getitem__(ind)
return sub(ret)
x = sub(1)
assert_equal(repr(x), 'sub(1)')
assert_equal(str(x), '1')
x = sub([1, 1])
assert_equal(repr(x), 'sub([1, 1])')
assert_equal(str(x), '[1 1]')
# check it works properly with object arrays too
x = sub(None)
assert_equal(repr(x), 'sub(None, dtype=object)')
assert_equal(str(x), 'None')
# plus recursive object arrays (even depth > 1)
y = sub(None)
x[()] = y
y[()] = x
assert_equal(repr(x),
'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
assert_equal(str(x), '...')
x[()] = 0 # resolve circular references for garbage collector
# nested 0d-subclass-object
x = sub(None)
x[()] = sub(None)
assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
assert_equal(str(x), 'None')
# gh-10663
class DuckCounter(np.ndarray):
def __getitem__(self, item):
result = super().__getitem__(item)
if not isinstance(result, DuckCounter):
result = result[...].view(DuckCounter)
return result
def to_string(self):
return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
def __str__(self):
if self.shape == ():
return self.to_string()
else:
fmt = {'all': lambda x: x.to_string()}
return np.array2string(self, formatter=fmt)
dc = np.arange(5).view(DuckCounter)
assert_equal(str(dc), "[zero one two many many]")
assert_equal(str(dc[0]), "zero")
def test_self_containing(self):
arr0d = np.array(None)
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
arr0d[()] = 0 # resolve recursion for garbage collector
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
arr1d[1] = 0 # resolve recursion for garbage collector
first = np.array(None)
second = np.array(None)
first[()] = second
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
first[()] = 0 # resolve circular references for garbage collector
def test_containing_list(self):
        # printing square brackets directly would be ambiguous
arr1d = np.array([None, None])
arr1d[0] = [1, 2]
arr1d[1] = [3]
assert_equal(repr(arr1d),
'array([list([1, 2]), list([3])], dtype=object)')
def test_void_scalar_recursion(self):
# gh-9345
repr(np.void(b'test')) # RecursionError ?
def test_fieldless_structured(self):
# gh-10366
no_fields = np.dtype([])
arr_no_fields = np.empty(4, dtype=no_fields)
assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
class TestComplexArray:
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
'[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
'[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
'[0.+infj]', '[0.+infj]', '[0.+infj]',
'[0.-infj]', '[0.-infj]', '[0.-infj]',
'[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
'[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
'[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
'[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
'[1.+infj]', '[1.+infj]', '[1.+infj]',
'[1.-infj]', '[1.-infj]', '[1.-infj]',
'[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
'[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
'[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
'[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
'[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
'[inf+infj]', '[inf+infj]', '[inf+infj]',
'[inf-infj]', '[inf-infj]', '[inf-infj]',
'[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
'[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
'[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
'[nan+infj]', '[nan+infj]', '[nan+infj]',
'[nan-infj]', '[nan-infj]', '[nan-infj]',
'[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
for res, val in zip(actual, wanted):
assert_equal(res, val)
class TestArray2String:
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
def test_unexpected_kwarg(self):
# ensure than an appropriate TypeError
# is raised when array2string receives
# an unexpected kwarg
with assert_raises_regex(TypeError, 'nonsense'):
np.array2string(np.array([1, 2, 3]),
nonsense=None)
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
assert_(np.array2string(x, formatter={'all':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
np.set_printoptions(legacy='1.13')
try:
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
('NaT',) ('NaT',) ('NaT',)]""")
)
finally:
np.set_printoptions(legacy=False)
# same again, but with non-legacy behavior
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ( 'NaT',)
( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',)]""")
)
# and again, with timedeltas
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
)
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
"[([ 1, -1],) ([123, 1],)]")
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
dtype=[('B', 'i4', (2, 2))])
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
def test_unstructured_void_repr(self):
a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
assert_equal(repr(a),
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
assert_equal(eval(repr(a), vars(np)), a)
assert_equal(eval(repr(a[0]), vars(np)), a[0])
def test_edgeitems_kwarg(self):
# previously the global print options would be taken over the kwarg
arr = np.zeros(3, int)
assert_equal(
np.array2string(arr, edgeitems=1, threshold=0),
"[0 ... 0]"
)
def test_summarize_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ... 998 999 1000]'
assert_equal(str(A), strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_equal(repr(A), reprA)
def test_summarize_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ... 498 499 500]\n' \
' [ 501 502 503 ... 999 1000 1001]]'
assert_equal(str(A), strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_equal(repr(A), reprA)
def test_linewidth(self):
a = np.full(6, 1)
def make_str(a, width, **kw):
return np.array2string(a, separator="", max_line_width=width, **kw)
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
' 11]')
assert_equal(make_str(a, 8), '[111111]')
assert_equal(make_str(a, 7), '[11111\n'
' 1]')
assert_equal(make_str(a, 5), '[111\n'
' 111]')
b = a[None,None,:]
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
' 1]]]')
assert_equal(make_str(b, 12), '[[[111111]]]')
assert_equal(make_str(b, 9), '[[[111\n'
' 111]]]')
assert_equal(make_str(b, 8), '[[[11\n'
' 11\n'
' 11]]]')
def test_wide_element(self):
a = np.array(['xxxxx'])
assert_equal(
np.array2string(a, max_line_width=5),
"['xxxxx']"
)
assert_equal(
np.array2string(a, max_line_width=5, legacy='1.13'),
"[ 'xxxxx']"
)
def test_multiline_repr(self):
class MultiLine:
def __repr__(self):
return "Line 1\nLine 2"
a = np.array([[None, MultiLine()], [MultiLine(), None]])
assert_equal(
np.array2string(a),
'[[None Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2 None]]'
)
assert_equal(
np.array2string(a, max_line_width=5),
'[[None\n'
' Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2\n'
' None]]'
)
assert_equal(
repr(a),
'array([[None, Line 1\n'
' Line 2],\n'
' [Line 1\n'
' Line 2, None]], dtype=object)'
)
class MultiLineLong:
def __repr__(self):
return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
assert_equal(
repr(a),
'array([[None, Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 , None]], dtype=object)'
)
assert_equal(
np.array_repr(a, 20),
'array([[None,\n'
' Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ,\n'
' None]],\n'
' dtype=object)'
)
def test_nested_array_repr(self):
a = np.empty((2, 2), dtype=object)
a[0, 0] = np.eye(2)
a[0, 1] = np.eye(3)
a[1, 0] = None
a[1, 1] = np.ones((3, 1))
assert_equal(
repr(a),
'array([[array([[1., 0.],\n'
' [0., 1.]]), array([[1., 0., 0.],\n'
' [0., 1., 0.],\n'
' [0., 0., 1.]])],\n'
' [None, array([[1.],\n'
' [1.],\n'
' [1.]])]], dtype=object)'
)
@given(hynp.from_dtype(np.dtype("U")))
def test_any_text(self, text):
# This test checks that, given any value that can be represented in an
# array of dtype("U") (i.e. unicode string), ...
a = np.array([text, text, text])
# casting a list of them to an array does not e.g. truncate the value
assert_equal(a[0], text)
# and that np.array2string puts a newline in the expected location
expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
assert_equal(result, expected_repr)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# make sure we do not hold references to the array due to a recursive
# closure (gh-10620)
gc.disable()
a = np.arange(2)
r1 = sys.getrefcount(a)
np.array2string(a)
np.array2string(a)
r2 = sys.getrefcount(a)
gc.collect()
gc.enable()
assert_(r1 == r2)
class TestPrintOptions:
"""Test getting and setting global print options."""
def setup(self):
self.oldopts = np.get_printoptions()
def teardown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
def test_precision_zero(self):
np.set_printoptions(precision=0)
for values, string in (
([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
x = np.array(values)
assert_equal(repr(x), "array([%s])" % string)
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'all':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int_kind':None})
assert_equal(repr(x), "array([0, 1, 2])")
x = np.arange(3.)
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([0., 1., 2.])")
def test_0d_arrays(self):
assert_equal(str(np.array(u'café', '<U4')), u'café')
assert_equal(repr(np.array('café', '<U4')),
"array('café', dtype='<U4')")
assert_equal(str(np.array('test', np.str_)), 'test')
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
assert_equal(str(a[0]), '([0, 0, 0],)')
assert_equal(repr(np.datetime64('2005-02-25')[...]),
"array('2005-02-25', dtype='datetime64[D]')")
assert_equal(repr(np.timedelta64('10', 'Y')[...]),
"array(10, dtype='timedelta64[Y]')")
# repr of 0d arrays is affected by printoptions
x = np.array(1)
np.set_printoptions(formatter={'all':lambda x: "test"})
assert_equal(repr(x), "array(test)")
# str is unaffected
assert_equal(str(x), "1")
# check `style` arg raises
assert_warns(DeprecationWarning, np.array2string,
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
# gh-10934 style was broken in legacy mode, check it works
np.array2string(np.array(1.), legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
y = np.array([1., 2., -10.])
z = np.array([100., 2., -1.])
w = np.array([-100., 2., 1.])
assert_equal(repr(x), 'array([1., 2., 3.])')
assert_equal(repr(y), 'array([ 1., 2., -10.])')
assert_equal(repr(np.array(y[0])), 'array(1.)')
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
assert_equal(repr(z), 'array([100., 2., -1.])')
assert_equal(repr(w), 'array([-100., 2., 1.])')
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
x = np.array([np.inf, 100000, 1.1234])
y = np.array([np.inf, 100000, -1.1234])
z = np.array([np.inf, 1.1234, -1e120])
np.set_printoptions(precision=2)
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
def test_bool_spacing(self):
assert_equal(repr(np.array([True, True])),
'array([ True, True])')
assert_equal(repr(np.array([True, False])),
'array([ True, False])')
assert_equal(repr(np.array([True])),
'array([ True])')
assert_equal(repr(np.array(True)),
'array(True)')
assert_equal(repr(np.array(False)),
'array(False)')
def test_sign_spacing(self):
a = np.arange(4.)
b = np.array([1.234e9])
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array(1.)')
assert_equal(repr(b), 'array([1.234e+09])')
assert_equal(repr(np.array([0.])), 'array([0.])')
assert_equal(repr(c),
"array([1. +1.j , 1.12345679+1.12345679j])")
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign=' ')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array( 1.)')
assert_equal(repr(b), 'array([ 1.234e+09])')
assert_equal(repr(c),
"array([ 1. +1.j , 1.12345679+1.12345679j])")
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign='+')
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
assert_equal(repr(np.array(1.)), 'array(+1.)')
assert_equal(repr(b), 'array([+1.234e+09])')
assert_equal(repr(c),
"array([+1. +1.j , +1.12345679+1.12345679j])")
np.set_printoptions(legacy='1.13')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(b), 'array([ 1.23400000e+09])')
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
assert_equal(repr(np.array(1.)), 'array(1.0)')
assert_equal(repr(np.array([0.])), 'array([ 0.])')
assert_equal(repr(c),
"array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
# gh-10383
assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
def test_float_overflow_nowarn(self):
# make sure internal computations in FloatingFormat don't
# warn about overflow
repr(np.array([1e4, 0.1], dtype='f2'))
def test_sign_spacing_structured(self):
a = np.ones(2, dtype='<f,<f')
assert_equal(repr(a),
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
assert_equal(repr(a[0]), "(1., 1.)")
def test_floatmode(self):
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
y = np.array([0.2918820979355541, 0.5064172631089138,
0.2848750619642916, 0.4342965294660567,
0.7326538397312751, 0.3459503329096204,
0.0862072768214508, 0.39112753029631175],
dtype=np.float64)
z = np.arange(6, dtype=np.float16)/10
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
# also make sure 1e23 is right (is between two fp numbers)
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
# note: we construct w from the strings `1eXX` instead of doing
# `10.**arange(24)` because it turns out the two are not equivalent in
# python. On some architectures `1e23 != 10.**23`.
wp = np.array([1.234e1, 1e2, 1e123])
# unique mode
np.set_printoptions(floatmode='unique')
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
" 0.0862072768214508 , 0.39112753029631175])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w),
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
" 1.e+24])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1. +1.j , 1.123456789+1.123456789j])")
# maxprec mode, precision=8
np.set_printoptions(floatmode='maxprec', precision=8)
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1. +1.j , 1.12345679+1.12345679j])")
# fixed mode, precision=4
np.set_printoptions(floatmode='fixed', precision=4)
assert_equal(repr(x),
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
" 0.2383, 0.4226], dtype=float16)")
assert_equal(repr(y),
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
assert_equal(repr(z),
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
assert_equal(repr(c),
"array([1.0000+1.0000j, 1.1235+1.1235j])")
# for larger precision, representation error becomes more apparent:
np.set_printoptions(floatmode='fixed', precision=8)
assert_equal(repr(z),
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
" 0.50000000], dtype=float16)")
# maxprec_equal mode, precision=8
np.set_printoptions(floatmode='maxprec_equal', precision=8)
assert_equal(repr(x),
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
assert_equal(repr(y),
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
# test unique special case (gh-18609)
a = np.float64.fromhex('-1p-97')
assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
def test_legacy_mode_scalars(self):
# in legacy mode, str of floats get truncated, and complex scalars
# use * for non-finite imaginary part
np.set_printoptions(legacy='1.13')
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
np.set_printoptions(legacy=False)
assert_equal(str(np.float64(1.123456789123456789)),
'1.1234567891234568')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
def test_legacy_stray_comma(self):
np.set_printoptions(legacy='1.13')
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
np.set_printoptions(legacy=False)
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
def test_dtype_linewidth_wrapping(self):
np.set_printoptions(linewidth=75)
assert_equal(repr(np.arange(10,20., dtype='f4')),
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
dtype=float32)"""))
styp = '<U4'
assert_equal(repr(np.ones(3, dtype=styp)),
"array(['1', '1', '1'], dtype='{}')".format(styp))
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
dtype='{}')""".format(styp)))
def test_linewidth_repr(self):
a = np.full(7, fill_value=2)
np.set_printoptions(linewidth=17)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2])""")
)
np.set_printoptions(linewidth=17, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2, 2])""")
)
a = np.full(8, fill_value=2)
np.set_printoptions(linewidth=18, legacy=False)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2, 2])""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2, 2,
2, 2, 2, 2])""")
)
def test_linewidth_str(self):
a = np.full(18, fill_value=2)
np.set_printoptions(linewidth=18)
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2
2 2]""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2]""")
)
def test_edgeitems(self):
np.set_printoptions(edgeitems=1, threshold=1)
a = np.arange(27).reshape((3, 3, 3))
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
b = np.zeros((3, 3, 1, 1))
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[0.]],
...,
[[0.]]],
...,
[[[0.]],
...,
[[0.]]]])""")
)
# 1.13 had extra trailing spaces, and was missing newlines
np.set_printoptions(legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[ 0.]],
...,
[[ 0.]]],
...,
[[[ 0.]],
...,
[[ 0.]]]])""")
)
def test_bad_args(self):
assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
assert_raises(TypeError, np.set_printoptions, threshold='1')
assert_raises(TypeError, np.set_printoptions, threshold=b'1')
assert_raises(TypeError, np.set_printoptions, precision='1')
assert_raises(TypeError, np.set_printoptions, precision=1.5)
def test_unicode_object_array():
expected = "array(['é'], dtype=object)"
x = np.array([u'\xe9'], dtype=object)
assert_equal(repr(x), expected)
class TestContextManager:
def test_ctx_mgr(self):
# test that context manager actually works
with np.printoptions(precision=2):
s = str(np.array([2.0]) / 3)
assert_equal(s, '[0.67]')
def test_ctx_mgr_restores(self):
        # test that print options are actually restored
opts = np.get_printoptions()
with np.printoptions(precision=opts['precision'] - 1,
linewidth=opts['linewidth'] - 4):
pass
assert_equal(np.get_printoptions(), opts)
def test_ctx_mgr_exceptions(self):
# test that print options are restored even if an exception is raised
opts = np.get_printoptions()
try:
with np.printoptions(precision=2, linewidth=11):
raise ValueError
except ValueError:
pass
assert_equal(np.get_printoptions(), opts)
def test_ctx_mgr_as_smth(self):
opts = {"precision": 2}
with np.printoptions(**opts) as ctx:
saved_opts = ctx.copy()
assert_equal({k: saved_opts[k] for k in opts}, opts)
|
py | 1a526905c8c70fa961e173438f60a34322a379fc | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FileSharesOperations(object):
"""FileSharesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
account_name, # type: str
maxpagesize=None, # type: Optional[str]
filter=None, # type: Optional[str]
expand=None, # type: Optional[Union[str, "_models.ListSharesExpand"]]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FileShareItems"]
"""Lists all shares.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
        :param maxpagesize: Optional. Specifies the maximum number of shares that can be included in the
list.
:type maxpagesize: str
:param filter: Optional. When specified, only share names starting with the filter will be
listed.
:type filter: str
:param expand: Optional, used to expand the properties within share's properties.
:type expand: str or ~azure.mgmt.storage.v2021_01_01.models.ListSharesExpand
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FileShareItems or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_01_01.models.FileShareItems]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileShareItems"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if maxpagesize is not None:
query_parameters['$maxpagesize'] = self._serialize.query("maxpagesize", maxpagesize, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FileShareItems', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares'} # type: ignore
def create(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
file_share, # type: "_models.FileShare"
expand=None, # type: Optional[Union[str, "_models.PutSharesExpand"]]
**kwargs # type: Any
):
# type: (...) -> "_models.FileShare"
"""Creates a new share under the specified account as described by request body. The share
resource includes metadata and properties for that share. It does not include a list of the
files contained by the share.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param file_share: Properties of the file share to create.
:type file_share: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:param expand: Optional, used to create a snapshot.
:type expand: str or ~azure.mgmt.storage.v2021_01_01.models.PutSharesExpand
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileShare"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'shareName': self._serialize.url("share_name", share_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(file_share, 'FileShare')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FileShare', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FileShare', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'} # type: ignore
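    # Example call (a sketch with hypothetical names; ``ops`` is an instance of
    # this class and ``_models.FileShare`` is the generated request model):
    #
    #     share = ops.create(
    #         "my-resource-group", "mystorageacct", "myshare",
    #         _models.FileShare(share_quota=1024),
    #     )
    #     print(share.name, share.share_quota)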
def update(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
file_share, # type: "_models.FileShare"
**kwargs # type: Any
):
# type: (...) -> "_models.FileShare"
"""Updates share properties as specified in request body. Properties not mentioned in the request
will not be changed. Update fails if the specified share does not already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param file_share: Properties to update for the file share.
:type file_share: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileShare"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'shareName': self._serialize.url("share_name", share_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(file_share, 'FileShare')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileShare', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
expand="stats", # type: Optional[str]
x_ms_snapshot=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.FileShare"
"""Gets properties of a specified share.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param expand: Optional, used to expand the properties within share's properties.
:type expand: str
:param x_ms_snapshot: Optional, used to retrieve properties of a snapshot.
:type x_ms_snapshot: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileShare"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'shareName': self._serialize.url("share_name", share_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if x_ms_snapshot is not None:
header_parameters['x-ms-snapshot'] = self._serialize.header("x_ms_snapshot", x_ms_snapshot, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileShare', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
x_ms_snapshot=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes specified share under its account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param x_ms_snapshot: Optional, used to delete a snapshot.
:type x_ms_snapshot: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'shareName': self._serialize.url("share_name", share_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if x_ms_snapshot is not None:
header_parameters['x-ms-snapshot'] = self._serialize.header("x_ms_snapshot", x_ms_snapshot, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'} # type: ignore
def restore(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
deleted_share, # type: "_models.DeletedShare"
**kwargs # type: Any
):
# type: (...) -> None
"""Restore a file share within a valid retention days if share soft delete is enabled.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param deleted_share:
:type deleted_share: ~azure.mgmt.storage.v2021_01_01.models.DeletedShare
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.restore.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'shareName': self._serialize.url("share_name", share_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(deleted_share, 'DeletedShare')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
restore.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/restore'} # type: ignore
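# Usage sketch (hypothetical subscription and resource names; assumes the usual
# azure-identity / azure-mgmt-storage wiring, which exposes this class as
# ``client.file_shares``):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.storage import StorageManagementClient
#
#     client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for share in client.file_shares.list("my-resource-group", "mystorageacct"):
#         print(share.name)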
|
py | 1a526b1f73a5025e01a2d36e8fe289c6779bda84 | #!/usr/bin/env python
# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2012-2017
# - Mario Lassnig <[email protected]>, 2013-2018
# - Thomas Beermann <[email protected]>, 2013-2018
# - Martin Barisits <[email protected]>, 2013-2017
# - Cedric Serfon <[email protected]>, 2014-2017
# - Joaquin Bogado <[email protected]>, 2018
# - Hannes Hansen <[email protected]>, 2018-2019
# - Andrew Lister <[email protected]>, 2019
# - Patrick Austin <[email protected]>, 2020
#
# PY3K COMPATIBLE
from __future__ import print_function
from logging import getLogger, StreamHandler, DEBUG
from json import dumps, loads
from traceback import format_exc
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl
from web import application, ctx, data, header, Created, InternalError, OK, loadhook
from rucio.api.lock import get_replica_locks_for_rule_id
from rucio.api.rule import (add_replication_rule, delete_replication_rule, get_replication_rule, update_replication_rule,
reduce_replication_rule, list_replication_rule_history, list_replication_rule_full_history,
list_replication_rules, examine_replication_rule, move_replication_rule)
from rucio.common.exception import (InsufficientAccountLimit, RuleNotFound, AccessDenied, InvalidRSEExpression,
InvalidReplicationRule, RucioException, DataIdentifierNotFound, InsufficientTargetRSEs,
ReplicationRuleCreationTemporaryFailed, InvalidRuleWeight, StagingAreaRuleRequiresLifetime,
DuplicateRule, InvalidObject, AccountNotFound, RuleReplaceFailed, ScratchDiskLifetimeConflict,
ManualRuleApprovalBlocked, UnsupportedOperation)
from rucio.common.schema import get_schema_value
from rucio.common.utils import generate_http_error, render_json, APIEncoder
from rucio.web.rest.common import rucio_loadhook, check_accept_header_wrapper
LOGGER = getLogger("rucio.rule")
SH = StreamHandler()
SH.setLevel(DEBUG)
LOGGER.addHandler(SH)
URLS = ('/(.+)/locks', 'ReplicaLocks',
'/(.+)/reduce', 'ReduceRule',
'/(.+)/move', 'MoveRule',
'%s/history' % get_schema_value('SCOPE_NAME_REGEXP'), 'RuleHistoryFull',
'/(.+)/history', 'RuleHistory',
'/(.+)/analysis', 'RuleAnalysis',
'/', 'AllRule',
'/(.+)', 'Rule',)
class Rule:
""" REST APIs for replication rules. """
@check_accept_header_wrapper(['application/json'])
def GET(self, rule_id):
""" get rule information for given rule id.
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: JSON dict containing information about the requested rule.
"""
header('Content-Type', 'application/json')
try:
estimate_ttc = False
json_data = data()
params = loads(json_data)
if 'estimate_ttc' in params:
estimate_ttc = params['estimate_ttc']
except ValueError:
estimate_ttc = False
try:
rule = get_replication_rule(rule_id, estimate_ttc=estimate_ttc, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
return render_json(**rule)
def PUT(self, rule_id):
"""
        Update the options of a replication rule (e.g. the locked flag).
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
404 Not Found
500 InternalError
"""
json_data = data()
try:
params = loads(json_data)
options = params['options']
update_replication_rule(rule_id=rule_id, options=options, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except AccessDenied as error:
raise generate_http_error(401, 'AccessDenied', error.args[0])
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except AccountNotFound as error:
raise generate_http_error(404, 'AccountNotFound', error.args[0])
except ScratchDiskLifetimeConflict as error:
raise generate_http_error(409, 'ScratchDiskLifetimeConflict', error.args[0])
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
except UnsupportedOperation as error:
raise generate_http_error(409, 'UnsupportedOperation', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
raise OK()
def DELETE(self, rule_id):
"""
        Delete a replication rule.
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
404 Not Found
500 Internal Error
"""
json_data = data()
try:
purge_replicas = None
params = loads(json_data)
if 'purge_replicas' in params:
purge_replicas = params['purge_replicas']
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
try:
delete_replication_rule(rule_id=rule_id, purge_replicas=purge_replicas, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except AccessDenied as error:
raise generate_http_error(401, 'AccessDenied', error.args[0])
except UnsupportedOperation as error:
raise generate_http_error(401, 'UnsupportedOperation', error.args[0])
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except Exception as error:
raise InternalError(error)
raise OK()
class AllRule:
""" REST APIs for all rules. """
@check_accept_header_wrapper(['application/x-json-stream'])
def GET(self):
"""
Return all rules of a given account.
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
404 Not Found
406 Not Acceptable
        :param filters: Optional filters taken from the query string (e.g. account).
"""
header('Content-Type', 'application/x-json-stream')
filters = {}
if ctx.query:
params = dict(parse_qsl(ctx.query[1:]))
filters.update(params)
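        # For example (hypothetical query string), GET /?account=jdoe yields
        # filters == {'account': 'jdoe'}, which is passed unchanged to
        # list_replication_rules below.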
try:
for rule in list_replication_rules(filters=filters, vo=ctx.env.get('vo')):
yield dumps(rule, cls=APIEncoder) + '\n'
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except Exception as error:
print(format_exc())
raise InternalError(error)
def POST(self):
"""
Create a new replication rule.
HTTP Success:
201 Created
HTTP Error:
400 Bad Request
401 Unauthorized
404 Not Found
409 Conflict
500 Internal Error
"""
json_data = data()
try:
grouping, weight, lifetime, locked, subscription_id, source_replica_expression, activity, notify,\
purge_replicas, ignore_availability, comment, ask_approval, asynchronous, priority,\
split_container, meta = 'DATASET', None, None, False, None, None, None, None, False, False, None,\
False, False, 3, False, None
params = loads(json_data)
dids = params['dids']
account = params['account']
copies = params['copies']
rse_expression = params['rse_expression']
if 'grouping' in params:
grouping = params['grouping']
if 'weight' in params:
weight = params['weight']
if 'lifetime' in params:
lifetime = params['lifetime']
if 'locked' in params:
locked = params['locked']
if 'subscription_id' in params:
subscription_id = params['subscription_id']
if 'source_replica_expression' in params:
source_replica_expression = params['source_replica_expression']
if 'activity' in params:
activity = params['activity']
if 'notify' in params:
notify = params['notify']
if 'purge_replicas' in params:
purge_replicas = params['purge_replicas']
if 'ignore_availability' in params:
ignore_availability = params['ignore_availability']
if 'comment' in params:
comment = params['comment']
if 'ask_approval' in params:
ask_approval = params['ask_approval']
if 'asynchronous' in params:
asynchronous = params['asynchronous']
if 'priority' in params:
priority = params['priority']
if 'split_container' in params:
split_container = params['split_container']
if 'meta' in params:
meta = params['meta']
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
try:
rule_ids = add_replication_rule(dids=dids,
copies=copies,
rse_expression=rse_expression,
weight=weight,
lifetime=lifetime,
grouping=grouping,
account=account,
locked=locked,
subscription_id=subscription_id,
source_replica_expression=source_replica_expression,
activity=activity,
notify=notify,
purge_replicas=purge_replicas,
ignore_availability=ignore_availability,
comment=comment,
ask_approval=ask_approval,
asynchronous=asynchronous,
priority=priority,
split_container=split_container,
meta=meta,
issuer=ctx.env.get('issuer'),
vo=ctx.env.get('vo'))
# TODO: Add all other error cases here
except InvalidReplicationRule as error:
raise generate_http_error(409, 'InvalidReplicationRule', error.args[0])
except DuplicateRule as error:
raise generate_http_error(409, 'DuplicateRule', error.args[0])
except InsufficientTargetRSEs as error:
raise generate_http_error(409, 'InsufficientTargetRSEs', error.args[0])
except InsufficientAccountLimit as error:
raise generate_http_error(409, 'InsufficientAccountLimit', error.args[0])
except InvalidRSEExpression as error:
raise generate_http_error(409, 'InvalidRSEExpression', error.args[0])
except DataIdentifierNotFound as error:
raise generate_http_error(404, 'DataIdentifierNotFound', error.args[0])
except ReplicationRuleCreationTemporaryFailed as error:
raise generate_http_error(409, 'ReplicationRuleCreationTemporaryFailed', error.args[0])
except InvalidRuleWeight as error:
raise generate_http_error(409, 'InvalidRuleWeight', error.args[0])
except StagingAreaRuleRequiresLifetime as error:
raise generate_http_error(409, 'StagingAreaRuleRequiresLifetime', error.args[0])
except ScratchDiskLifetimeConflict as error:
raise generate_http_error(409, 'ScratchDiskLifetimeConflict', error.args[0])
except ManualRuleApprovalBlocked as error:
raise generate_http_error(409, 'ManualRuleApprovalBlocked', error.args[0])
except InvalidObject as error:
raise generate_http_error(409, 'InvalidObject', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
raise InternalError(error)
raise Created(dumps(rule_ids))
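    # Example request body for POST (hypothetical values; only "dids",
    # "account", "copies" and "rse_expression" are required, the other keys
    # fall back to the defaults initialised above):
    #
    #     {
    #         "dids": [{"scope": "user.jdoe", "name": "dataset_1"}],
    #         "account": "jdoe",
    #         "copies": 2,
    #         "rse_expression": "tier=1",
    #         "lifetime": 86400,
    #         "ask_approval": false
    #     }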
class ReplicaLocks:
""" REST APIs for replica locks. """
@check_accept_header_wrapper(['application/x-json-stream'])
def GET(self, rule_id):
""" get locks for a given rule_id.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: Stream of JSON dicts describing the replica locks of the requested rule.
"""
header('Content-Type', 'application/x-json-stream')
try:
locks = get_replica_locks_for_rule_id(rule_id)
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
for lock in locks:
yield dumps(lock, cls=APIEncoder) + '\n'
class ReduceRule:
""" REST APIs for reducing rules. """
def POST(self, rule_id):
"""
Reduce a replication rule.
HTTP Success:
201 Created
HTTP Error:
400 Bad Request
401 Unauthorized
404 Not Found
409 Conflict
500 Internal Error
"""
json_data = data()
try:
exclude_expression = None
params = loads(json_data)
copies = params['copies']
if 'exclude_expression' in params:
exclude_expression = params['exclude_expression']
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
try:
rule_ids = reduce_replication_rule(rule_id=rule_id,
copies=copies,
exclude_expression=exclude_expression,
issuer=ctx.env.get('issuer'),
vo=ctx.env.get('vo'))
# TODO: Add all other error cases here
except RuleReplaceFailed as error:
raise generate_http_error(409, 'RuleReplaceFailed', error.args[0])
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(error)
print(format_exc())
raise InternalError(error)
raise Created(dumps(rule_ids))
class MoveRule:
""" REST APIs for moving rules. """
def POST(self, rule_id):
"""
Move a replication rule.
HTTP Success:
201 Created
HTTP Error:
400 Bad Request
401 Unauthorized
404 Not Found
409 Conflict
500 Internal Error
"""
json_data = data()
try:
params = loads(json_data)
rule_id = params['rule_id']
rse_expression = params['rse_expression']
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
try:
rule_ids = move_replication_rule(rule_id=rule_id,
rse_expression=rse_expression,
issuer=ctx.env.get('issuer'),
vo=ctx.env.get('vo'))
except RuleReplaceFailed as error:
raise generate_http_error(409, 'RuleReplaceFailed', error.args[0])
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(error)
print(format_exc())
raise InternalError(error)
raise Created(dumps(rule_ids))
class RuleHistory:
""" REST APIs for rule history. """
@check_accept_header_wrapper(['application/x-json-stream'])
def GET(self, rule_id):
""" get history for a given rule_id.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: Stream of JSON dicts describing the history of the requested rule.
"""
header('Content-Type', 'application/x-json-stream')
try:
history = list_replication_rule_history(rule_id, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
for hist in history:
yield dumps(hist, cls=APIEncoder) + '\n'
class RuleHistoryFull:
""" REST APIs for rule history for DIDs. """
@check_accept_header_wrapper(['application/x-json-stream'])
def GET(self, scope, name):
""" get history for a given DID.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: Stream of JSON dicts describing the rule history of the requested DID.
"""
header('Content-Type', 'application/x-json-stream')
try:
history = list_replication_rule_full_history(scope, name, vo=ctx.env.get('vo'))
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
for hist in history:
yield dumps(hist, cls=APIEncoder) + '\n'
class RuleAnalysis:
""" REST APIs for rule analysis. """
@check_accept_header_wrapper(['application/json'])
def GET(self, rule_id):
""" get analysis for given rule.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: JSON dict containing the analysis of the requested rule.
"""
header('Content-Type', 'application/json')
try:
analysis = examine_replication_rule(rule_id, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
return render_json(**analysis)
"""----------------------
Web service startup
----------------------"""
APP = application(URLS, globals())
APP.add_processor(loadhook(rucio_loadhook))
application = APP.wsgifunc()
|
py | 1a526c17db22c482e8cb7ada3bd8c715a1be7969 | d = {'daniel':'555-5555', 'anna':'555-7777', 'linus':'555-6666'}
#for key,value in d.items():
# print(key)
# print(value)
d['bob'] = '555-2222'
for phone_number in d.values():
print(phone_number)
print(len(d))
if 'daniel' in d:
print(d['daniel'])
else:
print('Not there!')
print(d['linus'])
for name in d.keys():
print(name)
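# dict.get() returns a default instead of raising KeyError for missing keys
# (small illustrative addition; 'maria' is a hypothetical name)
print(d.get('maria', 'Not there!'))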
|
py | 1a526e8fa7fc50435d929a969d1062d862ce5842 | import os
import pickle
import copy
import json
from collections import defaultdict
import numpy as np
import random
import torch
from torch_geometric.data import Data, Dataset, Batch
from torch_geometric.utils import to_networkx
from torch_scatter import scatter
#from torch.utils.data import Dataset
import rdkit
from rdkit import Chem
from rdkit.Chem.rdchem import Mol, HybridizationType, BondType
from rdkit import RDLogger
import networkx as nx
from tqdm import tqdm
# import sidechainnet as scn  # required by the prepare_pdb* helpers below; uncomment when sidechainnet is available
RDLogger.DisableLog('rdApp.*')
from .chem import BOND_TYPES, mol_to_smiles
def prepare_pdb2(scn_dir, data_path):
# step 1: filter and save pdb file.
train_data = []
cnt_fail = 0
def get_num_plusseg(msk):
tmp = [0]
for i in range(1, len(msk)):
if msk[i] == msk[i-1]:
tmp.append(0)
else:
tmp.append(1)
s = sum(tmp)
if msk[0] == '-':
return (s + 1) // 2
else:
return (s // 2) + 1
def get_plus_rate(msk):
cnt = sum([1 if x == '+' else 0 for x in msk])
return cnt / len(msk)
d = scn.load(casp_version=12, thinning=30, scn_dir=scn_dir)
raw_data = d['train']
mask = raw_data['msk']
n_raw_data = len(mask)
cnt_seg = 0
cnt_success = 0
for i in tqdm(range(n_raw_data)):
if get_plus_rate(mask[i]) > 0.5 and get_num_plusseg(mask[i]) == 1:
cnt_seg += 1
mask_ = [1 if _ == '+' else 0 for _ in mask[i]]
if sum(mask_) < 200:
cnt_success += 1
seq = raw_data['seq'][i]
crd = raw_data['crd'][i]
name = raw_data['ids'][i]
mol = scn.StructureBuilder(seq, crd)
mol.to_pdb('./tmp.pdb')
data = pdb_to_data('./tmp.pdb', name)
if data is not None:
train_data.append(data)
else:
cnt_fail += 1
print('total n_raw_data: %d, cnt_seg: %d, cnt_success: %d' % (n_raw_data, cnt_seg, cnt_success))
n_data = len(train_data)
print('number of train samples: %d | number of fails: %d' % (n_data, cnt_fail))
os.makedirs(os.path.join(data_path), exist_ok=True)
with open(os.path.join(data_path, 'train_data_%dk.pkl' % (n_data // 1000)), "wb") as fout:
pickle.dump(train_data, fout)
print('save train %dk done' % (n_data // 1000))
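# Worked example for the mask helpers defined inside prepare_pdb2 (illustrative
# mask string; '+' marks resolved residues, '-' missing ones):
#   msk = '--++++++--'
#   get_plus_rate(msk)   -> 6 / 10 = 0.6   (fraction of '+' residues)
#   get_num_plusseg(msk) -> 1              (one contiguous '+' segment)
# A chain is kept only if the '+' rate exceeds 0.5, there is a single '+'
# segment, and it has fewer than 200 resolved residues.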
def prepare_pdblarge(scn_dir, data_path):
# step 1: filter and save pdb file.
train_data = []
cnt_fail = 0
max_residue = 0
d = scn.load(casp_version=12, thinning=30, scn_dir=scn_dir)
raw_data = d['train']
mask = raw_data['msk']
n_raw_data = len(mask)
cnt_seg = 0
cnt_success = 0
for i in tqdm(range(n_raw_data)):
# if get_plus_rate(mask[i]) > 0.5 and get_num_plusseg(mask[i]) == 1:
if True:
cnt_seg += 1
mask_ = [1 if _ == '+' else 0 for _ in mask[i]]
if sum(mask_) < 400:
cnt_success += 1
seq = raw_data['seq'][i]
crd = raw_data['crd'][i]
name = raw_data['ids'][i]
mol = scn.StructureBuilder(seq, crd)
mol.to_pdb('./tmp.pdb')
data = pdb_to_data('./tmp.pdb', name)
if data is not None:
train_data.append(data)
max_residue = max(max_residue, sum(mask_))
else:
cnt_fail += 1
print('total n_raw_data: %d, cnt_seg: %d, cnt_success: %d, max_residue: %d' % (n_raw_data, cnt_seg, cnt_success, max_residue))
n_data = len(train_data)
print('number of train samples: %d | number of fails: %d' % (n_data, cnt_fail))
os.makedirs(os.path.join(data_path), exist_ok=True)
with open(os.path.join(data_path, 'train_data_%dk.pkl' % (n_data // 1000)), "wb") as fout:
pickle.dump(train_data, fout)
print('save train %dk done' % (n_data // 1000))
def prepare_pdb_valtest(scn_dir, data_path):
# step 1: filter and save pdb file.
val_data = []
test_data = []
all_data = []
cnt_fail = 0
max_residue = 0
n_raw_data = 0
cnt_success = 0
d = scn.load(casp_version=12, thinning=30, scn_dir=scn_dir)
fetch_dict = ['test', 'valid-10', 'valid-20', 'valid-30', 'valid-40', 'valid-50', 'valid-70', 'valid-90']
for dict_name in fetch_dict:
raw_data = d[dict_name]
mask = raw_data['msk']
n_raw_data += len(mask)
cnt_seg = 0
cnt_success = 0
for i in tqdm(range(len(mask))):
# if get_plus_rate(mask[i]) > 0.5 and get_num_plusseg(mask[i]) == 1:
if True:
mask_ = [1 if _ == '+' else 0 for _ in mask[i]]
if sum(mask_) < 400:
seq = raw_data['seq'][i]
crd = raw_data['crd'][i]
name = raw_data['ids'][i]
mol = scn.StructureBuilder(seq, crd)
mol.to_pdb('./tmp.pdb')
data = pdb_to_data('./tmp.pdb', name)
if data is not None:
cnt_success += 1
all_data.append(data)
max_residue = max(max_residue, sum(mask_))
else:
cnt_fail += 1
print('total n_raw_data: %d, cnt_success: %d, max_residue: %d' % (n_raw_data, cnt_success, max_residue))
random.shuffle(all_data)
n_val = len(all_data) // 2
n_test = len(all_data) - n_val
print('number of val samples: %d | number of test samples: %d | number of fails: %d' % (n_val, n_test, cnt_fail))
os.makedirs(os.path.join(data_path), exist_ok=True)
with open(os.path.join(data_path, 'val_data_%dk.pkl' % (n_val // 1000)), "wb") as fout:
pickle.dump(all_data[:n_val], fout)
print('save val %dk done' % (n_val // 1000))
with open(os.path.join(data_path, 'test_data_%dk.pkl' % (n_test // 1000)), "wb") as fout:
pickle.dump(all_data[n_val:], fout)
print('save test %dk done' % (n_test // 1000))
def pdb_to_data(pdb_path, name):
mol = Chem.rdmolfiles.MolFromPDBFile(pdb_path)
if mol is None:
return None
with open(pdb_path, 'r') as f:
pdb_infos = f.readlines()
pdb_infos = pdb_infos[1:-1]
assert mol.GetNumConformers() == 1
N = mol.GetNumAtoms()
# name = pdb_path.split('/')[-1].split('.')[0]
pos = torch.tensor(mol.GetConformer(0).GetPositions(), dtype=torch.float32)
atomic_number = []
aromatic = []
is_sidechain = []
is_alpha = []
atom2res = []
sp = []
sp2 = []
sp3 = []
num_hs = []
for index, atom in enumerate(mol.GetAtoms()):
atomic_number.append(atom.GetAtomicNum())
aromatic.append(1 if atom.GetIsAromatic() else 0)
hybridization = atom.GetHybridization()
sp.append(1 if hybridization == HybridizationType.SP else 0)
sp2.append(1 if hybridization == HybridizationType.SP2 else 0)
sp3.append(1 if hybridization == HybridizationType.SP3 else 0)
info = atom.GetPDBResidueInfo()
ref_info = pdb_infos[index]
ref_info = ref_info.split()
assert info.GetResidueName().strip() == ref_info[3]
assert info.GetName().strip() == ref_info[2]
assert info.GetResidueNumber() == int(ref_info[4])
if info.GetName().strip() == 'CA':
is_alpha.append(1)
else:
is_alpha.append(0)
if info.GetName().strip() in ['N', 'CA', 'C', 'O']:
is_sidechain.append(0)
else:
is_sidechain.append(1)
atom2res.append(info.GetResidueNumber() - 1)
num_res = len(set(atom2res))
atom2res = np.array(atom2res)
atom2res -= atom2res.min()
atom2res = torch.tensor(atom2res, dtype=torch.long)
is_sidechain = torch.tensor(is_sidechain).bool()
is_alpha = torch.tensor(is_alpha).bool()
dummy_index = torch.arange(pos.size(0))
alpha_index = dummy_index[is_alpha]
res2alpha_index = -torch.ones(5000, dtype=torch.long)
res2alpha_index[atom2res[is_alpha]] = alpha_index
atom2alpha_index = res2alpha_index[atom2res]
if is_sidechain.sum().item() == 0: # protein built solely on GLY can not be used for sidechain prediction
return None
# assert (4 * num_res == (len(is_sidechain) - sum(is_sidechain))),(4 * num_res, (len(is_sidechain) - sum(is_sidechain)))
z = torch.tensor(atomic_number, dtype=torch.long)
row, col, edge_type = [], [], []
for bond in mol.GetBonds():
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
row += [start, end]
col += [end, start]
edge_type += 2 * [BOND_TYPES[bond.GetBondType()]]
edge_index = torch.tensor([row, col], dtype=torch.long)
edge_type = torch.tensor(edge_type)
if edge_index.size(1) == 0: # only alpha carbon
return None
perm = (edge_index[0] * N + edge_index[1]).argsort()
edge_index = edge_index[:, perm]
edge_type = edge_type[perm]
row, col = edge_index
hs = (z == 1).to(torch.float32)
num_hs = scatter(hs[row], col, dim_size=N, reduce='sum').tolist()
# smiles = Chem.MolToSmiles(mol)
data = Data(atom_type=z, pos=pos, edge_index=edge_index, edge_type=edge_type, is_alpha=is_alpha,
rdmol=copy.deepcopy(mol), name=name, is_sidechain=is_sidechain, atom2res=atom2res, atom2alpha_index=atom2alpha_index)
#data.nx = to_networkx(data, to_undirected=True)
return data
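# Illustrative call (the path and structure name are placeholders); returns None when the
# protein contains no sidechain atoms or no bonds:
# data = pdb_to_data('./tmp.pdb', name='some_structure')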
def rdmol_to_data(mol:Mol, smiles=None, data_cls=Data):
assert mol.GetNumConformers() == 1
N = mol.GetNumAtoms()
pos = torch.tensor(mol.GetConformer(0).GetPositions(), dtype=torch.float32)
atomic_number = []
aromatic = []
sp = []
sp2 = []
sp3 = []
num_hs = []
for atom in mol.GetAtoms():
atomic_number.append(atom.GetAtomicNum())
aromatic.append(1 if atom.GetIsAromatic() else 0)
hybridization = atom.GetHybridization()
sp.append(1 if hybridization == HybridizationType.SP else 0)
sp2.append(1 if hybridization == HybridizationType.SP2 else 0)
sp3.append(1 if hybridization == HybridizationType.SP3 else 0)
z = torch.tensor(atomic_number, dtype=torch.long)
row, col, edge_type = [], [], []
for bond in mol.GetBonds():
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
row += [start, end]
col += [end, start]
edge_type += 2 * [BOND_TYPES[bond.GetBondType()]]
edge_index = torch.tensor([row, col], dtype=torch.long)
edge_type = torch.tensor(edge_type)
perm = (edge_index[0] * N + edge_index[1]).argsort()
edge_index = edge_index[:, perm]
edge_type = edge_type[perm]
row, col = edge_index
hs = (z == 1).to(torch.float32)
num_hs = scatter(hs[row], col, dim_size=N, reduce='sum').tolist()
if smiles is None:
smiles = Chem.MolToSmiles(mol)
data = data_cls(atom_type=z, pos=pos, edge_index=edge_index, edge_type=edge_type,
rdmol=copy.deepcopy(mol), smiles=smiles)
#data.nx = to_networkx(data, to_undirected=True)
return data
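# Minimal usage sketch (illustrative, not part of the original code; the SMILES and the
# embedding call are made up -- rdmol_to_data expects exactly one 3D conformer):
# from rdkit.Chem import AllChem
# mol = Chem.AddHs(Chem.MolFromSmiles('CCO'))
# AllChem.EmbedMolecule(mol)
# data = rdmol_to_data(mol)   # -> Data(atom_type, pos, edge_index, edge_type, ...)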
class MolClusterData(Data):
def __inc__(self, key, value):
if key == 'subgraph_index':
return self.subgraph_index.max().item() + 1
else:
return super().__inc__(key, value)
def rdmol_cluster_to_data(mol:Mol, smiles=None):
data = rdmol_to_data(mol, smiles, data_cls=MolClusterData)
data.subgraph_index = torch.zeros([data.atom_type.size(0)], dtype=torch.long)
for i, subgraph in enumerate(nx.connected_components(to_networkx(data, to_undirected=True))):
data.subgraph_index[list(subgraph)] = i
return data
def preprocess_iso17_dataset(base_path):
train_path = os.path.join(base_path, 'iso17_split-0_train.pkl')
test_path = os.path.join(base_path, 'iso17_split-0_test.pkl')
with open(train_path, 'rb') as fin:
raw_train = pickle.load(fin)
with open(test_path, 'rb') as fin:
raw_test = pickle.load(fin)
smiles_list_train = [mol_to_smiles(mol) for mol in raw_train]
smiles_set_train = list(set(smiles_list_train))
smiles_list_test = [mol_to_smiles(mol) for mol in raw_test]
smiles_set_test = list(set(smiles_list_test))
print('preprocess train...')
all_train = []
for i in tqdm(range(len(raw_train))):
smiles = smiles_list_train[i]
data = rdmol_to_data(raw_train[i], smiles=smiles)
all_train.append(data)
print('Train | find %d molecules with %d confs' % (len(smiles_set_train), len(all_train)))
print('preprocess test...')
all_test = []
for i in tqdm(range(len(raw_test))):
smiles = smiles_list_test[i]
data = rdmol_to_data(raw_test[i], smiles=smiles)
all_test.append(data)
print('Test | find %d molecules with %d confs' % (len(smiles_set_test), len(all_test)))
return all_train, all_test
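# Illustrative call (base_path is a placeholder directory holding the two ISO17 pickles):
# train, test = preprocess_iso17_dataset('./data/iso17')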
def preprocess_GEOM_dataset(base_path, dataset_name, max_conf=5, train_size=0.8, max_size=9999999999, seed=None):
# set random seed
if seed is None:
seed = 2021
np.random.seed(seed)
random.seed(seed)
# read summary file
assert dataset_name in ['qm9', 'drugs']
summary_path = os.path.join(base_path, 'summary_%s.json' % dataset_name)
with open(summary_path, 'r') as f:
summ = json.load(f)
# filter valid pickle path
smiles_list = []
pickle_path_list = []
num_mols = 0
num_confs = 0
for smiles, meta_mol in tqdm(summ.items()):
u_conf = meta_mol.get('uniqueconfs')
if u_conf is None:
continue
pickle_path = meta_mol.get('pickle_path')
if pickle_path is None:
continue
num_mols += 1
num_confs += min(max_conf, u_conf)
smiles_list.append(smiles)
pickle_path_list.append(pickle_path)
if num_mols >= max_size:
break
print('pre-filter: find %d molecules with %d confs' % (num_mols, num_confs))
# 1. select maximal 'max_conf' confs of each qm9 molecule
# 2. split the dataset based on 2d-structure, i.e., test on unseen graphs
train_data, val_data, test_data = [], [], []
val_size = test_size = (1. - train_size) / 2
num_mols = np.zeros(4, dtype=int) # (tot, train, val, test)
num_confs = np.zeros(4, dtype=int) # (tot, train, val, test)
'''
# mol.get('uniqueconfs') != len(mol.get('conformers'))
with open(os.path.join(base_path, pickle_path_list[1878]), 'rb') as fin:
mol = pickle.load(fin)
print(mol.get('uniqueconfs'), len(mol.get('conformers')))
print(mol.get('conformers')[0]['rd_mol'].GetConformer(0).GetPositions())
print(mol.get('conformers')[1]['rd_mol'].GetConformer(0).GetPositions())
return
'''
bad_case = 0
for i in tqdm(range(len(pickle_path_list))):
with open(os.path.join(base_path, pickle_path_list[i]), 'rb') as fin:
mol = pickle.load(fin)
if mol.get('uniqueconfs') > len(mol.get('conformers')):
bad_case += 1
continue
if mol.get('uniqueconfs') <= 0:
bad_case += 1
continue
datas = []
smiles = mol.get('smiles')
if mol.get('uniqueconfs') <= max_conf:
# use all confs
conf_ids = np.arange(mol.get('uniqueconfs'))
else:
# filter the most probable 'max_conf' confs
all_weights = np.array([_.get('boltzmannweight', -1.) for _ in mol.get('conformers')])
descend_conf_id = (-all_weights).argsort()
conf_ids = descend_conf_id[:max_conf]
for conf_id in conf_ids:
conf_meta = mol.get('conformers')[conf_id]
data = rdmol_to_data(conf_meta.get('rd_mol'))
labels = {
'totalenergy': conf_meta['totalenergy'],
'boltzmannweight': conf_meta['boltzmannweight'],
}
for k, v in labels.items():
data[k] = torch.tensor([v], dtype=torch.float32)
datas.append(data)
# split
eps = np.random.rand()
if eps <= train_size:
train_data.extend(datas)
num_mols += [1, 1, 0, 0]
num_confs += [len(datas), len(datas), 0, 0]
elif eps <= train_size + val_size:
val_data.extend(datas)
num_mols += [1, 0, 1, 0]
num_confs += [len(datas), 0, len(datas), 0]
else:
test_data.extend(datas)
num_mols += [1, 0, 0, 1]
num_confs += [len(datas), 0, 0, len(datas)]
print('post-filter: find %d molecules with %d confs' % (num_mols[0], num_confs[0]))
print('train size: %d molecules with %d confs' % (num_mols[1], num_confs[1]))
print('val size: %d molecules with %d confs' % (num_mols[2], num_confs[2]))
print('test size: %d molecules with %d confs' % (num_mols[3], num_confs[3]))
print('bad case: %d' % bad_case)
print('done!')
return train_data, val_data, test_data
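# Illustrative call (the GEOM path is a placeholder):
# train, val, test = preprocess_GEOM_dataset('/path/to/GEOM/rdkit_folder', 'qm9',
#                                            max_conf=5, train_size=0.8, seed=2021)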
def preprocess_GEOM_dataset_with_fixed_num_conf(base_path, dataset_name, conf_per_mol=5, train_size=0.8, tot_mol_size=50000, seed=None):
"""
base_path: directory that contains GEOM dataset
dataset_name: dataset name, should be in [qm9, drugs]
conf_per_mol: keep mol that has at least conf_per_mol confs, and sampling the most probable conf_per_mol confs
train_size ratio, val = test = (1-train_size) / 2
tot_mol_size: max num of mols. The total number of final confs should be tot_mol_size * conf_per_mol
seed: rand seed for RNG
"""
# set random seed
if seed is None:
seed = 2021
np.random.seed(seed)
random.seed(seed)
# read summary file
assert dataset_name in ['qm9', 'drugs']
summary_path = os.path.join(base_path, 'summary_%s.json' % dataset_name)
with open(summary_path, 'r') as f:
summ = json.load(f)
# filter valid pickle path
smiles_list = []
pickle_path_list = []
num_mols = 0
num_confs = 0
for smiles, meta_mol in tqdm(summ.items()):
u_conf = meta_mol.get('uniqueconfs')
if u_conf is None:
continue
pickle_path = meta_mol.get('pickle_path')
if pickle_path is None:
continue
if u_conf < conf_per_mol:
continue
num_mols += 1
num_confs += conf_per_mol
smiles_list.append(smiles)
pickle_path_list.append(pickle_path)
# we need do a shuffle and sample first max_size items here.
#if num_mols >= max_size:
# break
random.shuffle(pickle_path_list)
assert len(pickle_path_list) >= tot_mol_size, 'the length of all available mols is %d, which is smaller than tot mol size %d' % (len(pickle_path_list), tot_mol_size)
pickle_path_list = pickle_path_list[:tot_mol_size]
print('pre-filter: find %d molecules with %d confs, use %d molecules with %d confs' % (num_mols, num_confs, tot_mol_size, tot_mol_size*conf_per_mol))
# 1. select maximal 'max_conf' confs of each qm9 molecule
# 2. split the dataset based on 2d-structure, i.e., test on unseen graphs
train_data, val_data, test_data = [], [], []
val_size = test_size = (1. - train_size) / 2
# generate train, val, test split indexes
split_indexes = list(range(tot_mol_size))
random.shuffle(split_indexes)
index2split = {}
#print(int(len(split_indexes) * train_size), int(len(split_indexes) * (train_size + val_size)), len(split_indexes))
for i in range(0, int(len(split_indexes) * train_size)):
index2split[split_indexes[i]] = 'train'
for i in range(int(len(split_indexes) * train_size), int(len(split_indexes) * (train_size + val_size))):
index2split[split_indexes[i]] = 'val'
for i in range(int(len(split_indexes) * (train_size + val_size)), len(split_indexes)):
index2split[split_indexes[i]] = 'test'
num_mols = np.zeros(4, dtype=int) # (tot, train, val, test)
num_confs = np.zeros(4, dtype=int) # (tot, train, val, test)
bad_case = 0
for i in tqdm(range(len(pickle_path_list))):
with open(os.path.join(base_path, pickle_path_list[i]), 'rb') as fin:
mol = pickle.load(fin)
if mol.get('uniqueconfs') > len(mol.get('conformers')):
bad_case += 1
continue
if mol.get('uniqueconfs') <= 0:
bad_case += 1
continue
datas = []
smiles = mol.get('smiles')
if mol.get('uniqueconfs') == conf_per_mol:
# use all confs
conf_ids = np.arange(mol.get('uniqueconfs'))
else:
# filter the most probable 'max_conf' confs
all_weights = np.array([_.get('boltzmannweight', -1.) for _ in mol.get('conformers')])
descend_conf_id = (-all_weights).argsort()
conf_ids = descend_conf_id[:conf_per_mol]
for conf_id in conf_ids:
conf_meta = mol.get('conformers')[conf_id]
data = rdmol_to_data(conf_meta.get('rd_mol'), smiles=smiles)
labels = {
'totalenergy': conf_meta['totalenergy'],
'boltzmannweight': conf_meta['boltzmannweight'],
}
for k, v in labels.items():
data[k] = torch.tensor([v], dtype=torch.float32)
data['idx'] = torch.tensor([i], dtype=torch.long)
datas.append(data)
assert len(datas) == conf_per_mol
# split
'''
eps = np.random.rand()
if eps <= train_size:
train_data.extend(datas)
num_mols += [1, 1, 0, 0]
num_confs += [len(datas), len(datas), 0, 0]
elif eps <= train_size + val_size:
val_data.extend(datas)
num_mols += [1, 0, 1, 0]
num_confs += [len(datas), 0, len(datas), 0]
else:
test_data.extend(datas)
num_mols += [1, 0, 0, 1]
num_confs += [len(datas), 0, 0, len(datas)]
'''
if index2split[i] == 'train':
train_data.extend(datas)
num_mols += [1, 1, 0, 0]
num_confs += [len(datas), len(datas), 0, 0]
elif index2split[i] == 'val':
val_data.extend(datas)
num_mols += [1, 0, 1, 0]
num_confs += [len(datas), 0, len(datas), 0]
elif index2split[i] == 'test':
test_data.extend(datas)
num_mols += [1, 0, 0, 1]
num_confs += [len(datas), 0, 0, len(datas)]
else:
raise ValueError('unknown index2split value.')
print('post-filter: find %d molecules with %d confs' % (num_mols[0], num_confs[0]))
print('train size: %d molecules with %d confs' % (num_mols[1], num_confs[1]))
print('val size: %d molecules with %d confs' % (num_mols[2], num_confs[2]))
print('test size: %d molecules with %d confs' % (num_mols[3], num_confs[3]))
print('bad case: %d' % bad_case)
print('done!')
return train_data, val_data, test_data, index2split
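# Illustrative call (the path is a placeholder); note the extra index2split return value:
# train, val, test, index2split = preprocess_GEOM_dataset_with_fixed_num_conf(
#     '/path/to/GEOM/rdkit_folder', 'drugs', conf_per_mol=5, tot_mol_size=50000)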
def get_test_set_with_large_num_conf(base_path, dataset_name, block, tot_mol_size=1000, seed=None, confmin=50, confmax=500):
"""
base_path: directory that contains GEOM dataset
dataset_name: dataset name, should be in [qm9, drugs]
conf_per_mol: keep mol that has at least conf_per_mol confs, and sampling the most probable conf_per_mol confs
train_size ratio, val = test = (1-train_size) / 2
tot_mol_size: max num of mols. The total number of final confs should be tot_mol_size * conf_per_mol
seed: rand seed for RNG
"""
#block smiles in train / val
block_smiles = defaultdict(int)
for i in range(len(block)):
block_smiles[block[i].smiles] = 1
# set random seed
if seed is None:
seed = 2021
np.random.seed(seed)
random.seed(seed)
# read summary file
assert dataset_name in ['qm9', 'drugs']
summary_path = os.path.join(base_path, 'summary_%s.json' % dataset_name)
with open(summary_path, 'r') as f:
summ = json.load(f)
# filter valid pickle path
smiles_list = []
pickle_path_list = []
num_mols = 0
num_confs = 0
for smiles, meta_mol in tqdm(summ.items()):
u_conf = meta_mol.get('uniqueconfs')
if u_conf is None:
continue
pickle_path = meta_mol.get('pickle_path')
if pickle_path is None:
continue
if u_conf < confmin or u_conf > confmax:
continue
if block_smiles[smiles] == 1:
continue
num_mols += 1
num_confs += u_conf
smiles_list.append(smiles)
pickle_path_list.append(pickle_path)
# we need do a shuffle and sample first max_size items here.
#if num_mols >= tot_mol_size:
# break
random.shuffle(pickle_path_list)
assert len(pickle_path_list) >= tot_mol_size, 'the length of all available mols is %d, which is smaller than tot mol size %d' % (len(pickle_path_list), tot_mol_size)
pickle_path_list = pickle_path_list[:tot_mol_size]
print('pre-filter: find %d molecules with %d confs' % (num_mols, num_confs))
bad_case = 0
all_test_data = []
num_valid_mol = 0
num_valid_conf = 0
for i in tqdm(range(len(pickle_path_list))):
with open(os.path.join(base_path, pickle_path_list[i]), 'rb') as fin:
mol = pickle.load(fin)
if mol.get('uniqueconfs') > len(mol.get('conformers')):
bad_case += 1
continue
if mol.get('uniqueconfs') <= 0:
bad_case += 1
continue
datas = []
smiles = mol.get('smiles')
conf_ids = np.arange(mol.get('uniqueconfs'))
for conf_id in conf_ids:
conf_meta = mol.get('conformers')[conf_id]
data = rdmol_to_data(conf_meta.get('rd_mol'), smiles=smiles)
labels = {
'totalenergy': conf_meta['totalenergy'],
'boltzmannweight': conf_meta['boltzmannweight'],
}
for k, v in labels.items():
data[k] = torch.tensor([v], dtype=torch.float32)
data['idx'] = torch.tensor([i], dtype=torch.long)
datas.append(data)
all_test_data.extend(datas)
num_valid_mol += 1
num_valid_conf += len(datas)
print('poster-filter: find %d molecules with %d confs' % (num_valid_mol, num_valid_conf))
return all_test_data
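# Illustrative call (`block` is the list of train/val Data objects whose SMILES must be
# excluded from this test set):
# test_set = get_test_set_with_large_num_conf('/path/to/GEOM/rdkit_folder', 'drugs',
#                                             block=train + val, tot_mol_size=1000)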
class ConformationDataset(Dataset):
def __init__(self, path, transform=None):
super().__init__()
with open(path, 'rb') as f:
self.data = pickle.load(f)
self.transform = transform
self.atom_types = self._atom_types()
self.edge_types = self._edge_types()
def __getitem__(self, idx):
data = self.data[idx].clone()
if self.transform is not None:
data = self.transform(data)
return data
def __len__(self):
return len(self.data)
def _atom_types(self):
"""All atom types."""
atom_types = set()
for graph in self.data:
atom_types.update(graph.atom_type.tolist())
return sorted(atom_types)
def _edge_types(self):
"""All edge types."""
edge_types = set()
for graph in self.data:
edge_types.update(graph.edge_type.tolist())
return sorted(edge_types)
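# Illustrative usage (the pickle path and batch size are placeholders):
# dataset = ConformationDataset('./data/qm9/train_data_40k.pkl')
# loader = torch.utils.data.DataLoader(dataset, batch_size=32,
#                                      collate_fn=Batch.from_data_list)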
class SidechainConformationDataset(ConformationDataset):
def __init__(self, path, transform=None, cutoff=10., max_residue=5000, fix_subgraph=False):
super().__init__(path, transform)
self.cutoff = cutoff
self.max_residue = max_residue
self.fix_subgraph = fix_subgraph
def __getitem__(self, idx):
data = self.data[idx].clone()
""" Subgraph sampling
1. sampling an atom from the backbone (residue)
2. Find all neighboring atoms within a cutoff
3. extend atoms to ensure the completeness of each residue
4. remap the index for subgraph
"""
is_sidechain = data.is_sidechain
pos = data.pos
edge_index = data.edge_index
atom2res = data.atom2res
dummy_index = torch.arange(pos.size(0))
backbone_index = dummy_index[~is_sidechain]
#stop=False
#while not stop:
# step 1
if self.fix_subgraph:
center_atom_index = backbone_index[backbone_index.size(0) // 2].view(1,)
else:
center_atom_index = backbone_index[torch.randint(low=0, high=backbone_index.size(0), size=(1, ))] # (1, )
pos_center_atom = pos[center_atom_index] # (1, 3)
# step 2
distance = (pos_center_atom - pos).norm(dim=-1)
mask = (distance <= self.cutoff)
# step 3
is_keep_residue = scatter(mask, atom2res, dim=-1, dim_size=self.max_residue, reduce='sum') # (max_residue, )
is_keep_atom = is_keep_residue[atom2res]
is_keep_edge = (is_keep_atom[edge_index[0]]) & (is_keep_atom[edge_index[1]])
# step 4
mapping = -torch.ones(pos.size(0), dtype=torch.long)
keep_index = dummy_index[is_keep_atom]
mapping[keep_index] = torch.arange(keep_index.size(0))
if (data.is_sidechain[is_keep_atom]).sum().item() == 0:
#stop = True
return None
# return subgraph data
subgraph_data = Data(atom_type=data.atom_type[is_keep_atom],
pos=data.pos[is_keep_atom],
edge_index=mapping[data.edge_index[:, is_keep_edge]],
edge_type=data.edge_type[is_keep_edge],
is_sidechain=data.is_sidechain[is_keep_atom],
atom2res=data.atom2res[is_keep_atom])
if self.transform is not None:
subgraph_data = self.transform(subgraph_data)
return subgraph_data
@staticmethod
def collate_fn(data):
batch = [_ for _ in data if _ is not None]
return Batch.from_data_list(batch)
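# Illustrative usage (placeholders for the path and loader settings); the custom
# collate_fn drops samples whose sampled subgraph contains no sidechain atoms:
# ds = SidechainConformationDataset('./data/pdb/train_data_10k.pkl', cutoff=10.)
# loader = torch.utils.data.DataLoader(ds, batch_size=4,
#                                      collate_fn=SidechainConformationDataset.collate_fn)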
def accumulate_grad_from_subgraph(model, atom_type, pos, bond_index, bond_type, batch, atom2res, batch_size=8, device='cuda:0',
is_sidechain=None, is_alpha=None, pos_gt=None, cutoff=10., max_residue=5000, transform=None):
"""
1. decompose the protein to subgraphs
2. evaluate subgraphs using trained models
3. accumulate atom-wise grads
4. return grads
"""
accumulated_grad = torch.zeros_like(pos)
    accumulated_time = torch.zeros(pos.size(0), device=pos.device)
all_subgraphs = []
dummy_index = torch.arange(pos.size(0))
# prepare subgraphs
    is_covered = torch.zeros(pos.size(0), device=pos.device).bool()
is_alpha_and_uncovered = is_alpha & (~is_covered)
while is_alpha_and_uncovered.sum().item() != 0:
alpha_index = dummy_index[is_alpha_and_uncovered]
center_atom_index = alpha_index[torch.randint(low=0, high=alpha_index.size(0), size=(1, ))] # (1, )
pos_center_atom = pos[center_atom_index] # (1, 3)
distance = (pos_center_atom - pos).norm(dim=-1)
mask = (distance <= cutoff)
is_keep_residue = scatter(mask, atom2res, dim=-1, dim_size=max_residue, reduce='sum') # (max_residue, )
is_keep_atom = is_keep_residue[atom2res]
is_keep_edge = (is_keep_atom[bond_index[0]]) & (is_keep_atom[bond_index[1]])
mapping = -torch.ones(pos.size(0), dtype=torch.long)
keep_index = dummy_index[is_keep_atom]
mapping[keep_index] = torch.arange(keep_index.size(0))
is_covered |= is_keep_atom
is_alpha_and_uncovered = is_alpha & (~is_covered)
if (is_sidechain[is_keep_atom]).sum().item() == 0:
continue
subgraph = Data(atom_type=atom_type[is_keep_atom],
pos=pos[is_keep_atom],
edge_index=mapping[bond_index[:, is_keep_edge]],
edge_type=bond_type[is_keep_edge],
is_sidechain=is_sidechain[is_keep_atom],
atom2res=atom2res[is_keep_atom],
mapping=keep_index)
if transform is not None:
subgraph = transform(subgraph)
all_subgraphs.append(subgraph)
# run model
tot_iters = (len(all_subgraphs) + batch_size - 1) // batch_size
for it in range(tot_iters):
        batch = Batch.from_data_list(all_subgraphs[it * batch_size:(it + 1) * batch_size]).to(device)
class PackedConformationDataset(ConformationDataset):
def __init__(self, path, transform=None):
super().__init__(path, transform)
#k:v = idx: data_obj
self._pack_data_by_mol()
def _pack_data_by_mol(self):
"""
pack confs with same mol into a single data object
"""
self._packed_data = defaultdict(list)
if hasattr(self.data, 'idx'):
for i in range(len(self.data)):
self._packed_data[self.data[i].idx.item()].append(self.data[i])
else:
for i in range(len(self.data)):
self._packed_data[self.data[i].smiles].append(self.data[i])
print('[Packed] %d Molecules, %d Conformations.' % (len(self._packed_data), len(self.data)))
new_data = []
# logic
# save graph structure for each mol once, but store all confs
cnt = 0
for k, v in self._packed_data.items():
data = copy.deepcopy(v[0])
all_pos = []
for i in range(len(v)):
all_pos.append(v[i].pos)
data.pos_ref = torch.cat(all_pos, 0) # (num_conf*num_node, 3)
data.num_pos_ref = torch.tensor([len(all_pos)], dtype=torch.long)
#del data.pos
if hasattr(data, 'totalenergy'):
del data.totalenergy
if hasattr(data, 'boltzmannweight'):
del data.boltzmannweight
new_data.append(data)
self.new_data = new_data
def __getitem__(self, idx):
data = self.new_data[idx].clone()
if self.transform is not None:
data = self.transform(data)
return data
def __len__(self):
return len(self.new_data)
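# Illustrative usage (placeholder path): each item packs all conformers of one molecule,
# exposing pos_ref of shape (num_conf * num_node, 3) together with num_pos_ref:
# test_set = PackedConformationDataset('./data/qm9/test_data_1k.pkl')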
|
py | 1a526f0a59d22c76830d3a1b9451cbd1322c10dd | # -*- coding: utf-8 -*-
#
# Pharmapendium_scripts documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pharmapendium_scripts'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pharmapendium_scriptsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'pharmapendium_scripts.tex',
u'Pharmapendium_scripts Documentation',
u"Eric Gilbert", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pharmapendium_scripts', u'Pharmapendium_scripts Documentation',
[u"Eric Gilbert"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pharmapendium_scripts', u'Pharmapendium_scripts Documentation',
u"Eric Gilbert", 'Pharmapendium_scripts',
'This project is for creating scripts to be used to access Pharmapendium using the API.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py | 1a527060c2e3237e6f9ba4199ff095c44686605f | import py
from rpython.rlib.jit import JitDriver, dont_look_inside
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib import rgc
from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin
class DelTests:
def test_del_keep_obj(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
class Foo:
def __del__(self):
pass
def f(n):
x = None
while n > 0:
myjitdriver.can_enter_jit(x=x, n=n)
myjitdriver.jit_merge_point(x=x, n=n)
x = Foo()
Foo()
n -= 1
return 42
self.meta_interp(f, [20])
self.check_resops({'call': 4, # calls to a helper function
'guard_no_exception': 4, # follows the calls
'int_sub': 2,
'int_gt': 2,
'guard_true': 2,
'jump': 1})
def test_class_of_allocated(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
class Foo:
def __del__(self):
pass
def f(self):
return self.meth()
class X(Foo):
def meth(self):
return 456
class Y(Foo):
def meth(self):
return 123
def f(n):
x = None
while n > 0:
myjitdriver.can_enter_jit(x=x, n=n)
myjitdriver.jit_merge_point(x=x, n=n)
x = X()
y = Y()
assert x.f() == 456
assert y.f() == 123
n -= 1
return 42
res = self.meta_interp(f, [20])
assert res == 42
def test_instantiate_with_or_without_del(self):
import gc
mydriver = JitDriver(reds = ['n', 'x'], greens = [])
class Base: pass
class A(Base): foo = 72
class B(Base):
foo = 8
def __del__(self):
pass
def f(n):
x = 0
while n > 0:
mydriver.can_enter_jit(n=n, x=x)
mydriver.jit_merge_point(n=n, x=x)
if n % 2 == 0:
cls = A
else:
cls = B
inst = cls()
x += inst.foo
n -= 1
return 1
res = self.meta_interp(f, [20], enable_opts='')
assert res == 1
self.check_resops(call=1) # for the case B(), but not for the case A()
def test_keepalive(self):
py.test.skip("XXX fails") # hum, I think the test itself is broken
#
mydriver = JitDriver(reds = ['n', 'states'], greens = [])
class State:
num = 1
class X:
def __init__(self, state):
self.state = state
def __del__(self):
self.state.num += 1
@dont_look_inside
def do_stuff():
pass
def f(n):
states = []
while n > 0:
mydriver.jit_merge_point(n=n, states=states)
state = State()
states.append(state)
x = X(state)
do_stuff()
state.num *= 1000
do_stuff()
keepalive_until_here(x)
n -= 1
return states
def main(n):
states = f(n)
rgc.collect()
rgc.collect()
err = 1001
for state in states:
if state.num != 1001:
err = state.num
print 'ERROR:', err
return err
assert main(20) == 1001
res = self.meta_interp(main, [20])
assert res == 1001
class TestLLtype(DelTests, LLJitMixin):
pass
class TestOOtype(DelTests, OOJitMixin):
def setup_class(cls):
py.test.skip("XXX dels are not implemented in the"
" static CLI or JVM backend")
|
py | 1a527093aed44ea54843631636ab7ac64f95a699 | from .InputExample import InputExample
from .LabelSentenceReader import LabelSentenceReader
from .NLIDataReader import NLIDataReader
from .STSDataReader import STSDataReader, STSBenchmarkDataReader
from .TripletReader import TripletReader
from .InputExampleDocument import InputExampleDocument
|
py | 1a5270c3f4eafb203af9aa11265abe2cbffbb7c1 |
import logging
log = logging.getLogger(__name__)
import numpy
import westpa
from oldtools.aframe import AnalysisMixin, ArgumentError
class IterRangeMixin(AnalysisMixin):
'''A mixin for limiting the range of data considered for a given analysis. This should go after
DataManagerMixin'''
def __init__(self):
super(IterRangeMixin,self).__init__()
self.first_iter = None
self.last_iter = None
self.iter_step = 1
include_args = self.include_args.setdefault('IterRangeMixin',{})
include_args.setdefault('first_iter', True)
include_args.setdefault('last_iter', True)
include_args.setdefault('iter_step',True)
def add_args(self, parser, upcall = True):
if upcall:
try:
upfunc = super(IterRangeMixin,self).add_args
except AttributeError:
pass
else:
upfunc(parser)
group = parser.add_argument_group('analysis range')
if self.include_args['IterRangeMixin']['first_iter']:
group.add_argument('--start', '--begin', '--first', dest='first_iter', type=int, metavar='N_ITER', default=1,
help='''Begin analysis at iteration N_ITER (default: %(default)d).''')
if self.include_args['IterRangeMixin']['last_iter']:
group.add_argument('--stop', '--end', '--last', dest='last_iter', type=int, metavar='N_ITER',
help='''Conclude analysis with N_ITER, inclusive (default: last completed iteration).''')
if self.include_args['IterRangeMixin']['iter_step']:
group.add_argument('--step', dest='iter_step', type=int, metavar='STEP',
help='''Analyze/report in blocks of STEP iterations.''')
def process_args(self, args, upcall = True):
if self.include_args['IterRangeMixin']['first_iter']:
self.first_iter = args.first_iter or 1
if self.include_args['IterRangeMixin']['last_iter']:
self.last_iter = args.last_iter
if self.include_args['IterRangeMixin']['iter_step']:
self.iter_step = args.iter_step or 1
if upcall:
try:
upfunc = super(IterRangeMixin,self).process_args
except AttributeError:
pass
else:
upfunc(args)
def check_iter_range(self):
assert hasattr(self, 'data_manager') and self.data_manager is not None
self.first_iter = int(max(self.first_iter, 1))
if self.last_iter is None or self.last_iter > self.data_manager.current_iteration - 1:
self.last_iter = int(self.data_manager.current_iteration - 1)
if self.first_iter == self.last_iter:
raise ArgumentError('first and last iterations are the same')
westpa.rc.pstatus('Processing iterations from {self.first_iter:d} to {self.last_iter:d}, inclusive (step size {self.iter_step:d})'.format(self=self))
def iter_block_iter(self):
'''Return an iterable of (block_first,block_last+1) over the blocks of iterations
selected by --first/--last/--step. NOTE WELL that the second of the pair follows Python
iterator conventions and returns one past the last element of the block.'''
for blkfirst in range(self.first_iter, self.last_iter+1, self.iter_step):
yield(blkfirst, min(self.last_iter, blkfirst+self.iter_step-1)+1)
def n_iter_blocks(self):
'''Return the number of blocks of iterations (as returned by ``iter_block_iter``) selected by --first/--last/--step.'''
npoints = self.last_iter - self.first_iter + 1
if npoints % self.iter_step == 0:
return npoints // self.iter_step
else:
return npoints // self.iter_step + 1
def record_data_iter_range(self, h5object, first_iter = None, last_iter = None):
'''Store attributes ``first_iter`` and ``last_iter`` on the given HDF5 object (group/dataset)'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
h5object.attrs['first_iter'] = first_iter
h5object.attrs['last_iter'] = last_iter
def record_data_iter_step(self, h5object, iter_step = None):
'''Store attribute ``iter_step`` on the given HDF5 object (group/dataset).'''
iter_step = iter_step or self.iter_step
h5object.attrs['iter_step'] = iter_step
def check_data_iter_range_least(self, h5object, first_iter = None, last_iter = None):
'''Check that the given HDF5 object contains (as denoted by its ``first_iter``/``last_iter`` attributes) at least the
data range specified.'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
obj_first_iter = h5object.attrs.get('first_iter')
obj_last_iter = h5object.attrs.get('last_iter')
return (obj_first_iter <= first_iter and obj_last_iter >= last_iter)
def check_data_iter_range_equal(self, h5object, first_iter = None, last_iter = None):
'''Check that the given HDF5 object contains per-iteration data for exactly the specified iterations (as denoted by the
object's ``first_iter`` and ``last_iter`` attributes'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
obj_first_iter = h5object.attrs.get('first_iter')
obj_last_iter = h5object.attrs.get('last_iter')
return (obj_first_iter == first_iter and obj_last_iter == last_iter)
def check_data_iter_step_conformant(self, h5object, iter_step = None):
'''Check that the given HDF5 object contains per-iteration data at an iteration stride suitable for extracting data
with the given stride. (In other words, is the given ``iter_step`` a multiple of the stride with
which data was recorded.)'''
iter_step = iter_step or self.iter_step
obj_iter_step = h5object.attrs.get('iter_step')
return (obj_iter_step % iter_step == 0)
def check_data_iter_step_equal(self, h5object, iter_step = None):
'''Check that the given HDF5 object contains per-iteration data at an iteration stride the same as
that specified.'''
iter_step = iter_step or self.iter_step
obj_iter_step = h5object.attrs.get('iter_step')
return (obj_iter_step == iter_step)
def slice_per_iter_data(self, dataset, first_iter = None, last_iter = None, iter_step = None, axis=0):
'''Return the subset of the given dataset corresponding to the given iteration range and stride. Unless
otherwise specified, the first dimension of the dataset is the one sliced.'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
iter_step = iter_step or self.iter_step
ds_first_iter = dataset.attrs['first_iter']
ds_last_iter = dataset.attrs['last_iter']
ds_iter_step = dataset.attrs.get('iter_step', 1)
if first_iter < ds_first_iter or last_iter > ds_last_iter or ds_iter_step % iter_step > 0:
            raise IndexError(('Cannot slice requested iterations [{:d},{:d}] (stride={:d}) from dataset {!r} '
                              'with range [{:d},{:d}] (stride={:d}).').format(first_iter, last_iter, iter_step, dataset,
                                                                              ds_first_iter, ds_last_iter, ds_iter_step))
dimslices = []
for idim in range(len(dataset.shape)):
if idim == axis:
dimslices.append(slice(first_iter - ds_first_iter, last_iter - ds_first_iter + iter_step, iter_step))
else:
dimslices.append(slice(None,None,None))
dimslices = tuple(dimslices)
log.debug('slicing {!r} with {!r}'.format(dataset, dimslices))
data = dataset[dimslices]
log.debug('resulting data is of shape {!r}'.format(data.shape))
return data
def iter_range(self, first_iter = None, last_iter = None, iter_step = None):
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
iter_step = iter_step or self.iter_step
return numpy.arange(first_iter, last_iter + 1, iter_step)
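# Worked example of the block logic above (illustrative): with first_iter=1, last_iter=10
# and iter_step=4, iter_block_iter() yields (1, 5), (5, 9), (9, 11) (note the half-open
# upper bound), and n_iter_blocks() returns 3.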
|
py | 1a527345ef6bdbefa1e2b2a679fa1d0072c3e515 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.GradientDescentOptimizer"])
class GradientDescentOptimizer(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
@compatibility(eager)
When eager execution is enabled, `learning_rate` can be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
@end_compatibility
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._learning_rate_tensor = None
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, handle):
return training_ops.resource_apply_gradient_descent(
handle.handle, math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return resource_variable_ops.resource_scatter_add(
handle.handle, indices, -grad * self._learning_rate)
def _apply_sparse_duplicate_indices(self, grad, var):
delta = ops.IndexedSlices(
grad.values *
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
|
py | 1a5273b9b9515e4201022944804a7369474b5eef | # -*- coding: utf-8 -*-
# The Paginator class has lines from the repository "Amy".
# Copyright (c) 2014-2015 Software Carpentry and contributors
import base64
from datetime import date
import hashlib
from typing import Union
from django.core.paginator import Paginator as DjangoPaginator
import requests
from visitors.models import Subscriber
PAGINATOR_DIVIDER_THRESHOLD = 10
class Paginator(DjangoPaginator):
"""Everything should work as in django.core.paginator.Paginator, except
this class provides additional generator for nicer set of pages."""
_page_number = None
def page(self, number):
"""Overridden to store retrieved page number somewhere."""
self._page_number = number
return super().page(number)
def paginate_sections(self):
"""Divide pagination range into 3 sections.
Each section should contain approx. 5 links. If sections are
overlapping, they're merged.
The results might be:
* L…M…R
* LM…R
* L…MR
* LMR
where L - left section, M - middle section, R - right section, and "…"
stands for a separator.
"""
index = int(self._page_number) or 1
items = self.page_range
# The number of pages is low, so we don't need to divide them.
if items and items[-1] <= PAGINATOR_DIVIDER_THRESHOLD:
return list(items)
L = items[0:5]
M = items[index-3:index+4] or items[0:index+1]
R = items[-5:]
L_s = set(L)
M_s = set(M)
R_s = set(R)
D1 = L_s.isdisjoint(M_s)
D2 = M_s.isdisjoint(R_s)
if D1 and D2:
# L…M…R
pagination = list(L) + [None] + list(M) + [None] + list(R)
elif not D1 and D2:
# LM…R
pagination = sorted(L_s | M_s) + [None] + list(R)
elif D1 and not D2:
# L…MR
pagination = list(L) + [None] + sorted(M_s | R_s)
else:
# LMR
pagination = sorted(L_s | M_s | R_s)
return pagination
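# Worked example (illustrative): with 20 pages and current page number 10, L = [1..5],
# M = [8..14] and R = [16..20] are pairwise disjoint, so paginate_sections() returns
# [1, 2, 3, 4, 5, None, 8, 9, 10, 11, 12, 13, 14, None, 16, 17, 18, 19, 20];
# the None entries are rendered as the separators.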
def get_user_profile(request):
avatar = False
first_name = False
about_to_expire = False
expired = False
user = None
if request.user.is_authenticated:
user = request.user
try:
user.subscriber
except:
return {}
if not user.subscriber.avatar:
fetch_and_save_avatar(user)
first_name = user.first_name
avatar = user.subscriber.avatar
        if not user.subscriber or not user.subscriber.credits:
            expired = True
        elif user.subscriber.credits <= 0:
            expired = True
        elif user.subscriber.credits <= 30:
            about_to_expire = True
context = {
'avatar': avatar,
'first_name': first_name,
'about_to_expire': about_to_expire,
'expired': expired,
}
if user:
context["credits"] = user.subscriber.credits
if user and user.subscriber.credits is not None:
if context['credits'] < 0:
context['credits'] = 0
else:
context["credits"] = 0
return context
def fetch_and_save_avatar(user):
email = user.email.encode("utf-8")
email_hash = hashlib.md5()
email_hash.update(email)
url = "https://www.gravatar.com/{}.json".format(email_hash.hexdigest())
r = requests.get(url)
if r.json() == "User not found":
img = 'iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAABmJLR0QA/wD/AP+gvaeTAAAFz0lEQVR4nO2caWhdRRTHf8kzaYIkZrGKxqVIG5fULcagHzRY6ZcYFUUUUUGxotW6gAhFBAVB/CCCIi3ihra4YFWoNBZiBVu0QrV1r9XUWBOTujVNWmlMs/jhvGCMue/de2fmzn0v5wfnQ8i7M/85c+fOdmZAURRFURRFURRFURRFURRFURRFURRFKRJKgGrfIlJENeITL6wA9gKTwH7gdeAqoMyXIA9kgA5gLbAP8cUAsDxpIZdlM5/NdgM3A0ckLSpBypAy7iLYD0uTFPRuDiFT9iXQmqSohGgBviB/+dclKWpPCEGTwBjwJFCRpDhHlAOPA4cJV/buJMX1hxQ1ZTuBpiQFWuZs4FuilXlPkgLDtpDptg9ot6ihHKgHTgHOBc5DKv1EoMZiPtcBB4le3h8sasjL1hgCpz5h90TMqwEZyawE1gDbgUMh8xsANgGrgLuAJcCREfJ+EJiIWdbNEctpxPMxRU7ZMwSPwqqAy4GniP6ZCGOjiLMeAS4K0FEGvGiYz6qcHrTM3YZiJ4H3+HdSmUE+Z+uAvy2kHcUGgCeAxVktNUirMk33jhh+jU2TBcGTyND4MaDPUnqmtg34zlJai2J7NyY/WhJejPZ9XKeWxn0Q+eQos9PpI9ML8f8mptVaDPwamxLgp4hC54J1Y7Dia/LJmgRWGzxfrKxGfBML07X7WqCXaJOtYmYYWSkYjpuASQsBGAReMUyjmHgZg8qwxSJgHP/fbt82Diw09KU1NuDfIb5tvbEXLdLK3G4lE8AFxl60zHP4d4wv68G8PwZbiSCjtTMtpVWILABu9y1iOlfj/y31bX8CR5s6MmOaANLK3gTmW0irkKlEKuZ930La8f92psX+wDCgw0YfcouFNIqFeuBKkwSiBLOVIwEFDUgnPoxsdXaYCChC7kT6E5BW04eMwkbDPJxvLasN2d9uBxqRShic9v8aoC6C2LlCLxK/BeKfKiTKsRMJMgwMgAiqkDokFHIeUru9SBzvyIzfPY3sryv/5WJgy7S/K4DjkIXHBUjcQBcSGmWVD/HfkabRYr+kUQOi64BmZBJYD5wTN+Mi5ybkc96DxLDtDvtgmFFWCTJy6MwmfiMS8LYNPRsSxBgSudiEHFXYgRzfMN43agHeQIKMZ7aGKvx/GtJqXTN81QDcB7yNtJ5YzEcCGYI6/ooUFDyttiGHXxcCZwT9M1cf8nvWghhBYmwrc/xmrpJr9JTzmILpTN36sK1IiO0X0wr52vD5YuWruA+aVsh2w+fz8RuwMcTv9gPfhPjdLpJp1TsSyGNWOnDbOU4AtwKXEDwJ3QSchAzBN+ZI61PkYM9fjjUPI+t+XqgAhvIItFEp92fza0XOXXQBz/L/k64lwA1IJfUjyz1bkENC8zA/1xLG1kb2omXW4L6Qn1nS+kkCWq+wpDU25+O+kAewc0PCz451dmNnF9aYzbivlOMNNWaQuZNLjfcaarTGUtxXyLWGGpsd6+vHwlqVrTCgLtzH+F5q+HybFRXBLENGcKnhGGST39UbGPuYWJZ3HGp7y1CbM5YgS8+uCh73LW8g/JUYUW0nsvKdWlbirkJei6npYUd6DlIAEZulwEu4ccAI0UdblUhMgG0to8j9YAVBKdLJp6GVPOpAw2Hgmog6vJNBrtBwUSnXI+tbuWgETsX+3OMAKZiNm7ACNx391jz5foxcm2Ezz15kgbLgaUMiMGw6ZxA4PSC/BuAXy/mtB4419kSKqAZeIP51R7PZMPAQ8mmqRYLRlgO/WsxjCNkCKFqagY+w/wmzbWPIFVG1btyQLjLAbcjqqG/Hz7QJZKMrdWcGkyCDDB/T0GIOIefMz3Ja4gKiEZkz9JBcJYwjWwfLgKPcF7FwWQw8AHxAvAsoc9kA8CoSQZi6Y3je7imPQAY4DRkMNAEnACcjof3VyL7+9GC9IeTN34vMG/qQW4s+R6Jk+hPSrSiKoiiKoiiKoiiKoiiKoiiKoiiKoiiKRf4BC3RbvGqA5OgAAAAASUVORK5CYII='
else:
thumbnail_url = [
i['thumbnailUrl']
for i in r.json()['entry']
][0]
r = requests.get(thumbnail_url)
img = base64.b64encode(r.content)
s = Subscriber.objects.get(user=user)
s.avatar = img
s.save()
def is_dni(value: Union[str, int]) -> bool:
value = value.strip()
try:
int(value)
except ValueError:
return False
if len(value) >= 7:
return True
return False
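# Illustrative behaviour (not from the original test suite):
# is_dni('12345678') -> True   (numeric, at least 7 characters)
# is_dni('123') -> False  (too short)
# is_dni('ABC1234') -> False  (not numeric)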
|
py | 1a5274cc83e272e4b983c6715b2840a323ad39dc | # this is the script that i used to create output videos and gifs
# simply put all the animations, one per each folder
import os
import subprocess
import logging
first_frame_duration = 1
last_frame_duration = 5
fps = 60
source = "frames"
videos_dir = "videos"
h264_videos_dir = "h264"
gifs_dir = "gifs"
completed = 0
logging.basicConfig(level=logging.INFO, filename="generate-videos.log", filemode="w+", format='%(asctime)s %(levelname)s %(message)s')
logging.info("Creating folders")
if not os.path.exists(videos_dir):
os.makedirs(videos_dir)
if not os.path.exists(h264_videos_dir):
os.makedirs(h264_videos_dir)
if not os.path.exists(gifs_dir):
os.makedirs(gifs_dir)
logging.info("Listing file")
dirs = os.listdir(source)
for dir in dirs:
logging.info(f"Started conversion for folder {dir}")
# LIST OF FILES
files = os.listdir(f"{source}/{dir}")
# create video
options = f"ffmpeg -y -r {fps} -i {source}/{dir}/%07d.png -loop 0 {videos_dir}/{dir}.mp4"
subprocess.run(options.split(" "))
logging.info("mp4 video created")
# create h264 video
options = f"ffmpeg -y -r {fps} -i {source}/{dir}/%07d.png -c:a aac -b:a 256k -ar 44100 -c:v libx264 -pix_fmt yuv420p -r {fps} {h264_videos_dir}/{dir}_h264.mp4"
subprocess.run(options.split(" "))
logging.info("h264 video created")
# create gif
options = f"ffmpeg -y -i {videos_dir}/{dir}.mp4 -loop 0 -filter_complex fps=25,scale=500:-1,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse {gifs_dir}/{dir}.gif"
subprocess.run(options.split(" "))
logging.info("gif video created")
logging.info(f"Completed folder {dir}! Folder {completed + 1}/{len(dirs)}")
completed += 1
logging.info("Removing temp folder")
logging.info("Everything completed")
|
py | 1a52750916b591741da079dfd94e4b199d7e60a9 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.errors",
marshal="google.ads.googleads.v10",
manifest={"RequestErrorEnum",},
)
class RequestErrorEnum(proto.Message):
r"""Container for enum describing possible request errors.
"""
class RequestError(proto.Enum):
r"""Enum describing possible request errors."""
UNSPECIFIED = 0
UNKNOWN = 1
RESOURCE_NAME_MISSING = 3
RESOURCE_NAME_MALFORMED = 4
BAD_RESOURCE_ID = 17
INVALID_CUSTOMER_ID = 16
OPERATION_REQUIRED = 5
RESOURCE_NOT_FOUND = 6
INVALID_PAGE_TOKEN = 7
EXPIRED_PAGE_TOKEN = 8
INVALID_PAGE_SIZE = 22
REQUIRED_FIELD_MISSING = 9
IMMUTABLE_FIELD = 11
TOO_MANY_MUTATE_OPERATIONS = 13
CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT = 14
CANNOT_MODIFY_FOREIGN_FIELD = 15
INVALID_ENUM_VALUE = 18
DEVELOPER_TOKEN_PARAMETER_MISSING = 19
LOGIN_CUSTOMER_ID_PARAMETER_MISSING = 20
VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN = 21
CANNOT_RETURN_SUMMARY_ROW_FOR_REQUEST_WITHOUT_METRICS = 29
CANNOT_RETURN_SUMMARY_ROW_FOR_VALIDATE_ONLY_REQUESTS = 30
INCONSISTENT_RETURN_SUMMARY_ROW_VALUE = 31
TOTAL_RESULTS_COUNT_NOT_ORIGINALLY_REQUESTED = 32
RPC_DEADLINE_TOO_SHORT = 33
__all__ = tuple(sorted(__protobuf__.manifest))
|
py | 1a527546b32a57ccf42d7777b93ada26fc2af5c5 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.structures import ImageList
from .build import SSHEAD_REGISTRY
from .ss_layers import Flatten
class CycleEnergyHead(nn.Module):
def __init__(self, cfg, cin):
super(CycleEnergyHead, self).__init__()
self.name = 'cycle'
self.input = 'ROI'
self.device = torch.device(cfg.MODEL.DEVICE)
self.coef = cfg.MODEL.SS.COEF
self.enc1 = nn.Sequential(
nn.Conv2d(cin, 256, kernel_size=3, padding=0, bias=True),
# nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=0, bias=True),
# nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(1)
# nn.Flatten(start_dim=1, end_dim=-1)
)
self.map_back = nn.Linear(256, 256*49)
self.topk = 100
self.bs = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
self.scale = cfg.MODEL.SS.LOSS_SCALE
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 0)
def cal_pair_dist(self, feat_u, feat_v):
# finding the similarity score of feat_v
us = feat_u.size(0)
vs = feat_v.size(0)
fs = feat_u.size(1)
assert fs == feat_v.size(1)
uu = feat_u.unsqueeze(1).repeat(1, vs, 1).view(-1, fs)
vv = feat_v.repeat(us, 1)
diff = uu - vv
dist = (diff * diff).sum(dim=1).view(us, vs) * self.coef
score = F.softmax(dist, dim=1)
return dist, score
def computer_corr_softmax(self, feat_u, feat_v):
# track forward
# calculate the L2 distance between feat_u and feat_v
sim_dist, sim_score = self.cal_pair_dist(feat_u, feat_v)
soft_v = torch.matmul(sim_score, feat_v)
# track backward
back_dist, back_score = self.cal_pair_dist(soft_v, feat_u)
labels = torch.arange(len(feat_u)).long().to(back_dist.device)
loss = nn.CrossEntropyLoss()(back_dist, labels)
        if back_dist.size(1) == 0:  # there are no objects in the first frame
print(back_dist.size(), feat_u.size(), feat_v.size(), loss)
correct = (back_dist.argmax(dim=1) == labels).float().sum()
count = len(back_dist)
return loss, correct, count, soft_v
def forward(self, features, prev_boxes=None):
features, idxs, proposals = features
total_loss = 0.0
corrects = 0
counts = 0
        pos_fea = None
neg_fea = None
prev = 0
# since the number of proposals might be different for different pairs
if prev_boxes is not None:
feat_u = self.enc1(features)
feat_v = self.enc1(prev_boxes)
feat_u = feat_u.view(feat_u.size(0), feat_u.size(1))
feat_v = feat_v.view(feat_v.size(0), feat_v.size(1))
if feat_u.size(0) == 0:
print(feat_u, feat_v)
return {'loss_cycle': feat_u.sum() * self.scale}, 0.
total_loss, correct, cnt, _ = self.computer_corr_softmax(feat_u, feat_v)
# print('correct: ', correct, 'cnt: ', cnt)
total_acc = correct.item()/cnt
else:
for i in range(0, len(idxs), 2):
u = features[prev:idxs[i]]
v = features[idxs[i]: idxs[i+1]]
prev = idxs[i+1]
feat_u = self.enc1(u)
feat_v = self.enc1(v)
feat_u = feat_u.view(feat_u.size(0), feat_u.size(1))
feat_v = feat_v.view(feat_v.size(0), feat_v.size(1))
if feat_u.size(0) == 0:
print(feat_u.size(), feat_v.size())
loss = feat_u.sum()
correct = 0
cnt = 0
else:
loss, correct, cnt, soft_target = self.computer_corr_softmax(feat_u, feat_v)
if pos_fea is None:
pos_fea = self.map_back(feat_u)
neg_fea = self.map_back(soft_target)
else:
pos_fea = torch.cat([pos_fea, self.map_back(feat_u)], 0)
neg_fea = torch.cat([neg_fea, self.map_back(soft_target)], 0)
total_loss += loss*cnt
corrects += correct
counts += cnt
# breakpoint()
if counts != 0:
total_loss /= counts
total_acc = corrects/counts
else:
total_acc = 0.
if pos_fea is not None:
assert len(pos_fea) == len(neg_fea)
# print('total loss: {:.4f}\ttotal acc: {:.3f}'.format(total_loss, total_acc))
return {'loss_cycle': total_loss * self.scale}, total_acc, torch.cat([pos_fea, neg_fea], 0)
else:
return {'loss_cycle': total_loss * self.scale}, total_acc, None
@SSHEAD_REGISTRY.register()
def build_cycle_energy_head(cfg, input_shape):
in_channels = cfg.MODEL.FPN.OUT_CHANNELS
rot_head = CycleEnergyHead(cfg, in_channels)
return rot_head
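# Hedged, self-contained sketch (not part of the head above; it reuses this
# module's torch / nn / F imports). It illustrates the forward/backward
# soft-matching idea behind computer_corr_softmax with random features; the
# negative squared distance used here as a similarity stands in for the scaled
# distance controlled by self.coef in the real head.
def _cycle_consistency_demo():
    torch.manual_seed(0)
    feat_u = torch.randn(4, 8)   # 4 proposals in frame t
    feat_v = torch.randn(5, 8)   # 5 proposals in frame t+1
    # forward: distance-based attention from u over v, then a soft pull-back
    sim_uv = -((feat_u[:, None, :] - feat_v[None, :, :]) ** 2).sum(-1)
    soft_v = F.softmax(sim_uv, dim=1) @ feat_v
    # backward: each soft feature should match its own source index
    sim_back = -((soft_v[:, None, :] - feat_u[None, :, :]) ** 2).sum(-1)
    labels = torch.arange(feat_u.size(0))
    return nn.CrossEntropyLoss()(sim_back, labels)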
|
py | 1a527610645dded90211c699d052b12cc1a37851 | import argparse
import speakeasy
class DbgView(speakeasy.Speakeasy):
"""
Print debug port prints to the console
"""
def __init__(self, debug=False):
super(DbgView, self).__init__(debug=debug)
def debug_print_hook(self, emu, api_name, func, params):
# Call the DbgPrint* function and print the formatted string to the console
rv = func(params)
formatted_str = params[0]
print(formatted_str)
return rv
def debug_printex_hook(self, emu, api_name, func, params):
# Call the DbgPrintEx function and print the formatted string to the console
rv = func(params)
formatted_str = params[2]
print(formatted_str)
return rv
def main(args):
dbg = DbgView()
module = dbg.load_module(args.file)
dbg.add_api_hook(dbg.debug_print_hook, "ntoskrnl", "DbgPrint")
dbg.add_api_hook(dbg.debug_printex_hook, "ntoskrnl", "DbgPrintEx")
# Emulate the module
dbg.run_module(module, all_entrypoints=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Print debug port prints to the console"
)
parser.add_argument(
"-f",
"--file",
action="store",
dest="file",
required=True,
help="Path of driver to emulate",
)
args = parser.parse_args()
main(args)
|
py | 1a5276870e47b60458a4b2e37b6c46a6e6a15db7 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : GUC parameter
Case Name   : Set enable_hashjoin with gs_guc set and verify the expected result
Description :
    1. Query the default value of enable_hashjoin
    2. Set enable_hashjoin to off
    3. Restart the cluster so the change takes effect
    4. Verify the expected result
    5. Restore the default value
Expect      :
    1. Querying the default value of enable_hashjoin succeeds
    2. Setting enable_hashjoin to off succeeds
    3. Restarting the cluster succeeds
    4. The parameter value is off, as expected
    5. Restoring the default value succeeds
History     :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
LOG = Logger()
class GucQueryplan(unittest.TestCase):
def setUp(self):
LOG.info('----this is setup------')
LOG.info(
'--------Opengauss_Function_Guc_Queryplan_Case0007--------')
self.comsh = CommonSH('PrimaryDbUser')
self.constant = Constant()
self.pv = ''
def test_Guc_queryplan(self):
LOG.info(
            '--------Check the default value of enable_hashjoin-----')
msg = self.comsh.execut_db_sql('show enable_hashjoin;')
LOG.info(msg)
self.pv = msg.splitlines()[-2].strip()
LOG.info(
            '------Set enable_hashjoin to off----')
msg = self.comsh.execute_gsguc('set', self.constant.GSGUC_SUCCESS_MSG,
'enable_hashjoin=off')
LOG.info(msg)
        LOG.info('-------Restart the database------')
self.comsh.restart_db_cluster()
status = self.comsh.get_db_cluster_status()
self.assertTrue("Normal" in status or 'Degraded' in status)
LOG.info(
            '-------Verify the expected result-------')
msg = self.comsh.execut_db_sql('show enable_hashjoin;')
LOG.info(msg)
res = msg.splitlines()[-2].strip()
self.assertIn(self.constant.BOOLEAN_VALUES[1], res)
def tearDown(self):
LOG.info(
'----this is tearDown-------')
LOG.info(
            '-------Restore the default value------')
msg = self.comsh.execute_gsguc('set', self.constant.GSGUC_SUCCESS_MSG,
f'enable_hashjoin={self.pv}')
LOG.info(msg)
stopmsg = self.comsh.stop_db_cluster()
LOG.info(stopmsg)
startmsg = self.comsh.start_db_cluster()
LOG.info(startmsg)
LOG.info(
            '------Opengauss_Function_Guc_Queryplan_Case0007 finished------')
|
py | 1a5276f8bf510ab6cf8b546f74781d20503a71c1 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from fairseq import utils
import torch
from . import FairseqCriterion, register_criterion
@register_criterion('cokd_loss')
class COKDCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.kd_alpha = args.kd_alpha
self.eps = args.label_smoothing
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--kd-alpha', default=0.5, type=float)
parser.add_argument('--num-teachers', default=1, type=int)
# fmt: on
def forward(self, model, sample, reduce=True, teachers = None):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
if teachers is None:
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
else:
net_output_teachers = [teacher(**sample['net_input']) for teacher in teachers]
loss, nll_loss = self.compute_kd_loss(model, net_output, net_output_teachers, sample, reduce=reduce)
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1, 1)  # fairseq/models/fairseq_model.py: sample['target']
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = self.eps / lprobs.size(-1)
loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
def compute_kd_loss(self, model, net_output, net_output_teachers, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
teacher_probs = [model.get_normalized_probs(net_output_teacher, log_probs=False) for net_output_teacher in net_output_teachers]
teacher_prob = torch.mean(torch.stack(teacher_probs, dim = 0), dim = 0)
teacher_prob = teacher_prob.view(-1, teacher_prob.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
non_pad_mask = target.ne(self.padding_idx)
kd_loss = (-lprobs * teacher_prob).sum(dim = -1, keepdim=True)[non_pad_mask]
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
if reduce:
nll_loss = nll_loss.sum()
kd_loss = kd_loss.sum()
loss = nll_loss * (1 - self.kd_alpha) + kd_loss * self.kd_alpha
return loss, nll_loss
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
return {
'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
|
py | 1a5277495d4e8461f1c092b9295f74e7779e32e4 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_network_policy_list import V1NetworkPolicyList # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1NetworkPolicyList(unittest.TestCase):
"""V1NetworkPolicyList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1NetworkPolicyList(self):
"""Test V1NetworkPolicyList"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v1_network_policy_list.V1NetworkPolicyList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a52784a9e2d5924fe0c7e259c4ff5a556e8e0fa | import math
import random
from collections import namedtuple, deque
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from rllite.common import ReplayBuffer2
USE_CUDA = torch.cuda.is_available()
class StochasticMDP:
def __init__(self):
self.end = False
self.current_state = 2
self.num_actions = 2
self.num_states = 6
self.p_right = 0.5
def reset(self):
self.end = False
self.current_state = 2
state = np.zeros(self.num_states)
state[self.current_state - 1] = 1.
return state
def step(self, action):
if self.current_state != 1:
if action == 1:
if random.random() < self.p_right and self.current_state < self.num_states:
self.current_state += 1
else:
self.current_state -= 1
if action == 0:
self.current_state -= 1
if self.current_state == self.num_states:
self.end = True
state = np.zeros(self.num_states)
state[self.current_state - 1] = 1.
if self.current_state == 1:
if self.end:
return state, 1.00, True, {}
else:
return state, 1.00 / 100.00, True, {}
else:
return state, 0.0, False, {}
class Net(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Net, self).__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.layers = nn.Sequential(
nn.Linear(num_inputs, 256),
nn.ReLU(),
nn.Linear(256, num_outputs)
)
def forward(self, x):
return self.layers(x)
def act(self, state, epsilon):
if random.random() > epsilon:
state = torch.FloatTensor(state).unsqueeze(0)
action = self.forward(state).max(1)[1]
return action.data[0]
else:
return random.randrange(self.num_outputs)
class HierarchicalDQN(object):
def __init__(self):
self.env = StochasticMDP()
self.num_goals = self.env.num_states
self.num_actions = self.env.num_actions
self.model = Net(2*self.num_goals, self.num_actions)
self.target_model = Net(2*self.num_goals, self.num_actions)
self.meta_model = Net(self.num_goals, self.num_goals)
self.target_meta_model = Net(self.num_goals, self.num_goals)
if USE_CUDA:
self.model = self.model.cuda()
self.target_model = self.target_model.cuda()
self.meta_model = self.meta_model.cuda()
self.target_meta_model = self.target_meta_model.cuda()
self.optimizer = optim.Adam(self.model.parameters())
self.meta_optimizer = optim.Adam(self.meta_model.parameters())
self.replay_buffer = ReplayBuffer2(10000)
self.meta_replay_buffer = ReplayBuffer2(10000)
def to_onehot(self, x):
oh = np.zeros(6)
oh[x - 1] = 1.
return oh
def update(self, model, optimizer, replay_buffer, batch_size):
if batch_size > len(replay_buffer):
return
state, action, reward, next_state, done = replay_buffer.sample(batch_size)
state = torch.FloatTensor(state)
next_state = torch.FloatTensor(next_state)
action = torch.LongTensor(action)
reward = torch.FloatTensor(reward)
done = torch.FloatTensor(done)
q_value = model(state)
q_value = q_value.gather(1, action.unsqueeze(1)).squeeze(1)
next_q_value = model(next_state).max(1)[0]
expected_q_value = reward + 0.99 * next_q_value * (1 - done)
loss = (q_value - expected_q_value).pow(2).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
def learn(self, num_frames=100000, epsilon_start=1.0, epsilon_final=0.01, epsilon_decay=500):
frame_idx = 1
state = self.env.reset()
done = False
all_rewards = []
episode_reward = 0
while frame_idx < num_frames:
goal = self.meta_model.act(state, epsilon_final + (epsilon_start - epsilon_final) * math.exp(-1. * frame_idx / epsilon_decay))
onehot_goal = self.to_onehot(goal)
meta_state = state
extrinsic_reward = 0
while not done and goal != np.argmax(state):
goal_state = np.concatenate([state, onehot_goal])
action = self.model.act(goal_state, epsilon_final + (epsilon_start - epsilon_final) * math.exp(-1. * frame_idx / epsilon_decay))
next_state, reward, done, _ = self.env.step(action)
episode_reward += reward
extrinsic_reward += reward
intrinsic_reward = 1.0 if goal == np.argmax(next_state) else 0.0
self.replay_buffer.push(goal_state, action, intrinsic_reward, np.concatenate([next_state, onehot_goal]),
done)
state = next_state
self.update(self.model, self.optimizer, self.replay_buffer, 32)
self.update(self.meta_model, self.meta_optimizer, self.meta_replay_buffer, 32)
frame_idx += 1
if frame_idx % 1000 == 0:
n = 100 # mean reward of last 100 episodes
plt.figure(figsize=(20, 5))
plt.title(frame_idx)
plt.plot([np.mean(all_rewards[i:i + n]) for i in range(0, len(all_rewards), n)])
plt.show()
self.meta_replay_buffer.push(meta_state, goal, extrinsic_reward, state, done)
if done:
state = self.env.reset()
done = False
all_rewards.append(episode_reward)
episode_reward = 0
print(frame_idx)
if __name__ == '__main__':
model = HierarchicalDQN()
model.learn()
|
py | 1a5278e627aff094f0737bae9fd4906c239214ce | import numpy
try:
import cupy
xpy_default=cupy
junk_to_check_installed = cupy.array(5) # this will fail if GPU not installed correctly
except:
xpy_default=numpy
def TimeDelayFromEarthCenter(
detector_earthfixed_xyz_metres,
source_right_ascension_radians,
source_declination_radians,
greenwich_mean_sidereal_time,
xpy=xpy_default, dtype=numpy.float64,
):
"""
Parameters
----------
detector_earthfixed_xyz_metres : array_like, shape = det_shape + (3,)
Location of detector(s) relative to Earth's center in meters. May provide
multiple detectors, last axis must be (x,y,z) but other axes can take
whatever form is desired.
source_right_ascension_radians : array_like, shape = sample_shape
Right ascension of source in radians, can be an arbitrary dimensional
array.
source_declination_radians : array_like, shape = sample_shape
Declination of source in radians, can be an arbitrary dimensional array.
greenwich_mean_sidereal_time : float
Should be equivalent to XLALGreenwichMeanSiderealTime(gpstime).
Returns
-------
time_delay_from_earth_center : array_like, shape = det_shape + sample_shape
"""
negative_speed_of_light = xpy.asarray(-299792458.0)
det_shape = detector_earthfixed_xyz_metres.shape[:-1]
sample_shape = source_right_ascension_radians.shape
cos_dec = xpy.cos(source_declination_radians)
greenwich_hour_angle = (
greenwich_mean_sidereal_time - source_right_ascension_radians
)
ehat_src = xpy.empty(sample_shape + (3,), dtype=dtype)
ehat_src[...,0] = cos_dec * xpy.cos(greenwich_hour_angle)
ehat_src[...,1] = -cos_dec * xpy.sin(greenwich_hour_angle)
ehat_src[...,2] = xpy.sin(source_declination_radians)
neg_separation = xpy.inner(detector_earthfixed_xyz_metres, ehat_src)
return xpy.divide(
neg_separation, negative_speed_of_light,
out=neg_separation,
)
def ComputeDetAMResponse(
detector_response_matrix,
source_right_ascension_radians,
source_declination_radians,
source_polarization_radians,
greenwich_mean_sidereal_time,
xpy=xpy_default, dtype_real=numpy.float64, dtype_complex=numpy.complex128,
):
"""
Parameters
----------
detector_response_matrix : array_like, shape = det_shape + (3, 3)
Detector response matrix, or matrices for multiple detectors. Last two
axes must be 3-by-3 response matrix, and may include arbitrary axes before
that for various detectors.
source_right_ascension_radians : array_like, shape = sample_shape
Right ascension of source in radians, can be an arbitrary dimensional
array.
source_declination_radians : array_like, shape = sample_shape
Declination of source in radians, can be an arbitrary dimensional array.
source_polarization_radians : array_like, shape = sample_shape
Polarization angle of source in radians, can be an arbitrary dimensional
array.
greenwich_mean_sidereal_time : float
Should be equivalent to XLALGreenwichMeanSiderealTime(gpstime).
Returns
-------
F : array_like, shape = det_shape + sample_shape
"""
det_shape = detector_response_matrix.shape[:-1]
sample_shape = source_right_ascension_radians.shape
matrix_shape = 3, 3
# Initialize trig matrices.
X = xpy.empty(sample_shape+(3,), dtype=dtype_real)
Y = xpy.empty(sample_shape+(3,), dtype=dtype_real)
# Greenwich hour angle of source in radians.
source_greenwich_radians = (
greenwich_mean_sidereal_time - source_right_ascension_radians
)
# Pre-compute trig functions
cos_gha = xpy.cos(source_greenwich_radians)
sin_gha = xpy.sin(source_greenwich_radians)
cos_dec = xpy.cos(source_declination_radians)
sin_dec = xpy.sin(source_declination_radians)
cos_psi = xpy.cos(source_polarization_radians)
sin_psi = xpy.sin(source_polarization_radians)
# Populate trig matrices.
X[...,0] = -cos_psi*sin_gha - sin_psi*cos_gha*sin_dec
X[...,1] = -cos_psi*cos_gha + sin_psi*sin_gha*sin_dec
X[...,2] = sin_psi*cos_dec
Y[...,0] = sin_psi*sin_gha - cos_psi*cos_gha*sin_dec
Y[...,1] = sin_psi*cos_gha + cos_psi*sin_gha*sin_dec
Y[...,2] = cos_psi*cos_dec
# Compute F for each polarization state.
F_plus = (
X*xpy.inner(X, detector_response_matrix) -
Y*xpy.inner(Y, detector_response_matrix)
).sum(axis=-1)
F_cross = (
X*xpy.inner(Y, detector_response_matrix) +
Y*xpy.inner(X, detector_response_matrix)
).sum(axis=-1)
return F_plus + 1.0j*F_cross
|
py | 1a5279c769c4e481d5e3d5e78057c73d091bb5c3 | import pytest
from insights.parsers import ParseException
from insights.tests import context_wrap
from insights.parsers.gluster_vol import GlusterVolInfo
TRACKING_VALID = """
Volume Name: test_vol
cluster.choose-local: off
network.remote-dio: enable
performance.low-prio-threads: 32
performance.io-cache: off
performance.read-ahead: off
performance.quick-read: off
nfs.disable: on
performance.client-io-threads: off
"""
TRACKING_INVALID = """ """
MULTIPLE_VOLUMES = """
Volume Name: test_vol
Type: Replicate
Volume ID: 2c32ed8d-5a07-4a76-a73a-123859556974
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 172.17.18.42:/home/brick
Brick2: 172.17.18.43:/home/brick
Brick3: 172.17.18.44:/home/brick
Options Reconfigured:
cluster.choose-local: off
user.cifs: off
features.shard: on
cluster.shd-wait-qlength: 10000
cluster.shd-max-threads: 8
cluster.locking-scheme: granular
cluster.data-self-heal-algorithm: full
cluster.server-quorum-type: server
cluster.quorum-type: auto
cluster.eager-lock: enable
network.remote-dio: enable
performance.low-prio-threads: 32
performance.io-cache: off
performance.read-ahead: off
performance.quick-read: off
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
Volume Name: test_vol_2
Type: Replicate
Volume ID: dd821df9-ee2e-429c-98a0-81b1b794433d
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 172.17.18.42:/home/brick2
Brick2: 172.17.18.43:/home/brick2
Brick3: 172.17.18.44:/home/brick2
Options Reconfigured:
cluster.choose-local: off
user.cifs: off
features.shard: on
cluster.shd-wait-qlength: 10000
cluster.shd-max-threads: 8
cluster.locking-scheme: granular
cluster.data-self-heal-algorithm: full
cluster.server-quorum-type: server
cluster.quorum-type: auto
cluster.eager-lock: enable
network.remote-dio: enable
performance.low-prio-threads: 32
performance.io-cache: off
performance.read-ahead: off
performance.quick-read: off
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
""".strip()
def test_invalid():
with pytest.raises(ParseException) as e:
GlusterVolInfo(context_wrap(TRACKING_INVALID))
assert "Unable to parse gluster volume options: []" in str(e)
def test_gluster_volume_options():
parser_result = GlusterVolInfo(context_wrap(TRACKING_VALID))
assert parser_result is not None
data = parser_result.data["test_vol"]
assert data['network.remote-dio'] == 'enable'
assert data['cluster.choose-local'] == 'off'
assert data['performance.client-io-threads'] == 'off'
assert data['performance.quick-read'] == 'off'
assert data['performance.low-prio-threads'] == '32'
assert data['performance.io-cache'] == 'off'
assert data['performance.read-ahead'] == 'off'
assert data['nfs.disable'] == 'on'
def test_gluster_multiple_volume_options():
parser_result = GlusterVolInfo(context_wrap(MULTIPLE_VOLUMES))
assert parser_result is not None
data = parser_result.data["test_vol"]
assert data['network.remote-dio'] == 'enable'
assert data['cluster.choose-local'] == 'off'
assert data['performance.client-io-threads'] == 'off'
assert data['performance.quick-read'] == 'off'
assert data['performance.low-prio-threads'] == '32'
assert data['performance.io-cache'] == 'off'
assert data['performance.read-ahead'] == 'off'
assert data['nfs.disable'] == 'on'
data = parser_result.data["test_vol_2"]
assert data['network.remote-dio'] == 'enable'
assert data['cluster.choose-local'] == 'off'
assert data['performance.client-io-threads'] == 'off'
assert data['performance.quick-read'] == 'off'
assert data['performance.low-prio-threads'] == '32'
assert data['performance.io-cache'] == 'off'
assert data['performance.read-ahead'] == 'off'
assert data['nfs.disable'] == 'on'
|
py | 1a527a72f22a1cc205a5e1f6a2d0694f636a0d01 | from django.apps import AppConfig
class BrevehomeConfig(AppConfig):
name = 'brevehome'
|
py | 1a527ae6c3071f42491f8bdc00487ce68c8ef8e3 | """Contain the unit tests related to the views in app ``catalogs``."""
from django.http.request import HttpRequest
from django.test import TestCase
from teamspirit.catalogs.views import catalog_view
from teamspirit.core.models import Address
from teamspirit.profiles.models import Personal
from teamspirit.users.models import User
class CatalogsViewsTestCase(TestCase):
"""Test the views in the app ``catalogs``."""
def setUp(self):
super().setUp()
# a user in database
self.address = Address.objects.create(
label_first="1 rue de l'impasse",
label_second="",
postal_code="75000",
city="Paris",
country="France"
)
self.personal = Personal.objects.create(
phone_number="01 02 03 04 05",
address=self.address
)
self.user = User.objects.create_user(
email="[email protected]",
first_name="Toto",
password="TopSecret",
personal=self.personal
)
# log this user in
self.client.login(email="[email protected]", password="TopSecret")
# a 'get' request
self.get_request = HttpRequest()
self.get_request.method = 'get'
self.get_request.user = self.user
def test_catalog_view(self):
"""Unit test - app ``catalogs`` - view ``catalog_view``
Test the catalog view.
"""
view = catalog_view
response = view(self.get_request) # type is TemplateResponse
# render the response content
response.render()
html = response.content.decode('utf8')
self.assertEqual(response.status_code, 200)
self.assertTrue(html.startswith('<!DOCTYPE html>'))
self.assertIn('<title>Team Spirit - Catalogue</title>', html)
|
py | 1a527ae7f332da4f3d5fd4a3136c9864b2acb7f9 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('hs_core', '0020_baseresource_collections'),
]
operations = [
migrations.CreateModel(
name='FundingAgency',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('agency_name', models.TextField()),
('award_title', models.TextField(null=True, blank=True)),
('award_number', models.TextField(null=True, blank=True)),
('agency_url', models.URLField(null=True, blank=True)),
('content_type', models.ForeignKey(related_name='hs_core_fundingagency_related', to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
),
]
|
py | 1a527aed81a48794972d0d62e3de69b10e6d49bf | import requests
import json
from lxml import html
from lxml import etree
from bs4 import BeautifulSoup
import time
import numpy
import getpass
import os
clear = lambda: os.system('cls')
import json
"""
Most of these functions work through REST APIs, but due to a lack of documentation about some features,
this script also uses HTTP request scraping (for example in get_last_mark()).
This is why there are two login functions: liucLogin() logs in through HTTP requests,
while login() logs in through the REST APIs.
Further investigation of the API documentation should fix this and make the script work ONLY through the REST API.
get_last_mark() works through HTTP scraping and requires liucLogin().
Please note that the script works only with student accounts. """
# not all URLs are official REST API endpoints
url_login = "https://sol.liuc.it/esse3/auth/Logon.do"
url_esiti = 'https://sol.liuc.it/esse3/auth/studente/Appelli/BachecaEsiti.do'
url_appelli = 'https://sol.liuc.it/e3rest/api/calesa-service-v1/appelli/'
url_login_end = "https://sol.liuc.it/e3rest/api/login/"
#average endpoint = url_average + matId (get this from login()) + "/medie"
url_average = 'http://sol.liuc.it/e3rest/api/libretto-service-v2/libretti/'
# example: ".../e3rest/api/libretto-service-v2/libretti/999/medie" returns the average of the student with matId 999
url_libretto = 'http://sol.liuc.it/e3rest/api/libretto-service-v2/libretti/'
#start requests session
session = requests.session()
session.get(url_login)
#login through API
#return basic info about the student
def login(username1, pwd):
response = session.get(url_login_end, auth=(username1, pwd))
user_details_json = json.loads(response.text)
user_details = []
matId = (user_details_json["user"]["trattiCarriera"][0]["matId"])
stuId = (user_details_json["user"]["trattiCarriera"][0]["stuId"])
matricola = (user_details_json["user"]["trattiCarriera"][0]["matricola"])
name = (user_details_json["user"]["firstName"])
surname = (user_details_json["user"]["lastName"])
user_details.append(matId)
user_details.append(stuId)
user_details.append(matricola)
user_details.append(name)
user_details.append(surname)
return user_details
#return a matrix with available exams and their details
#this function works through JSON REST API
def getAppelli(username1, pwd):
appelli = session.get(url_appelli, auth=(username1, pwd))
appelli_json = json.loads(appelli.text)
appelli_detail = [[]]
advanced_details_exam = [[]]
#look for exam attributes, so i can search for exams description
#first endpoints = exam id
#second endopoints = input(exam_id)->output(exam_details)
for i in range(len(appelli_json)):
id_appello = appelli_json[i]["adDefAppId"]
id_corso = appelli_json[i]["cdsDefAppId"]
desc_appello = appelli_json[i]["adDes"]
appelli_detail.insert(i, [desc_appello, id_appello, id_corso])
#look for exam details, giving as input exam id
for i in range(len(appelli_detail) - 1):
detail_endpoints = url_appelli + str(appelli_detail[i][2]) + "/" + str(appelli_detail[i][1])
get_exam_info = session.get(detail_endpoints, auth=(username1, pwd))
exam_info_json = json.loads(get_exam_info.text)
""" print(exam_info_json)
print(detail_endpoints) """
for j in range(len(exam_info_json) - 1):
corso = exam_info_json[j]["adDes"]
data_appello = exam_info_json[j]["dataInizioApp"]
data_inizio = exam_info_json[j]["dataInizioIscr"]
data_fine = exam_info_json[j]["dataFineIscr"]
tipo_appello = exam_info_json[j]["desApp"]
advanced_details_exam.insert((j+i), [corso, data_appello, tipo_appello, data_inizio, data_fine])
return advanced_details_exam
#return average and most likely graduation grade
def get_media(username1, pwd):
matricola_id = login(username1, pwd)[0]
personal_url_average = url_average + str(matricola_id) + "/medie"
getAverage = session.get(personal_url_average, auth=(username1,pwd))
average_json = json.loads(getAverage.text)
average = average_json[1]["media"]
votolaurea = average_json[3]["media"]
return average, votolaurea
#return a matrix in which each line contains [exam name, exam grade]
#if an exam has not a grade, return [exam name, "---"]
def get_libretto(username1, pwd):
libretto = [[]]
matricola_id = login(username1, pwd)[0]
personal_url_libretto = url_libretto + str(matricola_id) + "/righe/"
response = session.get(personal_url_libretto, auth = (username1, pwd))
libretto_json = json.loads(response.text)
num_esami_da_dare = 0
for i in range(len(libretto_json)):
esame_libretto = libretto_json[i]["adDes"]
voto_libretto = libretto_json[i]["esito"]["voto"]
if voto_libretto == None:
voto_libretto = "---"
num_esami_da_dare = num_esami_da_dare + 1
libretto.insert(i, [esame_libretto, voto_libretto])
    # add info about how many exams have been completed
num_esami_dati = len(libretto_json) - num_esami_da_dare
#insert the info in last line of the list
esami_dati_da_dare = [num_esami_dati, num_esami_da_dare]
return libretto, esami_dati_da_dare
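# Hedged usage sketch (not part of the original script, and never called
# automatically): shows how the helpers above fit together. Credentials are
# read interactively; nothing about the esse3 API beyond the functions in this
# file is assumed.
def _demo():
    user = input("Username: ")
    pwd = getpass.getpass("Password: ")
    print("Student:", login(user, pwd)[3:])                 # name and surname
    print("Average / expected final grade:", get_media(user, pwd))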
#----------------------------------------------------------------------------------------------------------------
def liucLogin(username1, pwd):
response = session.get(url_login, auth=(username1, pwd))
    # save the career-selection page
tree = etree.HTML(response.text)
element = tree.xpath('//*[@id="gu_toolbar_sceltacarriera"]')
try:
content = etree.tostring(element[0])
url1 = content[108:113].decode('utf-8')
print("Accedo all'ultima carriera disponibile...")
url_carriera = "https://sol.liuc.it/esse3/auth/studente/SceltaCarrieraStudente.do?stu_id=" + url1
response = session.get(url_carriera, auth=(username1, pwd))
if (response.status_code) == 200:
print("Login riuscito. ")
else:
print("Login non riuscito. ")
except:
print("Login non riuscito ")
#check the last grades
def get_last_mark(username1, pwd):
response = session.get(url_esiti, auth=(username1,pwd))
html_esiti = BeautifulSoup(response.text, "html.parser")
    # the selector needs fixing: I cannot find the exact CSS selector, and XPath does not work either
    # check the e3rest documentation on esse3 for the JSON API
prof_esame_esito = html_esiti.select('td.detail_table:nth-child(3)')
data_esame_esito = html_esiti.select('td.detail_table:nth-child(1)')
voto_esame_esito = html_esiti.select('td.detail_table:nth-child(5) > form:nth-child(1)')
print(len(prof_esame_esito))
esiti = []
quanti_esiti = len(prof_esame_esito)
for i in range(quanti_esiti):
prof_esame_esito1 = prof_esame_esito[i].get_text()
data_esame_esito1 = data_esame_esito[i].get_text()
voto_esame_esito1 = voto_esame_esito[i].get_text()
info_esito = prof_esame_esito1 + " - " + data_esame_esito1 + " - " + voto_esame_esito1
info_esito = info_esito.replace("\n", "")
esiti.append(info_esito)
return esiti |
py | 1a527ba51de84ff9c0bc982f39815da8283ae8ee | #!/usr/bin/env python3
"""This example demonstrates using the file token manager for refresh tokens.
In order to run this program, you will first need to obtain a valid refresh token. You
can use the `obtain_refresh_token.py` example to help.
In this example, refresh tokens will be saved into a file `refresh_token.txt` relative
to your current working directory. If your current working directory is under version
control it is strongly encouraged you add `refresh_token.txt` to the version control
ignore list.
Usage:
EXPORT praw_client_id=<REDDIT_CLIENT_ID>
EXPORT praw_client_secret=<REDDIT_CLIENT_SECRET>
python3 use_file_token_manager.py
"""
import asyncio
import os
import sys
import aiofiles
import asyncpraw
from asyncpraw.util.token_manager import FileTokenManager
REFRESH_TOKEN_FILENAME = "refresh_token.txt"
async def initialize_refresh_token_file():
if os.path.isfile(REFRESH_TOKEN_FILENAME):
return
refresh_token = input("Initial refresh token value: ")
async with aiofiles.open(REFRESH_TOKEN_FILENAME, "w") as fp:
await fp.write(refresh_token)
async def main():
if "praw_client_id" not in os.environ:
sys.stderr.write("Environment variable ``praw_client_id`` must be defined\n")
return 1
if "praw_client_secret" not in os.environ:
sys.stderr.write(
"Environment variable ``praw_client_secret`` must be defined\n"
)
return 1
await initialize_refresh_token_file()
refresh_token_manager = FileTokenManager(REFRESH_TOKEN_FILENAME)
async with asyncpraw.Reddit(
token_manager=refresh_token_manager,
user_agent="use_file_token_manager/v0 by u/bboe",
) as reddit:
scopes = await reddit.auth.scopes()
if scopes == {"*"}:
print(f"{await reddit.user.me()} is authenticated with all scopes")
elif "identity" in scopes:
print(
f"{await reddit.user.me()} is authenticated with the following scopes:"
f" {scopes}"
)
else:
print(f"You are authenticated with the following scopes: {scopes}")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
sys.exit(loop.run_until_complete(main()))
|
py | 1a527bbd1e25dd54f88d23e052f7e9793ed48834 | import time
from rdfframes.knowledge_graph import KnowledgeGraph
from rdfframes.utils.constants import JoinType
from rdfframes.client.http_client import HttpClientDataFormat, HttpClient
def movies_with_american_actors_cache():
graph = KnowledgeGraph(graph_name='dbpedia')
dataset = graph.feature_domain_range('dbpp:starring', 'movie', 'actor')\
.expand('actor', [('dbpp:birthPlace', 'actor_country'), ('rdfs:label', 'actor_name')])\
.expand('movie', [('rdfs:label', 'movie_name'), ('dcterms:subject', 'subject'),
('dbpp:country', 'movie_country'), ('dbpp:genre', 'genre', True)])\
.cache()
# 26928 Rows. -- 4273 msec.
american_actors = dataset.filter({'actor_country': ['regex(str(?actor_country), "USA")']})
# 1606 Rows. -- 7659 msec.
prolific_actors = dataset.group_by(['actor'])\
.count('movie', 'movie_count', unique=True).filter({'movie_count': ['>= 200']})
#663,769 Rows. -- 76704 msec.
movies = american_actors.join(prolific_actors, join_col_name1='actor', join_type=JoinType.OuterJoin)\
.join(dataset, join_col_name1='actor')
#.select_cols(['movie_name', 'actor_name', 'genre'])
sparql_query = movies.to_sparql()
print(sparql_query)
def movies_with_american_actors():
graph = KnowledgeGraph(graph_name='dbpedia')
dataset1 = graph.feature_domain_range('dbpp:starring', 'movie1', 'actor')\
.expand('actor', [('dbpp:birthPlace', 'actor_country1'), ('rdfs:label', 'actor_name1')])\
.expand('movie1', [('rdfs:label', 'movie_name1'), ('dcterms:subject', 'subject1'),
('dbpp:country', 'movie_country1'), ('dbpp:genre', 'genre1', True)])
# 26928 Rows. -- 4273 msec.
american_actors = dataset1.filter({'actor_country1': ['regex(str(?actor_country1), "USA")']})
# 1606 Rows. -- 7659 msec.
dataset2 = graph.feature_domain_range('dbpp:starring', 'movie2', 'actor')\
.expand('actor', [('dbpp:birthPlace', 'actor_country2'), ('rdfs:label', 'actor_name2')])\
.expand('movie2', [('rdfs:label', 'movie_name2'), ('dcterms:subject', 'subject2'),
('dbpp:country', 'movie_country2'), ('dbpp:genre', 'genre2', True)])
prolific_actors = dataset2.group_by(['actor'])\
.count('movie2', 'movie_count2', unique=True).filter({'movie_count2': ['>= 200']})
#663,769 Rows. -- 76704 msec.
movies = american_actors.join(prolific_actors, join_col_name1='actor', join_type=JoinType.OuterJoin)\
# .join(dataset, join_col_name1='actor')
#.select_cols(['movie_name', 'actor_name', 'genre'])
sparql_query = movies.to_sparql()
print(sparql_query)
def movies_with_american_actors_optional():
graph = KnowledgeGraph(graph_uri='http://dbpedia.org',
prefixes={'dcterms': 'http://purl.org/dc/terms/',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
'dbpprop': 'http://dbpedia.org/property/',
'dbpr': 'http://dbpedia.org/resource/'})
dataset = graph.feature_domain_range('dbpprop:starring', domain_col_name='movie', range_col_name='actor')\
.expand('actor', [
RDFPredicate('dbpprop:birthPlace', 'actor_country', optional=True),
RDFPredicate('rdfs:label', 'actor_name', optional=True)])\
.expand('movie', [
RDFPredicate('rdfs:label', 'movie_name', optional=True),
RDFPredicate('dcterms:subject', 'subject', optional=True),
RDFPredicate('dbpprop:country', 'movie_country', optional=True)])\
.cache()
# 26928 Rows. -- 4273 msec.
american_actors = dataset.filter({'actor_country': ['regex(str(?actor_country), "USA")']})
# 1606 Rows. -- 7659 msec.
prolific_actors = dataset.group_by(['actor'])\
.count('movie', 'movie_count', unique=True).filter({'movie_count': ['>= 20', '<=30']})
# 663769 Rows. -- 76511 msec.
movies = american_actors.join(prolific_actors, join_col_name1='actor', join_type=JoinType.OuterJoin)\
.join(dataset, join_col_name1='actor')
sparql_query = movies.to_sparql()
print(sparql_query)
endpoint = 'http://10.161.202.101:8890/sparql/'
output_format = HttpClientDataFormat.PANDAS_DF
client = HttpClient(endpoint_url=endpoint, return_format=output_format)
df = dataset.execute(client, return_format=output_format)
print(df)
#movies_with_american_actors_optional()
start = time.time()
movies_with_american_actors()
duration = time.time()-start
print("Duration = {} sec".format(duration))
|
py | 1a527bd1f8176fffb7fd44bef160286ce9a5a7e9 | # Generated by Django 3.0.1 on 2020-01-29 08:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('characters', '0005_merge_20200126_2005'),
]
operations = [
migrations.RenameField(
model_name='character',
old_name='skill_points',
new_name='growth_points',
),
migrations.RenameField(
model_name='character',
old_name='force',
new_name='strength',
),
]
|
py | 1a527cf75f5b605ed5e27d485e83caf789e261a1 | ### Noisy DQN Procgen Config ###
env = {
# "name": it should be defined in the command. ex) python main.py --config config.AGENT.procgen --env.name coinrun
"render": False,
"gray_img": True,
"stack_frame": 4,
"no_op": False,
"reward_clip": True,
}
agent = {
"name": "noisy",
"network": "noisy",
"head": "cnn",
"gamma": 0.99,
"explore_ratio": 0.1,
"buffer_size": 1000000,
"batch_size": 32,
"start_train_step": 100000,
"target_update_period": 10000,
# noisy
"noise_type": "factorized", # [independent, factorized]
}
optim = {
"name": "adam",
"lr": 2.5e-4,
}
train = {
"training": True,
"load_path": None,
"run_step": 30000000,
"print_period": 10000,
"save_period": 100000,
"eval_iteration": 5,
"record": True,
"record_period": 300000,
# distributed setting
"update_period": 32,
"num_workers": 16,
}
|
py | 1a527dbfccf56c328622797c6237de80cc07bc1b | from django.utils.version import get_version
VERSION = (1, 8, 5, 'final', 0)
__version__ = get_version(VERSION)
def setup():
"""
Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
"""
from django.apps import apps
from django.conf import settings
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
apps.populate(settings.INSTALLED_APPS)
|
py | 1a527e84a49c34ec422fa969b713bdc72fce7787 | #!/usr/bin/env python3
""" Make satellite test data """
import os
from pathlib import Path
import numcodecs
import pandas as pd
import xarray as xr
import nowcasting_dataset
START = pd.Timestamp("2020-04-01T12:00")
END = pd.Timestamp("2020-04-01T14:00")
OUTPUT_PATH = Path(os.path.dirname(nowcasting_dataset.__file__)).parent / "tests" / "data"
print(f"{OUTPUT_PATH=}")
# HRV Path
HRV_SAT_FILENAME = (
"/mnt/storage_ssd_8tb/data/ocf/solar_pv_nowcasting/nowcasting_dataset_pipeline/"
"satellite/EUMETSAT/SEVIRI_RSS/zarr/v3/eumetsat_seviri_hrv_uk.zarr"
)
# Non-HRV path
SAT_FILENAME = (
"/mnt/storage_ssd_8tb/data/ocf/solar_pv_nowcasting/nowcasting_dataset_pipeline/"
"satellite/EUMETSAT/SEVIRI_RSS/zarr/v3/eumetsat_seviri_uk.zarr"
)
def generate_satellite_test_data():
"""Main function to make satelllite test data"""
# Create HRV data
output_filename = OUTPUT_PATH / "hrv_sat_data.zarr"
print("Opening", HRV_SAT_FILENAME)
print("Writing satellite tests data to", output_filename)
# This opens all the HRV satellite data
hrv_sat_data = xr.open_mfdataset(
HRV_SAT_FILENAME, chunks={}, mode="r", engine="zarr", concat_dim="time", combine="nested"
)
# v3 of the HRV data doesn't use variables. Instead the HRV data is in the 'data' DataArray.
# hrv_sat_data = hrv_sat_data.sel(variable=["HRV"], time=slice(START, END))
# just take a bit of the time, to keep size of file now
hrv_sat_data = hrv_sat_data.sel(time=slice(START, END))
# Adds compression and chunking
encoding = {
"data": {"compressor": numcodecs.get_codec(dict(id="bz2", level=5))},
"time": {"units": "nanoseconds since 1970-01-01"},
}
# Write the HRV data to disk
hrv_sat_data.to_zarr(
output_filename, mode="w", consolidated=True, encoding=encoding, compute=True
)
# Now do the exact same with the non-HRV data
output_filename = OUTPUT_PATH / "sat_data.zarr"
print("Writing satellite tests data to", output_filename)
sat_data = xr.open_mfdataset(
SAT_FILENAME, chunks={}, mode="r", engine="zarr", concat_dim="time", combine="nested"
)
sat_data = sat_data.sel(variable=["IR_016"], time=slice(START, END))
sat_data.to_zarr(output_filename, mode="w", consolidated=True, encoding=encoding, compute=True)
if __name__ == "__main__":
generate_satellite_test_data()
|
py | 1a527ea10789891b3e7f00bca604dcc7e475c074 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.BAM/Mono_16/udhr_Latn.BAM_Mono_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
py | 1a527ec7e10e041cbc2c574b15f98359341d8145 | from primitiv import Device
from primitiv import tensor_functions as tF
from primitiv.devices import Naive
import numpy as np
import unittest
class TensorFunctionsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.device = Naive()
Device.set_default(self.device)
self.a = np.array([[1, 2], [3, 4]], np.float32)
self.b = np.array([[1, 1], [4, 8]], np.float32)
def tearDown(self):
pass
def test_tensor_pos(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((+x).to_ndarrays()[0] == self.a).all())
def test_tensor_neg(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((-x).to_ndarrays()[0] == -self.a).all())
def test_tensor_add(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x + y).to_ndarrays()[0] == np.array([[2, 3], [7, 12]])).all())
self.assertTrue(((x + 2).to_ndarrays()[0] == np.array([[3, 4], [5, 6]])).all())
self.assertTrue(((2 + x).to_ndarrays()[0] == np.array([[3, 4], [5, 6]])).all())
def test_tensor_sub(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x - y).to_ndarrays()[0] == np.array([[0, 1], [-1, -4]])).all())
self.assertTrue(((x - 2).to_ndarrays()[0] == np.array([[-1, 0], [1, 2]])).all())
self.assertTrue(((2 - x).to_ndarrays()[0] == np.array([[1, 0], [-1, -2]])).all())
def test_tensor_mul(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x * y).to_ndarrays()[0] == np.array([[1, 2], [12, 32]])).all())
self.assertTrue(((x * 2).to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
self.assertTrue(((2 * x).to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
def test_tensor_matmul(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x @ y).to_ndarrays()[0] == np.array([[9, 17], [19, 35]])).all())
self.assertRaises(TypeError, lambda: x @ 2)
self.assertRaises(TypeError, lambda: 2 @ x)
def test_tensor_truediv(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x / y).to_ndarrays()[0] == np.array([[1, 2], [0.75, 0.5]])).all())
self.assertTrue(((x / 2).to_ndarrays()[0] == np.array([[0.5, 1], [1.5, 2]])).all())
self.assertTrue(((2 / y).to_ndarrays()[0] == np.array([[2, 2], [0.5, 0.25]])).all())
def test_tensor_pow(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(np.isclose((x ** y).to_ndarrays()[0], np.array([[1, 2], [81, 65536]])).all())
self.assertTrue(np.isclose((x ** 2).to_ndarrays()[0], np.array([[1, 4], [9, 16]])).all())
self.assertTrue(np.isclose((2 ** x).to_ndarrays()[0], np.array([[2, 4], [8, 16]])).all())
self.assertTrue(np.isclose((x ** -2).to_ndarrays()[0], np.array([[1, 1/4], [1/9, 1/16]])).all())
input_arr = np.array([1, -1, 3, -3, 5, -5])
x = tF.input(input_arr)
self.assertTrue(((x ** 6).to_ndarrays()[0] == np.array([1, 1, 729, 729, 15625, 15625])).all())
self.assertTrue(((x ** 9).to_ndarrays()[0] == np.array([1, -1, 19683, -19683, 1953125, -1953125])).all())
input_arr = np.array([1, -1])
x = tF.input(input_arr)
self.assertTrue(((x ** 0x7fffffff).to_ndarrays()[0] == np.array([1, -1])).all())
self.assertTrue(((x ** -0x80000000).to_ndarrays()[0] == np.array([1, 1])).all())
self.assertRaises(TypeError, lambda: pow(x, y, 2))
def test_tensor_iadd(self):
x = tF.input(self.a)
y = tF.input(self.b)
x_tmp = x
x += y
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[2, 3], [7, 12]])).all())
def test_tensor_isub(self):
x = tF.input(self.a)
y = tF.input(self.b)
x_tmp = x
x -= y
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[0, 1], [-1, -4]])).all())
def test_tensor_imul(self):
x = tF.input(self.a)
x_tmp = x
x *= 2
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
|
py | 1a527ee7e3067f29fbfe48463fc348e87c7d4500 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_isafe
----------------------------------
Tests for `isafe` module.
"""
import unittest
from isafe import isafe
class Testisafe(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a527efdbdf9edd65b5002d1f5d616437145c746 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://www.bggofurther.com/2015/01/create-an-interactive-command-line-menu-using-python/
# This tool won't work in Visual Studio Code (as an example).
# I don't know why this is the case but just run it in cmd.exe
import sys
import os
import collections
import ctypes
from subprocess import Popen, PIPE
import locale
import gui # <-- change name !!
import header
from hurry.filesize import alternative, size # pip install hurry.filesize
from prompt_toolkit import prompt
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.token import Token
# set locale to default to get thousands separators
locale.setlocale(locale.LC_ALL, '')
# Pointer to large unsigned integer
PULARGE_INTEGER = ctypes.POINTER(ctypes.c_ulonglong)
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
kernel32.GetDiskFreeSpaceExW.argtypes = (
ctypes.c_wchar_p,) + (PULARGE_INTEGER,) * 3
def get_size(start_path='.'):
"""
https://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
"""
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return size(total_size, system=alternative)
def get_size2(string):
value = size(string, system=alternative)
return value
def cutit(s, n):
"""
    Cut helper that removes the first n characters from a string.
    s = string
    n = number of characters to remove from the start
"""
return s[n:]
class UsageTuple(collections.namedtuple('UsageTuple', 'total, used, free')):
def __str__(self):
# Add thousands separator to numbers displayed
return '{}, {}, {}'.format(*self)
def disk_usage(path):
try:
# allows str or bytes (or os.PathLike in Python 3.6+)
path = os.fsdecode(path)
except AttributeError: # fsdecode() not added until Python 3.2
pass
# Define variables to receive results when passed as "by reference" arguments
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), ctypes.c_ulonglong()
success = kernel32.GetDiskFreeSpaceExW(
path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if not success:
error_code = ctypes.get_last_error()
if not success:
windows_error_message = ctypes.FormatError(error_code)
raise ctypes.WinError(error_code, '{} {!r}'.format(
windows_error_message, path))
used = total.value - free.value
return UsageTuple(total.value, used, free.value)
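# Hedged usage note (illustrative numbers only): disk_usage('C:/') returns a
# UsageTuple such as UsageTuple(total=512110190592, used=301989888000,
# free=210120302592); a missing drive raises a WinError built from the decoded
# Windows error message. drive_parser() below formats these raw byte counts
# with hurry.filesize.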
def drive_parser(letter):
total, used, free = disk_usage(letter)
total = get_size2(total)
free = get_size2(free)
return free, total
def get_bottom_toolbar_tokens(cli):
free, total = drive_parser('D:/')
return [(Token.Toolbar, ' app folder: {} patch folder: {} SDCard: {} of {} free'.format(get_size('app'), get_size('patch'), free, total))]
def input(string):  # it's intended to redefine the built-in input() XD
style = style_from_dict({
Token.Toolbar: '#ffffff bg:#333333',
})
output = prompt(
string, get_bottom_toolbar_tokens=get_bottom_toolbar_tokens, style=style)
return output
# Main definition - constants
menu_actions = {}
sub_menu = {}
selection = []
name, titleid = gui.send_variables()
# =======================
# MENUS FUNCTIONS
# =======================
def clearscreen(numlines=100):
"""
    Clear the console. numlines is an optional argument used only as a fall-back.
"""
# Thanks to Steven D'Aprano, http://www.velocityreviews.com/forums
if os.name == "posix":
# Unix/Linux/MacOS/BSD/etc
os.system('clear')
elif os.name in ("nt", "dos", "ce"):
# DOS/Windows
os.system('CLS')
else:
# Fallback for other operating systems.
print('\n' * numlines)
def syscmd(cmd):
"""
    Executes the given command in a nicer way than os.system()
    (which is generally considered bad practice) and returns the
    command's output and error streams instead of printing them.
"""
cmoa = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
output, error = cmoa.communicate()
return output, error
# Main menu
def main_menu():
clearscreen()
print("1.Start the download")
print("2.Update Database")
print("3.Search for Games")
print("4.Load the queue from 'input.txt'")
print("5.View the queue")
print("6.Exit")
choice = input(">> ")
exec_menu(choice)
return
# Execute menu
def exec_menu(choice):
clearscreen()
ch = choice.lower()
if ch == '':
menu_actions['main_menu']()
else:
try:
menu_actions[ch]()
except KeyError:
print("Invalid selection, please try again.\n")
menu_actions['main_menu']()
return
def start_download():
clearscreen()
if selection == []:
print("Nothing to download.")
input('\n<press enter>')
menu_actions['main_menu']()
else:
for tid in selection:
header.start_download(tid, 'psv')
input('\n<press enter>')
menu_actions['main_menu']()
def update_database():
clearscreen()
header.initial_setup()
input('\n<press enter>')
menu_actions['main_menu']()
def search():
search_input, selected = gui.start_searching(None)
for item in selected:
selection.append(item)
menu_actions['main_menu']()
def load():
clearscreen()
if header.exists('input.txt') is False:
print("Enter the Filename:")
        filename = input(">> ")  # only the filename here; it is parsed below by header.input_txt()
else:
filename = 'input.txt'
list1 = header.input_txt(filename)
for item in list1:
selection.append(item)
input('\n<press enter>')
menu_actions['main_menu']()
def view():
for item in selection:
position = titleid.index(item)
print(name[position], '[' + item + ']')
input('\n<press enter>')
menu_actions['main_menu']()
# Exit program
def exit():
sys.exit()
# =======================
# MENUS DEFINITIONS
# =======================
# Menu definition
menu_actions = {
'main_menu': main_menu,
'1': start_download,
'2': update_database,
'3': search,
'4': load,
'5': view,
'6': exit,
}
sub_menu = {
'home': search,
}
# =======================
# MAIN PROGRAM
# =======================
# Main Program
if __name__ == "__main__":
# Launch main menu
main_menu()
|
py | 1a52800420148cfb471c540c5a229618dfaf47ae | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.testsdk import ScenarioTest, record_only
import json
import os
id_sql = '/subscriptions/da364f0f-307b-41c9-9d47-b7413ec45535/resourceGroups/pstestwlRG1bca8/providers/Microsoft.Compute/virtualMachines/pstestwlvm1bca8'
item_id_sql = '/Subscriptions/da364f0f-307b-41c9-9d47-b7413ec45535/resourceGroups/pstestwlRG1bca8/providers/Microsoft.RecoveryServices/vaults/pstestwlRSV1bca8/backupFabrics/Azure/protectionContainers/vmappcontainer;compute;pstestwlrg1bca8;pstestwlvm1bca8/protectedItems/sqldatabase;mssqlserver;testdb'
sub_sql = 'da364f0f-307b-41c9-9d47-b7413ec45535'
rg_sql = 'pstestwlRG1bca8'
vault_sql = 'pstestwlRSV1bca8'
container_sql = 'VMAppContainer;Compute;pstestwlRG1bca8;pstestwlvm1bca8'
container_friendly_sql = 'pstestwlvm1bca8'
item_auto_sql = 'SQLInstance;mssqlserver'
item1_sql = 'SQLDataBase;MSSQLSERVER;testdb'
item2_sql = 'msdb'
backup_entity_friendly_name_sql = 'MSSQLSERVER/testdb1 [pstestwlvm1bca8]'
class BackupTests(ScenarioTest, unittest.TestCase):
# SQL workload tests start here
# Please make sure you have the following setup in place before running the tests -
# For the tests using pstestwlvm1bca8 and pstestwlRSV1bca8 -
# Each test will register the container at the start and unregister at the end of the test
# Make sure that the container is not already registered since the start of the test
# For the tests using PSTestVM664243 and hiagaSrcVault -
# Each test will register the container at the start and unregister at the end of the test
# Make sure that the container is not already registered since the start of the test
    # Note: Archive and CRR tests use a different subscription. Please comment them out when running the whole test suite at once, and run those tests individually.
@record_only()
def test_backup_wl_sql_container(self):
self.kwargs.update({
'vault': "hiagaSrcVault",
'name': "VMAppContainer;Compute;hiagaSrcRG2;PSTestVM664243",
'fname': "PSTestVM664243",
'rg': "hiagaSrcRG",
'wt': 'MSSQL',
'sub': sub_sql,
'id': "/subscriptions/da364f0f-307b-41c9-9d47-b7413ec45535/resourceGroups/HIAGASRCRG2/providers/Microsoft.Compute/virtualMachines/PSTestVM664243"
})
self.cmd('backup container register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} --resource-id {id} ')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
container_json = self.cmd('backup container show -n {name} -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check('properties.friendlyName', '{fname}'),
self.check('properties.healthStatus', 'Healthy'),
self.check('properties.registrationStatus', 'Registered'),
self.check('resourceGroup', '{rg}')
]).get_output_in_json()
self.kwargs['container_name'] = container_json['name']
self.cmd('backup container show -n {container_name} -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check('properties.friendlyName', '{fname}'),
self.check('properties.healthStatus', 'Healthy'),
self.check('properties.registrationStatus', 'Registered'),
self.check('name', '{container_name}'),
self.check('resourceGroup', '{rg}')
]).get_output_in_json()
self.assertIn(self.kwargs['vault'].lower(), container_json['id'].lower())
self.assertIn(self.kwargs['name'].lower(), container_json['name'].lower())
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?properties.friendlyName == '{fname}'])", 1)])
self.cmd('backup container re-register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} -y --container-name {name}')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.cmd('backup container unregister -v {vault} -g {rg} -c {name} -y')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 0)])
@record_only()
def test_backup_wl_sql_policy(self):
self.kwargs.update({
'vault': vault_sql,
'name': container_sql,
'fname': container_friendly_sql,
'policy': 'HourlyLogBackup',
'wt': 'MSSQL',
'sub': sub_sql,
'default': 'HourlyLogBackup',
'rg': rg_sql,
'item': item1_sql,
'id': id_sql,
'item_id': item_id_sql,
'pit': 'SQLDataBase',
'policy_new': self.create_random_name('clitest-policy', 24)
})
self.kwargs['policy1_json'] = self.cmd('backup policy show -g {rg} -v {vault} -n {policy}', checks=[
self.check('name', '{policy}'),
self.check('resourceGroup', '{rg}')
]).get_output_in_json()
self.kwargs['policy_json'] = json.dumps(self.kwargs['policy1_json'], separators=(',', ':')).replace('\'', '\\\'').replace('"', '\\"')
self.cmd("backup policy create -g {rg} -v {vault} --policy {policy_json} --backup-management-type AzureWorkload --workload-type {wt} --name {policy_new}", checks=[
self.check('name', '{policy_new}'),
self.check('resourceGroup', '{rg}')
])
self.cmd('backup policy list -g {rg} -v {vault}', checks=[
self.check("length([?name == '{default}'])", 1),
self.check("length([?name == '{policy}'])", 1),
self.check("length([?name == '{policy_new}'])", 1)
])
self.kwargs['policy1_json']['properties']['settings']['isCompression'] = 'true'
self.kwargs['policy1_json']['properties']['settings']['issqlcompression'] = 'true'
self.kwargs['policy1_json'] = json.dumps(self.kwargs['policy1_json'], separators=(',', ':')).replace('\'', '\\\'').replace('"', '\\"')
self.cmd("backup policy set -g {rg} -v {vault} --policy {policy1_json} -n {policy_new}", checks=[
self.check('name', '{policy_new}'),
self.check('resourceGroup', '{rg}')
])
self.cmd("backup policy set -g {rg} -v {vault} --backup-management-type AzureWorkload --fix-for-inconsistent-items -n {policy_new}", checks=[
self.check('name', '{policy_new}'),
self.check('resourceGroup', '{rg}')
])
self.cmd('backup policy show -g {rg} -v {vault} -n {policy_new}', checks=[
self.check('name', '{policy_new}'),
self.check('resourceGroup', '{rg}')
])
self.cmd('backup policy delete -g {rg} -v {vault} -n {policy_new}')
self.cmd('backup policy list -g {rg} -v {vault}', checks=[
self.check("length([?name == '{default}'])", 1),
self.check("length([?name == '{policy}'])", 1),
self.check("length([?name == '{policy_new}'])", 0)
])
@record_only()
def test_backup_wl_sql_protectable_item(self):
self.kwargs.update({
'vault': vault_sql,
'name': container_sql,
'fname': container_friendly_sql,
'policy': 'HourlyLogBackup',
'wt': 'MSSQL',
'sub': sub_sql,
'default': 'HourlyLogBackup',
'rg': rg_sql,
'item': item1_sql,
'id': id_sql,
'item_id': item_id_sql,
'pit': 'SQLDataBase',
'protectable_item_name': 'testdb',
'pit_hana': 'SAPHanaDatabase'
})
self.cmd('backup container register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} --resource-id {id}')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.kwargs['container1'] = self.cmd('backup container show -n {name} -v {vault} -g {rg} --query properties.friendlyName --backup-management-type AzureWorkload').get_output_in_json()
self.cmd('backup protectable-item list -g {rg} --vault-name {vault} --workload-type {wt}', checks=[
self.check("length([?properties.friendlyName == '{protectable_item_name}'])", 1)
])
self.cmd('backup protectable-item show -g {rg} --vault-name {vault} --name {protectable_item_name} --workload-type {wt} --protectable-item-type {pit} --server-name {fname}', checks=[
self.check('properties.friendlyName', '{protectable_item_name}'),
self.check('properties.protectableItemType', '{pit}'),
self.check('properties.serverName', '{fname}'),
self.check('resourceGroup', '{rg}')
])
self.cmd('backup container unregister -v {vault} -g {rg} -c {name} -y')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 0)])
@record_only()
def test_backup_wl_sql_rp(self):
resource_group = rg_sql.lower()
self.kwargs.update({
'vault': vault_sql,
'name': container_sql,
'rg': resource_group,
'fname': container_friendly_sql,
'policy': 'HourlyLogBackup',
'wt': 'MSSQL',
'sub': sub_sql,
'item': item1_sql,
'pit': 'SQLDatabase',
'item_id': item_id_sql,
'id': id_sql,
'fitem': 'testdb'
})
self.cmd('backup container register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} --resource-id {id}')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.cmd('backup protection enable-for-azurewl -v {vault} -g {rg} -p {policy} --protectable-item-type {pit} --protectable-item-name {item} --server-name {fname} --workload-type {wt}', checks=[
self.check("properties.entityFriendlyName", '{fitem}'),
self.check("properties.operation", "ConfigureBackup"),
self.check("properties.status", "Completed"),
self.check("resourceGroup", '{rg}')
])
self.kwargs['container1'] = self.cmd('backup container show -n {name} -v {vault} -g {rg} --backup-management-type AzureWorkload --query name').get_output_in_json()
self.cmd('backup recoverypoint list -g {rg} -v {vault} -c {name} -i {item} --workload-type {wt} --query [].name', checks=[
self.check("length(@)", 1)
])
rp1_json = self.cmd('backup recoverypoint show-log-chain -g {rg} -v {vault} -c {name} -i {item} --workload-type {wt}').get_output_in_json()
self.assertIn(vault_sql.lower(), rp1_json[0]['id'].lower())
self.assertIn(container_sql.lower(), rp1_json[0]['id'].lower())
rp2_json = self.cmd('backup recoverypoint show-log-chain -g {rg} -v {vault} -c {name} -i {item} --workload-type {wt}').get_output_in_json()
self.assertIn(vault_sql.lower(), rp2_json[0]['id'].lower())
self.assertIn(container_sql.lower(), rp2_json[0]['id'].lower())
self.cmd('backup protection disable -v {vault} -g {rg} -c {container1} --backup-management-type AzureWorkload --workload-type {wt} -i {item} -y --delete-backup-data true')
self.cmd('backup container unregister -v {vault} -g {rg} -c {name} -y')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 0)])
@record_only()
def test_backup_wl_sql_auto_protection(self):
self.kwargs.update({
'vault': vault_sql,
'name': container_sql,
'fname': container_friendly_sql,
'policy': 'HourlyLogBackup',
'wt': 'MSSQL',
'sub': sub_sql,
'default': 'HourlyLogBackup',
'rg': rg_sql,
'item': item_auto_sql,
'fitem': item_auto_sql.split(';')[-1],
'id': id_sql,
'item_id': item_id_sql,
'pit': 'SQLInstance',
'entityFriendlyName': backup_entity_friendly_name_sql
})
self.cmd('backup container register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} --resource-id {id}')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.cmd('backup protection auto-enable-for-azurewl -v {vault} -g {rg} -p {policy} --protectable-item-name {item} --protectable-item-type {pit} --server-name {fname} --workload-type {wt}')
protectable_item_json = self.cmd('backup protectable-item show -v {vault} -g {rg} -n {item} --protectable-item-type {pit} --server-name {fname} --workload-type {wt}', checks=[
self.check("properties.isAutoProtected", True)]).get_output_in_json()
self.assertIn(self.kwargs['policy'], protectable_item_json['properties']['autoProtectionPolicy'])
self.cmd('backup protection auto-disable-for-azurewl -v {vault} -g {rg} --protectable-item-name {item} --protectable-item-type {pit} --server-name {fname} --workload-type {wt}')
self.cmd('backup protectable-item show -v {vault} -g {rg} -n {item} --protectable-item-type {pit} --server-name {fname} --workload-type {wt}', checks=[
self.check("properties.isAutoProtected", False),
self.check("properties.autoProtectionPolicy", None)])
self.cmd('backup container unregister -v {vault} -g {rg} -c {name} -y')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 0)])
@record_only()
def test_backup_wl_sql_item(self):
resource_group = rg_sql.lower()
self.kwargs.update({
'vault': vault_sql,
'name': container_sql,
'rg': resource_group,
'fname': container_friendly_sql,
'policy': 'HourlyLogBackup',
'wt': 'MSSQL',
'sub': sub_sql,
'item': item1_sql,
'pit': 'SQLDatabase',
'item_id': item_id_sql,
'id': id_sql,
'fitem': 'testdb'
})
self.cmd('backup container register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} --resource-id {id}')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.cmd('backup protection enable-for-azurewl -v {vault} -g {rg} -p {policy} --protectable-item-type {pit} --protectable-item-name {item} --server-name {fname} --workload-type {wt}')
self.kwargs['container1'] = self.cmd('backup container show -n {name} -v {vault} -g {rg} --backup-management-type AzureWorkload --query name').get_output_in_json()
item1_json = self.cmd('backup item show -g {rg} -v {vault} -c {name} -n {item} --backup-management-type AzureWorkload --workload-type {wt}', checks=[
self.check('properties.friendlyName', '{fitem}'),
self.check('properties.protectedItemHealthStatus', 'IRPending'),
self.check('properties.protectionState', 'IRPending'),
self.check('properties.protectionStatus', 'Healthy'),
self.check('resourceGroup', '{rg}')
]).get_output_in_json()
self.assertIn(self.kwargs['vault'].lower(), item1_json['id'].lower())
self.assertIn(self.kwargs['fname'].lower(), item1_json['properties']['containerName'].lower())
self.assertIn(self.kwargs['fname'].lower(), item1_json['properties']['sourceResourceId'].lower())
self.assertIn(self.kwargs['policy'].lower(), item1_json['properties']['policyId'].lower())
self.kwargs['container1_fullname'] = self.cmd('backup container show -n {name} -v {vault} -g {rg} --backup-management-type AzureWorkload --query name').get_output_in_json()
self.cmd('backup item show -g {rg} -v {vault} -c {container1_fullname} -n {item} --backup-management-type AzureWorkload --workload-type {wt}', checks=[
self.check('properties.friendlyName', '{fitem}'),
self.check('properties.protectedItemHealthStatus', 'IRPending'),
self.check('properties.protectionState', 'IRPending'),
self.check('properties.protectionStatus', 'Healthy'),
self.check('resourceGroup', '{rg}')
])
self.kwargs['item1_fullname'] = item1_json['name']
self.cmd('backup item show -g {rg} -v {vault} -c {container1_fullname} -n {item1_fullname} --backup-management-type AzureWorkload --workload-type SAPHanaDatabase', checks=[
self.check('properties.friendlyName', '{fitem}'),
self.check('properties.protectedItemHealthStatus', 'IRPending'),
self.check('properties.protectionState', 'IRPending'),
self.check('properties.protectionStatus', 'Healthy'),
self.check('resourceGroup', '{rg}')
])
self.cmd('backup item list -g {rg} -v {vault} -c {container1} --backup-management-type AzureWorkload --workload-type SQLDataBase', checks=[
self.check("length([?properties.friendlyName == '{fitem}'])", 1)
])
self.cmd('backup item list -g {rg} -v {vault} -c {container1_fullname} --backup-management-type AzureWorkload --workload-type SQLDataBase', checks=[
self.check("length([?properties.friendlyName == '{fitem}'])", 1)
])
self.cmd('backup item list -g {rg} -v {vault} --backup-management-type AzureWorkload --workload-type SQLDataBase', checks=[
self.check("length([?properties.friendlyName == '{fitem}'])", 1)
])
self.cmd('backup item set-policy -g {rg} -v {vault} -c {container1} -n {item1_fullname} -p {policy} --backup-management-type AzureWorkload --workload-type SQLDataBase', checks=[
self.check("properties.entityFriendlyName", '{fitem}'),
self.check("properties.operation", "ConfigureBackup"),
self.check("properties.status", "Completed"),
self.check("resourceGroup", '{rg}')
])
item1_json = self.cmd('backup item show -g {rg} -v {vault} -c {container1} -n {item} --backup-management-type AzureWorkload --workload-type SQLDataBase').get_output_in_json()
self.assertIn("HourlyLogBackup".lower(), item1_json['properties']['policyId'].lower())
self.cmd('backup protection disable -v {vault} -g {rg} -c {container1} --backup-management-type AzureWorkload --workload-type {wt} -i {item} -y --delete-backup-data true')
self.cmd('backup container unregister -v {vault} -g {rg} -c {name} -y')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 0)])
@record_only()
def test_backup_wl_sql_protection(self):
resource_group = rg_sql.lower()
self.kwargs.update({
'vault': vault_sql,
'name': container_sql,
'rg': resource_group,
'fname': container_friendly_sql,
'policy': 'HourlyLogBackup',
'wt': 'MSSQL',
'sub': sub_sql,
'item': item1_sql,
'pit': 'SQLDatabase',
'item_id': item_id_sql,
'id': id_sql,
'fitem': 'testdb'
})
self.cmd('backup container register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} --resource-id {id}')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.cmd('backup protection enable-for-azurewl -v {vault} -g {rg} -p {policy} --protectable-item-type {pit} --protectable-item-name {item} --server-name {fname} --workload-type {wt}', checks=[
self.check("properties.entityFriendlyName", '{fitem}'),
self.check("properties.operation", "ConfigureBackup"),
self.check("properties.status", "Completed"),
self.check("resourceGroup", '{rg}')
])
self.kwargs['container1'] = self.cmd('backup container show -n {name} -v {vault} -g {rg} --backup-management-type AzureWorkload --query name').get_output_in_json()
self.kwargs['backup_job'] = self.cmd('backup protection backup-now -v {vault} -g {rg} -i {item} -c {name} --backup-type Full --enable-compression false', checks=[
self.check("properties.status", "InProgress"),
self.check("resourceGroup", '{rg}')
]).get_output_in_json()
self.assertIn("Backup", self.kwargs['backup_job']['properties']['operation'])
self.kwargs['job'] = self.kwargs['backup_job']['name']
self.cmd('backup job wait -v {vault} -g {rg} -n {job}')
self.cmd('backup item show -g {rg} -v {vault} -c {container1} -n {item} --backup-management-type AzureWorkload', checks=[
self.check('properties.friendlyName', '{fitem}'),
self.check('properties.protectedItemHealthStatus', 'Healthy'),
self.check('properties.protectionState', 'Protected'),
self.check('properties.protectionStatus', 'Healthy'),
self.check('resourceGroup', '{rg}')
])
self.cmd('backup protection disable -v {vault} -g {rg} -i {item} -c {name} --backup-management-type AzureWorkload -y', checks=[
self.check("properties.entityFriendlyName", '{fitem}'),
self.check("properties.operation", "DisableBackup"),
self.check("properties.status", "Completed"),
self.check("resourceGroup", '{rg}')
])
self.cmd('backup item show -g {rg} -v {vault} -c {container1} -n {item} --backup-management-type AzureWorkload', checks=[
self.check("properties.friendlyName", '{fitem}'),
self.check("properties.protectionState", "ProtectionStopped"),
self.check("resourceGroup", '{rg}')
])
self.cmd('backup protection disable -v {vault} -g {rg} -c {container1} --backup-management-type AzureWorkload --workload-type {wt} -i {item} -y --delete-backup-data true')
self.cmd('backup container unregister -v {vault} -g {rg} -c {name} -y')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 0)])
@record_only()
def test_backup_wl_sql_restore(self):
resource_group = rg_sql.lower()
self.kwargs.update({
'vault': vault_sql,
'name': container_sql,
'rg': resource_group,
'fname': container_friendly_sql,
'policy': 'HourlyLogBackup',
'wt': 'MSSQL',
'sub': sub_sql,
'item': item1_sql,
'fitem': 'testdb',
'id': id_sql,
'pit': 'SQLDatabase',
'item_id': item_id_sql,
'titem': 'testdb_restored'
})
self.cmd('backup container register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} --resource-id {id}')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.cmd('backup protection enable-for-azurewl -v {vault} -g {rg} -p {policy} --protectable-item-type {pit} --protectable-item-name {item} --server-name {fname} --workload-type {wt}', checks=[
self.check("properties.entityFriendlyName", '{fitem}'),
self.check("properties.operation", "ConfigureBackup"),
self.check("properties.status", "Completed"),
self.check("resourceGroup", '{rg}')
])
self.kwargs['container1'] = self.cmd('backup container show -n {name} -v {vault} -g {rg} --backup-management-type AzureWorkload --query name').get_output_in_json()
self.kwargs['backup_job'] = self.cmd('backup protection backup-now -v {vault} -g {rg} -i {item} -c {name} --backup-type Full --enable-compression false', checks=[
self.check("properties.status", "InProgress"),
self.check("resourceGroup", '{rg}')
]).get_output_in_json()
self.assertIn("Backup", self.kwargs['backup_job']['properties']['operation'])
self.kwargs['job'] = self.kwargs['backup_job']['name']
self.cmd('backup job wait -v {vault} -g {rg} -n {job}')
self.cmd('backup item show -g {rg} -v {vault} -c {container1} -n {item} --backup-management-type AzureWorkload', checks=[
self.check('properties.friendlyName', '{fitem}'),
self.check('properties.protectedItemHealthStatus', 'Healthy'),
self.check('properties.protectionState', 'Protected'),
self.check('properties.protectionStatus', 'Healthy'),
self.check('resourceGroup', '{rg}')
])
self.kwargs['rp'] = self.cmd('backup recoverypoint list -g {rg} -v {vault} -c {name} -i {item} --workload-type {wt} --query [0]').get_output_in_json()
self.kwargs['rp'] = self.kwargs['rp']['name']
self.kwargs['rc'] = json.dumps(self.cmd('backup recoveryconfig show --vault-name {vault} -g {rg} --restore-mode AlternateWorkloadRestore --rp-name {rp} --item-name {item} --container-name {container1} --target-item-name {titem} --target-server-type SQLInstance --target-server-name {fname} --workload-type {wt}').get_output_in_json(), separators=(',', ':'))
with open("recoveryconfig_sql_restore.json", "w") as f:
f.write(self.kwargs['rc'])
self.kwargs['backup_job'] = self.cmd('backup restore restore-azurewl --vault-name {vault} -g {rg} --recovery-config recoveryconfig_sql_restore.json', checks=[
self.check("properties.operation", "Restore"),
self.check("properties.status", "InProgress"),
self.check("resourceGroup", '{rg}')
]).get_output_in_json()
self.kwargs['job'] = self.kwargs['backup_job']['name']
self.cmd('backup job wait -v {vault} -g {rg} -n {job}')
self.kwargs['rc'] = json.dumps(self.cmd('backup recoveryconfig show --vault-name {vault} -g {rg} --restore-mode OriginalWorkloadRestore --item-name {item} --container-name {container1} --rp-name {rp}').get_output_in_json(), separators=(',', ':'))
with open("recoveryconfig_sql_restore.json", "w") as f:
f.write(self.kwargs['rc'])
self.kwargs['backup_job'] = self.cmd('backup restore restore-azurewl --vault-name {vault} -g {rg} --recovery-config recoveryconfig_sql_restore.json', checks=[
self.check("properties.operation", "Restore"),
self.check("properties.status", "InProgress"),
self.check("resourceGroup", '{rg}')
]).get_output_in_json()
self.kwargs['job'] = self.kwargs['backup_job']['name']
self.cmd('backup job wait -v {vault} -g {rg} -n {job}')
self.cmd('backup protection disable -v {vault} -g {rg} -c {name} --backup-management-type AzureWorkload --workload-type {wt} -i {item} -y --delete-backup-data true')
self.cmd('backup container unregister -v {vault} -g {rg} -c {name} -y')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 0)])
@record_only()
def test_backup_wl_sql_restore_as_files(self):
resource_group = rg_sql.lower()
self.kwargs.update({
'vault': vault_sql,
'name': container_sql,
'rg': resource_group,
'fname': container_friendly_sql,
'policy': 'HourlyLogBackup',
'wt': 'MSSQL',
'sub': sub_sql,
'item': item1_sql,
'fitem': 'testdb',
'id': id_sql,
'pit': 'SQLDatabase',
'item_id': item_id_sql,
'titem': 'testdb_restored'
})
self.cmd('backup container register -v {vault} -g {rg} --backup-management-type AzureWorkload --workload-type {wt} --resource-id {id}')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.cmd('backup protection enable-for-azurewl -v {vault} -g {rg} -p {policy} --protectable-item-type {pit} --protectable-item-name {item} --server-name {fname} --workload-type {wt}', checks=[
self.check("properties.entityFriendlyName", '{fitem}'),
self.check("properties.operation", "ConfigureBackup"),
self.check("properties.status", "Completed"),
self.check("resourceGroup", '{rg}')
])
self.kwargs['container1'] = self.cmd('backup container show -n {name} -v {vault} -g {rg} --backup-management-type AzureWorkload --query name').get_output_in_json()
self.kwargs['backup_job'] = self.cmd('backup protection backup-now -v {vault} -g {rg} -i {item} -c {name} --backup-type Full --enable-compression false', checks=[
self.check("properties.status", "InProgress"),
self.check("resourceGroup", '{rg}')
]).get_output_in_json()
self.assertIn("Backup", self.kwargs['backup_job']['properties']['operation'])
self.kwargs['job'] = self.kwargs['backup_job']['name']
self.cmd('backup job wait -v {vault} -g {rg} -n {job}')
self.cmd('backup item show -g {rg} -v {vault} -c {container1} -n {item} --backup-management-type AzureWorkload', checks=[
self.check('properties.protectedItemHealthStatus', 'Healthy'),
self.check('properties.protectionState', 'Protected'),
self.check('properties.protectionStatus', 'Healthy'),
self.check('resourceGroup', '{rg}')
])
self.kwargs['rp'] = self.cmd('backup recoverypoint list -g {rg} -v {vault} -c {name} -i {item} --workload-type {wt} --query [0]').get_output_in_json()
self.kwargs['rp'] = self.kwargs['rp']['name']
self.kwargs['rc'] = json.dumps(self.cmd('backup recoveryconfig show --vault-name {vault} -g {rg} --restore-mode RestoreAsFiles --rp-name {rp} --filepath "C:\" --target-container-name {container1} --item-name {item} --container-name {container1} --workload-type {wt}').get_output_in_json(), separators=(',', ':'))
with open("recoveryconfig_sql_raf.json", "w") as f:
f.write(self.kwargs['rc'])
self.kwargs['backup_job'] = self.cmd('backup restore restore-azurewl --vault-name {vault} -g {rg} --recovery-config recoveryconfig_sql_raf.json', checks=[
self.check("properties.operation", "Restore"),
self.check("properties.status", "InProgress"),
self.check("resourceGroup", '{rg}')
]).get_output_in_json()
self.kwargs['job'] = self.kwargs['backup_job']['name']
self.cmd('backup job wait -v {vault} -g {rg} -n {job}')
self.cmd('backup protection disable -v {vault} -g {rg} -c {name} --backup-management-type AzureWorkload --workload-type {wt} -i {item} -y --delete-backup-data true')
self.cmd('backup container unregister -v {vault} -g {rg} -c {name} -y')
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 0)])
@record_only()
def test_backup_wl_sql_crr(self):
self.kwargs.update({
'vault': "sql-clitest-vault",
'name': "VMAppContainer;Compute;sql-clitest-rg;sql-clitest-vm",
'fname': "sql-clitest-vm",
'wt': 'MSSQL',
'sub': "vsarg-MABPortalTestAutomation_NOB",
'rg': "sql-clitest-rg",
'item': "SQLDataBase;mssqlserver;msdb",
'fitem': "msdb",
'tvault': "clitest-vault-secondary-donotuse",
'trg': "clitest-rg-donotuse",
'tcontainer': "clitest-sql-secondary-donotuse",
'tserver': "clitest-sql-sec",
'tpit': 'SQLInstance',
'titem': 'msdb_restored'
})
self.cmd('backup container list -v {vault} -g {rg} --backup-management-type AzureWorkload', checks=[
self.check("length([?name == '{name}'])", 1)])
self.kwargs['container1'] = self.cmd('backup container show -n {name} -v {vault} -g {rg} --backup-management-type AzureWorkload --query name').get_output_in_json()
self.cmd('backup item show -g {rg} -v {vault} -c {container1} -n {item} --backup-management-type AzureWorkload', checks=[
self.check('properties.friendlyName', '{fitem}'),
self.check('properties.protectedItemHealthStatus', 'Healthy'),
self.check('properties.protectionState', 'Protected'),
self.check('properties.protectionStatus', 'Healthy'),
self.check('resourceGroup', '{rg}')
])
self.kwargs['rp'] = self.cmd('backup recoverypoint list -g {rg} -v {vault} -c {name} -i {item} --workload-type {wt} --use-secondary-region --query [0]').get_output_in_json()
self.kwargs['rp'] = self.kwargs['rp']['name']
        # SQL CRR ALR Restore
self.kwargs['rc'] = json.dumps(self.cmd('backup recoveryconfig show --vault-name {vault} -g {rg} --restore-mode AlternateWorkloadRestore --rp-name {rp} --item-name {item} --container-name {container1} --target-item-name {titem} --target-server-type SQLInstance --target-server-name {tserver} --target-container-name {tcontainer} --workload-type {wt} --target-vault-name {tvault} --target-resource-group {trg}').get_output_in_json(), separators=(',', ':'))
with open("recoveryconfig_sql_crr.json", "w") as f:
f.write(self.kwargs['rc'])
self.kwargs['backup_job'] = self.cmd('backup restore restore-azurewl --vault-name {vault} -g {rg} --recovery-config recoveryconfig_sql_crr.json --use-secondary-region', checks=[
self.check("properties.operation", "CrossRegionRestore"),
self.check("properties.status", "InProgress")
]).get_output_in_json()
self.kwargs['job'] = self.kwargs['backup_job']['name']
self.cmd('backup job wait -v {vault} -g {rg} -n {job} --use-secondary-region')
        # SQL CRR RAF Restore
self.kwargs['rc'] = json.dumps(self.cmd('backup recoveryconfig show --vault-name {vault} -g {rg} --restore-mode restoreasfiles --rp-name {rp} --item-name {item} --container-name {container1} --target-container-name {tcontainer} --workload-type {wt} --target-vault-name {tvault} --target-resource-group {trg} --filepath "C:\"').get_output_in_json(), separators=(',', ':'))
with open("recoveryconfig_sql_crr.json", "w") as f:
f.write(self.kwargs['rc'])
self.kwargs['backup_job'] = self.cmd('backup restore restore-azurewl --vault-name {vault} -g {rg} --recovery-config recoveryconfig_sql_crr.json --use-secondary-region', checks=[
self.check("properties.operation", "CrossRegionRestore"),
self.check("properties.status", "InProgress")
]).get_output_in_json()
self.kwargs['job'] = self.kwargs['backup_job']['name']
self.cmd('backup job wait -v {vault} -g {rg} -n {job} --use-secondary-region')
@record_only()
    def test_backup_wl_sql_archive(self):
self.kwargs.update({
'vault': "archiveccyvault1",
'rg': "ArchiveResourceGroup",
'sub': "AzureBackup_Functional_Testing",
'item': "SQLDataBase;mssqlserver;msdb",
'container': "VMAppContainer;compute;archiveresourcegroup;archsqlccyvm2"
})
        # Getting the recovery point IDs (names) and storing them in a list
        rp_names = self.cmd('backup recoverypoint list --backup-management-type AzureWorkload --workload-type MSSQL -g {rg} -v {vault} -c {container} -i {item}').get_output_in_json()
self.kwargs['rp1'] = rp_names[0]['name']
self.kwargs['rp1_tier'] = rp_names[0]['tierType']
self.kwargs['rp1_is_ready_for_move'] = rp_names[0]['properties']['recoveryPointMoveReadinessInfo']['ArchivedRP']['isReadyForMove']
# Check Archivable Recovery Points
self.cmd('backup recoverypoint list -g {rg} -v {vault} -i {item} -c {container} --backup-management-type AzureWorkload --is-ready-for-move {rp1_is_ready_for_move} --target-tier VaultArchive --query [0]', checks=[
self.check("resourceGroup", '{rg}'),
self.check("properties.recoveryPointMoveReadinessInfo.ArchivedRP.isReadyForMove", '{rp1_is_ready_for_move}')
])
# Get Archived Recovery Points
self.cmd('backup recoverypoint list -g {rg} -v {vault} -i {item} -c {container} --backup-management-type AzureWorkload --tier {rp1_tier} --query [0]', checks=[
self.check("tierType", '{rp1_tier}'),
self.check("resourceGroup", '{rg}')
])
is_move = False
for i in rp_names:
if i['tierType']=="VaultStandard" and i['properties']['recoveryPointMoveReadinessInfo']['ArchivedRP']['isReadyForMove']==True:
self.kwargs['rp_move'] = i['name']
is_move = True
break
if is_move:
# # Move Recovery points
self.cmd('backup recoverypoint move -g {rg} -v {vault} -i {item} -c {container} --source-tier VaultStandard --destination-tier VaultArchive --name {rp_move}', checks=[
self.check("properties.entityFriendlyName", 'msdb [archsqlccyvm2]'),
self.check("resourceGroup", '{rg}'),
self.check("properties.operation", "MoveRecoveryPoint"),
self.check("properties.status", "Completed")
])
# Getting the recovery point ID in VaultArchive tier
self.kwargs['rp_restore'] = self.cmd('backup recoverypoint list --backup-management-type AzureWorkload --workload-type MSSQL -g {rg} -v {vault} -c {container} -i {item} --tier VaultArchive --query [0]').get_output_in_json()
self.kwargs['rp_restore'] = self.kwargs['rp_restore']['name']
# # Integrated Restore
self.kwargs['rc'] = json.dumps(self.cmd('backup recoveryconfig show --vault-name {vault} -g {rg} --restore-mode OriginalWorkloadRestore --item-name {item} --container-name {container} --rp-name {rp_restore}').get_output_in_json(), separators=(',', ':'))
with open("recoveryconfig_sql_archive.json", "w") as f:
f.write(self.kwargs['rc'])
# # Trigger Restore
self.cmd('backup restore restore-azurewl -g {rg} -v {vault} --recovery-config recoveryconfig_sql_archive.json --rehydration-priority High', checks=[
self.check("properties.operation", "RestoreWithRehydrate"),
self.check("properties.status", "InProgress"),
self.check("resourceGroup", '{rg}')
]).get_output_in_json()
|
py | 1a52807d5ae10d62b95d33236aaea2c988f13b28 | # -*- coding: utf-8 -*-
# This is for introducing **syntactic** local bindings, i.e. simple code splicing
# at macro expansion time. If you're looking for regular run-time let et al. macros,
# see letdo.py.
# TODO: Coverage of code using `with block` and `with expr` is not reported correctly.
#
# TODO: As this is a toy macro system within the real macro system, that is to be expected;
# TODO: `mcpyrate` goes to some degree of trouble to produce correct coverage reporting for
# TODO: the real macro system, and we haven't duplicated that effort here.
#
# TODO: With `mcpyrate`, we don't really need `let_syntax` and `abbrev` anymore, so we could
# TODO: actually remove them; but their tests exercise some code paths that would otherwise
# TODO: remain untested. As of v0.15.0, we're keeping them for now.
__all__ = ["let_syntax", "abbrev", "expr", "block"]
from mcpyrate.quotes import macros, q, a # noqa: F401
from ast import Name, Call, Subscript, Tuple, Starred, Expr, With
from copy import deepcopy
from functools import partial
import sys
from mcpyrate import parametricmacro
from mcpyrate.quotes import is_captured_value
from mcpyrate.utils import rename
from mcpyrate.walkers import ASTTransformer, ASTVisitor
from .letdo import _implicit_do, _destructure_and_apply_let
from .nameutil import is_unexpanded_block_macro
from .util import eliminate_ifones
from ..dynassign import dyn
# --------------------------------------------------------------------------------
# Macro interface
@parametricmacro
def let_syntax(tree, *, args, syntax, expander, **kw):
"""[syntax, expr/block] Introduce local **syntactic** bindings.
**Expression variant**::
let_syntax[lhs << rhs, ...][body]
let_syntax[lhs << rhs, ...][[body0, ...]]
Alternative haskelly syntax::
let_syntax[[lhs << rhs, ...] in body]
let_syntax[[lhs << rhs, ...] in [body0, ...]]
let_syntax[body, where[lhs << rhs, ...]]
let_syntax[[body0, ...], where[lhs << rhs, ...]]
**Block variant**::
with let_syntax:
with block as xs: # capture a block of statements - bare name
...
with block[a, ...] as xs: # capture a block of statements - template
...
with expr as x: # capture a single expression - bare name
...
with expr[a, ...] as x: # capture a single expression - template
...
body0
...
A single expression can be a ``do[]`` if multiple expressions are needed.
The bindings are applied **at macro expansion time**, substituting
the expression on the RHS for each instance of the corresponding LHS.
Each substitution gets a fresh copy.
This is useful to e.g. locally abbreviate long function names at macro
expansion time (with zero run-time overhead), or to splice in several
(possibly parametric) instances of a common pattern.
In the expression variant, ``lhs`` may be:
- A bare name (e.g. ``x``), or
- A simple template of the form ``f(x, ...)``. The names inside the
parentheses declare the formal parameters of the template (that can
then be used in the body).
In the block variant:
- The **as-part** specifies the name of the LHS.
- If a template, the formal parameters are declared on the ``block``
or ``expr``, not on the as-part (due to syntactic limitations).
**Templates**
To make parametric substitutions, use templates.
Templates support only positional arguments, with no default values.
Even in block templates, parameters are always expressions (because they
use the subscript syntax at the use site).
In the body of the ``let_syntax``, a template is used like an expr macro.
Just like in an actual macro invocation, when the template is substituted,
any instances of its formal parameters on its RHS get replaced by the
argument values from the invocation site.
Note each instance of the same formal parameter gets a fresh copy of the
corresponding argument value.
**Substitution order**
This is a two-step process. In the first step, we apply template substitutions.
In the second step, we apply bare name substitutions to the result of the
first step. (So RHSs of templates may use any of the bare-name definitions.)
Within each step, the substitutions are applied **in the order specified**.
So if the bindings are ``((x, y), (y, z))``, then ``x`` transforms to ``z``.
But if the bindings are ``((y, z), (x, y))``, then ``x`` transforms to ``y``,
and only an explicit ``y`` at the use site transforms to ``z``.
**Notes**
Inspired by Racket's ``let-syntax`` and ``with-syntax``, see:
https://docs.racket-lang.org/reference/let.html
https://docs.racket-lang.org/reference/stx-patterns.html
**CAUTION**: This is essentially a toy macro system inside the real
macro system, implemented with the real macro system.
The usual caveats of macro systems apply. Especially, we support absolutely
no form of hygiene. Be very, very careful to avoid name conflicts.
``let_syntax`` is meant only for simple local substitutions where the
elimination of repetition can shorten the code and improve readability.
If you need to do something complex, prefer writing a real macro directly
in `mcpyrate`.
"""
if syntax not in ("expr", "block"):
raise SyntaxError("let_syntax is an expr and block macro only") # pragma: no cover
if syntax == "block" and kw['optional_vars'] is not None:
raise SyntaxError("let_syntax (block mode) does not take an as-part") # pragma: no cover
if syntax == "expr":
_let_syntax_expr_inside_out = partial(_let_syntax_expr, expand_inside=True)
return _destructure_and_apply_let(tree, args, expander, _let_syntax_expr_inside_out, letsyntax_mode=True)
else: # syntax == "block":
with dyn.let(_macro_expander=expander):
return _let_syntax_block(block_body=tree, expand_inside=True)
@parametricmacro
def abbrev(tree, *, args, syntax, expander, **kw):
"""[syntax, expr/block] Exactly like ``let_syntax``, but expands outside in.
Because this variant expands before any macros in the body, it can locally
rename other macros, e.g.::
abbrev[m << macrowithverylongname][
m[tree1] if m[tree2] else m[tree3]]
**CAUTION**: Because ``abbrev`` expands outside-in, and does not respect
boundaries of any nested ``abbrev`` invocations, it will not lexically scope
the substitutions. Instead, the outermost ``abbrev`` expands first, and then
any inner ones expand with whatever substitutions they have remaining.
If the same name is used on the LHS in two or more nested ``abbrev``,
any inner ones will likely raise an error (unless the outer substitution
just replaces a name with another), because also the names on the LHS
in the inner ``abbrev`` will undergo substitution when the outer
``abbrev`` expands.
"""
if syntax not in ("expr", "block"):
raise SyntaxError("abbrev is an expr and block macro only") # pragma: no cover
if syntax == "block" and kw['optional_vars'] is not None:
raise SyntaxError("abbrev (block mode) does not take an as-part") # pragma: no cover
# DON'T expand inner macro invocations first - outside-in ordering is the default, so we simply do nothing.
if syntax == "expr":
_let_syntax_expr_outside_in = partial(_let_syntax_expr, expand_inside=False)
return _destructure_and_apply_let(tree, args, expander, _let_syntax_expr_outside_in,
letsyntax_mode=True)
else:
with dyn.let(_macro_expander=expander):
return _let_syntax_block(block_body=tree, expand_inside=False)
@parametricmacro
def expr(tree, *, syntax, **kw):
"""[syntax, block] ``with expr:`` inside a ``with let_syntax:``."""
if syntax != "block":
raise SyntaxError("`expr` is a block macro only") # pragma: no cover
raise SyntaxError("`expr` is only valid at the top level of a block-mode `let_syntax` or `abbrev`") # pragma: no cover, not intended to hit the expander
@parametricmacro
def block(tree, *, syntax, **kw):
"""[syntax, block] ``with block:`` inside a ``with let_syntax:``."""
if syntax != "block":
raise SyntaxError("`block` is a block macro only") # pragma: no cover
raise SyntaxError("`block` is only valid at the top level of a block-mode `let_syntax` or `abbrev`") # pragma: no cover, not intended to hit the expander
# --------------------------------------------------------------------------------
# Syntax transformers
# let_syntax[lhs << rhs, ...][body]
# let_syntax[lhs << rhs, ...][[body0, ...]]
# let_syntax[[lhs << rhs, ...] in body]
# let_syntax[[lhs << rhs, ...] in [body0, ...]]
# let_syntax[body, where[lhs << rhs, ...]]
# let_syntax[[body0, ...], where[lhs << rhs, ...]]
#
# This transformer takes destructured input, with the bindings subform
# and the body already extracted, and supplied separately.
#
# bindings: sequence of ast.Tuple: (k1, v1), (k2, v2), ..., (kn, vn)
# expand_inside: if True, expand inside-out. If False, expand outside-in.
def _let_syntax_expr(bindings, body, *, expand_inside):
body = _implicit_do(body) # support the extra bracket syntax
if not bindings: # Optimize out a `let_syntax` with no bindings.
return body # pragma: no cover
names_seen = set()
templates = []
barenames = []
def register_bindings():
for line in bindings:
key, value = line.elts
name, args = _analyze_lhs(key)
if name in names_seen:
raise SyntaxError(f"duplicate '{name}'; names defined in the same let_syntax expr must be unique") # pragma: no cover
names_seen.add(name)
target = templates if args else barenames
target.append((name, args, value, "expr"))
if expand_inside:
bindings = dyn._macro_expander.visit_recursively(bindings)
body = dyn._macro_expander.visit_recursively(body)
register_bindings()
body = _substitute_templates(templates, body)
body = _substitute_barenames(barenames, body)
return body
# block version:
#
# with let_syntax:
# with block as xs:
# ...
# with block[a, ...] as xs:
# ...
# with expr as x:
# ...
# with expr[a, ...] as x:
# ...
# body0
# ...
#
# expand_inside: if True, expand inside-out. If False, expand outside-in.
def _let_syntax_block(block_body, *, expand_inside):
is_let_syntax = partial(is_unexpanded_block_macro, let_syntax, dyn._macro_expander)
is_abbrev = partial(is_unexpanded_block_macro, abbrev, dyn._macro_expander)
is_expr_declaration = partial(is_unexpanded_block_macro, expr, dyn._macro_expander)
is_block_declaration = partial(is_unexpanded_block_macro, block, dyn._macro_expander)
is_helper_macro = lambda tree: is_expr_declaration(tree) or is_block_declaration(tree)
def check_strays(ismatch, tree):
class StrayHelperMacroChecker(ASTVisitor): # TODO: refactor this?
def examine(self, tree):
if is_captured_value(tree):
return # don't recurse!
elif is_let_syntax(tree) or is_abbrev(tree):
return # don't recurse!
elif ismatch(tree):
# Expand the stray helper macro invocation, to trigger its `SyntaxError`
# with a useful message, and *make the expander generate a use site traceback*.
#
# (If we just `raise` here directly, the expander won't see the use site
# of the `with expr` or `with block`, but just that of the `do[]`.)
dyn._macro_expander.visit(tree)
self.generic_visit(tree)
StrayHelperMacroChecker().visit(tree)
check_stray_blocks_and_exprs = partial(check_strays, is_helper_macro)
names_seen = set()
def destructure_binding(withstmt, mode, kind):
assert mode in ("block", "expr")
assert kind in ("barename", "template")
ctxmanager = withstmt.items[0].context_expr
optvars = withstmt.items[0].optional_vars
if not optvars:
raise SyntaxError(f"'with {mode}:': expected an as-part") # pragma: no cover
if type(optvars) is not Name:
raise SyntaxError(f"'with {mode}:': expected exactly one name in the as-part") # pragma: no cover
name = optvars.id
if name in names_seen:
raise SyntaxError(f"duplicate '{name}'; as-parts in the same let_syntax block must be unique") # pragma: no cover
if kind == "template":
_, args = _analyze_lhs(ctxmanager) # syntactic limitation, can't place formal parameter list on the as-part
else: # kind == "barename":
args = []
if mode == "block":
with q as value:
if 1:
with a:
withstmt.body
else: # mode == "expr":
if len(withstmt.body) != 1:
raise SyntaxError("'with expr:' expected a one-item body (use a do[] if need more)") # pragma: no cover
theexpr = withstmt.body[0]
if type(theexpr) is not Expr:
raise SyntaxError("'with expr:' expected an expression body, got a statement") # pragma: no cover
value = theexpr.value # discard Expr wrapper in definition
names_seen.add(name)
return name, args, value, mode
def isbinding(tree):
for mode in ("block", "expr"):
if not (type(tree) is With and len(tree.items) == 1):
continue
ctxmanager = tree.items[0].context_expr
if type(ctxmanager) is Name and ctxmanager.id == mode:
return mode, "barename"
# expr[...], block[...]
if type(ctxmanager) is Subscript and type(ctxmanager.value) is Name and ctxmanager.value.id == mode:
return mode, "template"
# expr(...), block(...)
# parenthesis syntax for macro arguments TODO: Python 3.9+: remove once we bump minimum Python to 3.9
if type(ctxmanager) is Call and type(ctxmanager.func) is Name and ctxmanager.func.id == mode:
return mode, "template"
return False
templates = []
barenames = []
new_block_body = []
for stmt in block_body:
# `let_syntax` mode (expand_inside): respect lexical scoping of nested `let_syntax`/`abbrev`
expanded = False
if expand_inside and (is_let_syntax(stmt) or is_abbrev(stmt)):
stmt = dyn._macro_expander.visit_recursively(stmt)
expanded = True
stmt = _substitute_templates(templates, stmt)
stmt = _substitute_barenames(barenames, stmt)
binding_data = isbinding(stmt)
if binding_data:
name, args, value, mode = destructure_binding(stmt, *binding_data)
check_stray_blocks_and_exprs(value) # before expanding it!
if expand_inside and not expanded:
value = dyn._macro_expander.visit_recursively(value)
target = templates if args else barenames
target.append((name, args, value, mode))
else:
check_stray_blocks_and_exprs(stmt) # before expanding it!
if expand_inside and not expanded:
stmt = dyn._macro_expander.visit_recursively(stmt)
new_block_body.append(stmt)
new_block_body = eliminate_ifones(new_block_body)
if not new_block_body:
raise SyntaxError("let_syntax: expected at least one statement beside definitions") # pragma: no cover
return new_block_body
# -----------------------------------------------------------------------------
def _get_subscript_args(tree):
if sys.version_info >= (3, 9, 0): # Python 3.9+: the Index wrapper is gone.
theslice = tree.slice
else:
theslice = tree.slice.value
if type(theslice) is Tuple:
args = theslice.elts
else:
args = [theslice]
return args
# x --> "x", []
# f[a, b, c] --> "f", ["a", "b", "c"]
# f(a, b, c) --> "f", ["a", "b", "c"]
def _analyze_lhs(tree):
if type(tree) is Name: # bare name
name = tree.id
args = []
elif type(tree) is Subscript and type(tree.value) is Name: # template f[x, ...]
name = tree.value.id
args = [a.id for a in _get_subscript_args(tree)]
# parenthesis syntax for macro arguments TODO: Python 3.9+: remove once we bump minimum Python to 3.9
elif type(tree) is Call and type(tree.func) is Name: # template f(x, ...)
name = tree.func.id
if any(type(a) is Starred for a in tree.args): # *args (Python 3.5+)
raise SyntaxError("in template, only positional parameters supported (no *args)") # pragma: no cover
args = [a.id for a in tree.args]
if tree.keywords:
raise SyntaxError("in template, only positional parameters supported (no named args or **kwargs)") # pragma: no cover
else:
raise SyntaxError("expected a name (e.g. x) or a template (e.g. f(x, ...)) on the LHS") # pragma: no cover
return name, args
def _substitute_barename(name, value, tree, mode):
def isthisname(tree):
return type(tree) is Name and tree.id == name
def splice(tree):
class Splicer(ASTTransformer):
def transform(self, tree):
if is_captured_value(tree):
return tree # don't recurse!
def subst():
# Copy just to be on the safe side. Different instances may be
# edited differently by other macros expanded later.
return deepcopy(value)
# discard Expr wrapper (identifying a statement position) at use site
# when performing a block substitution
if mode == "block" and type(tree) is Expr and isthisname(tree.value):
tree = subst()
return tree
elif isthisname(tree):
if mode == "block":
raise SyntaxError(f"cannot substitute block '{name}' into expression position") # pragma: no cover
tree = subst()
return self.generic_visit(tree)
return self.generic_visit(tree)
return Splicer().visit(tree)
# If the new value is also bare name, perform the substitution (now as a string)
# also in the name part of def and similar, to support human intuition of "renaming".
if type(value) is Name:
postproc = partial(rename, name, value.id)
else:
postproc = lambda x: x
return postproc(splice(tree))
def _substitute_barenames(barenames, tree):
for name, _noformalparams, value, mode in barenames:
tree = _substitute_barename(name, value, tree, mode)
return tree
def _substitute_templates(templates, tree):
for name, formalparams, value, mode in templates:
def isthisfunc(tree):
if type(tree) is Subscript and type(tree.value) is Name and tree.value.id == name:
return True
# parenthesis syntax for macro arguments TODO: Python 3.9+: remove once we bump minimum Python to 3.9
if type(tree) is Call and type(tree.func) is Name and tree.func.id == name:
return True
return False
def subst(tree):
if type(tree) is Subscript:
theargs = _get_subscript_args(tree)
elif type(tree) is Call:
theargs = tree.args
else:
assert False
if len(theargs) != len(formalparams):
raise SyntaxError(f"let_syntax template '{name}' expected {len(formalparams)} arguments, got {len(theargs)}") # pragma: no cover
# make a fresh deep copy of the RHS to avoid destroying the template.
tree = deepcopy(value) # expand the f itself in f[x, ...] or f(x, ...)
for k, v in zip(formalparams, theargs): # expand the x, ... in the expanded form of f
# can't put statements in a Subscript or in a Call, so always treat args as expressions.
tree = _substitute_barename(k, v, tree, "expr")
return tree
def splice(tree):
class Splicer(ASTTransformer):
def transform(self, tree):
if is_captured_value(tree):
return tree # don't recurse!
# discard Expr wrapper (identifying a statement position) at use site
# when performing a block substitution
if mode == "block" and type(tree) is Expr and isthisfunc(tree.value):
tree = subst(tree.value)
return tree
elif isthisfunc(tree):
if mode == "block":
raise SyntaxError(f"cannot substitute block '{name}' into expression position") # pragma: no cover
tree = subst(tree)
return self.generic_visit(tree)
return self.generic_visit(tree)
return Splicer().visit(tree)
tree = splice(tree)
return tree
|
py | 1a5280b0118ab60b99714b9977daa52c30da553a | from __future__ import unicode_literals
import json
from django import forms
from django.utils.safestring import mark_safe
from .conf import settings
class MediumEditorTextarea(forms.Textarea):
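    """Textarea widget that renders an extra editable ``<div>`` wired to the
    textarea via data attributes, together with the MediumEditor JS/CSS assets
    needed to activate the editor on it."""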
def render(self, name, value, attrs=None, renderer=None):
if attrs is None:
attrs = {}
attrs.update({'class': 'django-mediumeditor-input'})
identifier = attrs.get('id', 'id_{}'.format(name))
params = {
'data-mediumeditor-textarea': identifier,
'class': 'django-mediumeditor-editable',
'id': '{}_editable'.format(identifier),
}
param_str = ' '.join('{}="{}"'.format(k, v) for k, v in params.items())
html = super(MediumEditorTextarea, self).render(name, value, attrs)
options = json.dumps(settings.MEDIUM_EDITOR_OPTIONS)
html = mark_safe(u'''{}
<div {}></div>
<script type="text/javascript">
MediumEditorOptions={};
</script>'''.format(html, param_str, options))
return html
class Media:
css = {'all': (
'//cdn.jsdelivr.net/medium-editor/latest/css/'
'medium-editor.min.css',
'css/mediumeditor/django-mediumeditor.css',
'//cdn.jsdelivr.net/medium-editor/latest/css/themes/{}.min.css'.format(
settings.MEDIUM_EDITOR_THEME
)
)}
js = (
'//cdn.jsdelivr.net/medium-editor/latest/js/medium-editor.min.js',
'js/mediumeditor/django-mediumeditor.js', )
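# Illustrative sketch (not part of the original package): using the widget in a
# form. `ArticleForm` and its `body` field are hypothetical names for the example.
#   class ArticleForm(forms.Form):
#       body = forms.CharField(widget=MediumEditorTextarea())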
|
py | 1a5281908a5329c61f48219e02a9c0ddd328bfd5 | import os
import tensorflow as tf
from configparser import ConfigParser
from utilities.set_dirs import get_conf_dir
conf_dir = get_conf_dir(debug=False)
parser = ConfigParser(os.environ)
parser.read(os.path.join(conf_dir, 'neural_network.ini'))
# AdamOptimizer
beta1 = parser.getfloat('optimizer', 'beta1')
beta2 = parser.getfloat('optimizer', 'beta2')
epsilon = parser.getfloat('optimizer', 'epsilon')
learning_rate = parser.getfloat('optimizer', 'learning_rate')
def variable_on_cpu(name, shape, initializer):
"""
    Create (or fetch) a TensorFlow variable that lives in CPU memory, so the
    surrounding graph can share a single copy of the weights no matter which
    device runs the compute operations.
"""
# Use the /cpu:0 device for scoped operations
with tf.device('/cpu:0'):
        # Create or fetch the variable with the requested name, shape and initializer
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var
def create_optimizer():
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon)
return optimizer
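# Illustrative sketch (not part of the original module): creating a CPU-resident
# variable and an Adam optimizer from the configured hyperparameters. The shape
# and initializer below are made up for the example (TF1-style graph mode).
if __name__ == '__main__':
    demo_weights = variable_on_cpu('demo_weights', [128, 29],
                                   tf.random_normal_initializer(stddev=0.05))
    demo_optimizer = create_optimizer()
    print(demo_weights, demo_optimizer)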
|
py | 1a52837a95b8f281b0c73e5cd3cb76413e061ce3 |
from .expression import Params, ParamsExpression
class Function(ParamsExpression):
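    """Base class for score functions; subclasses set ``__func_name__`` and may
    carry an optional ``filter`` and ``weight`` alongside their own parameters."""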
__visit_name__ = 'function'
def __init__(self, filter=None, weight=None, **kwargs):
self.filter = filter
self.weight = weight
super(Function, self).__init__(**kwargs)
class Weight(Function):
__func_name__ = 'weight'
__visit_name__ = 'weight_function'
def __init__(self, weight, filter=None):
super(Weight, self).__init__(filter=filter, weight=weight)
class FieldValueFactor(Function):
__func_name__ = 'field_value_factor'
def __init__(
self, field, factor=None, modifier=None, missing=None,
filter=None, **kwargs
):
super(FieldValueFactor, self).__init__(
field=field, factor=factor, modifier=modifier, missing=missing,
filter=filter, **kwargs
)
Factor = FieldValueFactor
class ScriptScore(Function):
__func_name__ = 'script_score'
def __init__(self, script, filter=None, **kwargs):
super(ScriptScore, self).__init__(
script=script, filter=filter, **kwargs
)
class RandomScore(Function):
__func_name__ = 'random_score'
def __init__(self, seed=None, filter=None, **kwargs):
super(RandomScore, self).__init__(seed=seed, filter=filter, **kwargs)
class DecayFunction(Function):
__visit_name__ = 'decay_function'
def __init__(
self, field, origin, scale, offset=None, decay=None,
multi_value_mode=None, **kwargs
):
self.field = field
self.decay_params = Params(
origin=origin, scale=scale, offset=offset, decay=decay,
)
super(DecayFunction, self).__init__(
multi_value_mode=multi_value_mode, **kwargs
)
class Gauss(DecayFunction):
__func_name__ = 'gauss'
class Exp(DecayFunction):
__func_name__ = 'exp'
class Linear(DecayFunction):
__func_name__ = 'linear'
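# Illustrative sketch (not part of the original module): constructing score
# functions the way a function_score query might consume them. The field names
# below are made up for the example.
#   freshness = Gauss('published_at', origin='now', scale='14d', decay=0.5)
#   popularity = FieldValueFactor('rank', factor=1.2, modifier='sqrt', missing=1)
#   tiebreak = RandomScore(seed=42)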
|
py | 1a5284023b842ed6b279fe1c9393e9cb8cd8d537 | import functools
import requests
import pyvo
import pyvo.auth.authsession
import warnings
from rubin_jupyter_utils.helpers import get_access_token
from rubin_jupyter_utils.config import RubinConfig
def deprecated(new_name=''):
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if new_name:
warnings.warn(f"Call to deprecated function {func.__name__}. " +
"This function may be removed at any point in the future. " +
f"Please use {new_name} instead.",
category=DeprecationWarning,
stacklevel=2)
else:
warnings.warn(f"Call to deprecated function {func.__name__}. " +
"This function may be removed at any point in the future.",
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
return deprecated
def _get_tap_url():
rc = RubinConfig()
tapurl = rc.external_tap_url or (rc.external_instance_url +
rc.tap_route)
return tapurl
def _get_auth():
tap_url = _get_tap_url()
s = requests.Session()
s.headers["Authorization"] = "Bearer " + get_access_token()
auth = pyvo.auth.authsession.AuthSession()
auth.credentials.set("lsst-token", s)
auth.add_security_method_for_url(tap_url, "lsst-token")
auth.add_security_method_for_url(tap_url + "/sync", "lsst-token")
auth.add_security_method_for_url(tap_url + "/async", "lsst-token")
auth.add_security_method_for_url(tap_url + "/tables", "lsst-token")
return auth
def get_tap_service():
return pyvo.dal.TAPService(_get_tap_url(), _get_auth())
@deprecated(new_name="get_tap_service")
def get_catalog():
return get_tap_service()
def retrieve_query(query_url):
return pyvo.dal.AsyncTAPJob(query_url, _get_auth())
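# Illustrative sketch (not part of the original module): applying the `deprecated`
# decorator. `old_helper` and `new_helper` are hypothetical names used only here.
if __name__ == "__main__":
    @deprecated(new_name="new_helper")
    def old_helper():
        return 42
    old_helper()  # emits a DeprecationWarning pointing callers at new_helper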
|
py | 1a5284a92771225aa82d3837629e219ea69bb278 | from django.db import models
import datetime
from django.utils import timezone
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=350)
pub_date = models.DateField('Date Published')
def was_published_recently(self):
now = timezone.now()
diff = now - datetime.timedelta(days=1)
return diff.date() <= self.pub_date <= now.date()
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
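# Illustrative usage sketch (not part of the original app), e.g. from `manage.py shell`:
#   q = Question(question_text="What's new?", pub_date=timezone.now().date())
#   q.was_published_recently()  # True, because pub_date falls within the last day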
|
py | 1a52851009f957a45ea51e62b11fdcc6302a15c4 | from collections import defaultdict, Sized
import numpy as np
import pandas as pd
from pandas._libs.lib import fast_zip
from pandas._libs.parsers import union_categoricals
from pandas.core.dtypes.common import is_numeric_dtype
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph._traversal import connected_components
def get_sequence_length(obj):
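    """Return ``len(obj)`` for real sequences, or a negative sentinel code:
    -1 if ``obj`` is a string or not a sized container (scalar-like), and
    -2 if ``obj`` is a sized container made up entirely of non-sequence nulls."""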
if isinstance(obj, str) or not isinstance(obj, Sized):
return -1
elif isinstance(obj, Sized) and all(not isinstance(i, Sized) and pd.isnull(i) for i in obj):
return -2
else:
return len(obj)
def flatten(frame,
index_name=None,
as_index=False,
keep_na=False,
columns=None,
tile_index=False):
"""
Flatten the input before the transformation
Parameters
----------
frame: pandas.DataFrame
index_name: str
        Name of the index to append to identify each item uniquely
keep_na: bool or str
Should non-sequences elements (or sequences full of None) be kept in the dataframe
as an empty row (value given is None and new index value is None also)
columns: tuple of str
Flatten only sequence in these columns if not None
Returns
-------
    pandas.DataFrame
        The flattened frame: one row per sequence element, with the new
        identifier exposed as an index level or as a column depending on
        `as_index`
"""
if isinstance(as_index, bool):
as_column = not as_index
elif isinstance(as_index, str) and index_name is None:
index_name = as_index
as_column = False
else:
raise Exception("as_index must be str or bool, and if str, index_name must be None")
if isinstance(frame, pd.Series):
res = flatten(pd.DataFrame({"X": frame}), index_name, as_column, keep_na, columns, tile_index)
new_frame = res["X"]
new_frame.name = frame.name
return new_frame
if keep_na is True:
keep_na = 'null_index'
elif keep_na is False:
keep_na = 'remove'
assert keep_na in ('null_index', 'as_single_item', 'remove')
assert isinstance(frame, pd.DataFrame), "Can only flatten DataFrame"
if columns is None:
columns = frame.columns
elif not isinstance(columns, (tuple, list)):
columns = [columns]
else:
columns = list(columns)
lengths = frame[columns].applymap(lambda seq: get_sequence_length(seq))
for col in frame.columns:
if col not in columns:
lengths[col] = -1
result_lengths = lengths.max(axis=1)
# Each column element will be expanded on multiple rows,
# even if it is a non-iterable object
# We must know before how many rows will the expansion take
# and we take this length from the maximum sequence size
if keep_na == 'remove':
bool_row_selector = result_lengths > 0
result_lengths = result_lengths[bool_row_selector]
selected_lengths = lengths[bool_row_selector]
frame = frame[bool_row_selector]
nulls = None
else:
nulls = result_lengths < 0
# Non sequence or sequence full of None will give rise to 1 row
result_lengths[nulls] = 1
selected_lengths = lengths
nulls = result_lengths.cumsum()[nulls] - 1
categoricals = {}
frame = frame.copy()
for col in frame.columns:
if hasattr(frame[col], 'cat'):
categoricals[col] = frame[col].cat.categories
frame[col] = frame[col].cat.codes
flattened = {col: [] for col in frame.columns}
for col_name, col in frame.iteritems():
for obj, res_length, length in zip(col.values, result_lengths, selected_lengths[col_name]):
if length >= 0: # we have a normal sequence
flattened[col_name].append(obj if isinstance(obj, pd.Series) else pd.Series(obj))
            # Otherwise it is a non-sequence: create as many rows as needed for it
else:
# -2 means sequence full of None, we put a None instead here
if length == -2:
obj = None
if res_length == 1:
flattened[col_name].append(pd.Series([obj]))
else:
flattened[col_name].append(pd.Series([obj] * res_length))
index = frame.index.repeat(result_lengths) if index_name is not None else None
for col_name in flattened:
flattened[col_name] = pd.concat(flattened[col_name], ignore_index=True)
if index is not None:
flattened[col_name].index = index
flattened = pd.DataFrame(flattened)
# flattened = pd.DataFrame(
# data={col_name: pd.concat(flattened[col_name], ignore_index=True) for col_name in flattened},
# index=frame.index.repeat(result_lengths) if index_name is not None else None)
for name, categories in categoricals.items():
flattened[name] = pd.Categorical.from_codes(flattened[name], categories=categories)
# Adds an index under the name `self.index_name` to identify uniquely every row
# of the frame
if index_name is not None:
if index_name in flattened.columns:
flattened.set_index(index_name, append=True, inplace=True)
else:
if tile_index:
new_index_values = np.concatenate([np.arange(s) for s in result_lengths])
flattened[index_name] = new_index_values
else:
new_index_values = np.arange(len(flattened))
flattened[index_name] = new_index_values
flattened[index_name] = flattened[index_name]
flattened.set_index(index_name, append=True, inplace=True)
if keep_na == 'null_index' and nulls is not None:
new_labels = np.arange(len(flattened))
# noinspection PyUnresolvedReferences
new_labels[nulls.values] = -1
flattened.index.set_codes(
new_labels, level=index_name, inplace=True)
if as_column:
flattened.reset_index(index_name, inplace=True)
flattened.reset_index(inplace=True, drop=True)
# flattened.index = flattened.index.remove_unused_levels()
return flattened
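def _example_flatten():
    # Minimal usage sketch for flatten(); column names and values are illustrative.
    df = pd.DataFrame({"doc_id": [0, 1], "token": [["a", "b"], ["c"]]})
    # Each list element becomes its own row; "token_id" numbers the elements
    # (restarting at 0 for every original row because tile_index=True).
    return flatten(df, index_name="token_id", as_index=False, tile_index=True)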
def make_merged_names(left_span_names, right_span_names, left_on, right_on, left_columns, right_columns,
suffixes=('_x', '_y')):
right_columns = set(right_columns) - set(right_on)
left_columns = set(left_columns) - set(left_on)
left_merged = [name + (suffixes[0] if name in right_columns else '') for name in left_span_names]
right_merged = [name + (suffixes[1] if name in left_columns else '') for name in right_span_names]
return left_merged, right_merged
def make_merged_names_map(left_columns, right_columns, left_on, right_on, suffixes=('_x', '_y')):
right_columns = set(right_columns) - set(right_on)
left_columns = set(left_columns) - set(left_on)
left_merged = [name + (suffixes[0] if name in right_columns else '') for name in left_columns]
right_merged = [name + (suffixes[1] if name in left_columns else '') for name in right_columns]
return dict(zip(left_columns, left_merged)), dict(zip(right_columns, right_merged))
def merge_with_spans(
left, right=None,
how='inner',
on=None,
left_on=None,
right_on=None,
suffixes=('_x', '_y'),
span_policy='partial_strict',
placeholder_columns=(),
**kwargs):
"""
Just like pandas.merge, but handles the merging of spans
    Any tuple in the "on" parameter will be considered a (begin, end) span.
    How those spans are matched is controlled by `span_policy`.
Parameters
----------
left: pd.DataFrame
right: pd.DataFrame
how: str
"inner", "outer", "left", "right"
on: list of (str or tuple of str)
left_on: list of (str or tuple of str)
right_on: list of (str or tuple of str)
suffixes: list of str
span_policy: str
How to merge spans ?
One of: "partial", "exact", "partial_strict"
placeholder_columns:
Zero will be put as a value instead of nan for any empty cell in those columns after the merge
kwargs: any
Any kwargs for the pd.merge function
Returns
-------
pd.DataFrame
"""
if right is None:
right = left
left = left.copy()
right = right.copy()
if isinstance(on, str):
on = [on]
if left_on is None:
left_on = on
if right_on is None:
right_on = on
left_columns = left.columns if hasattr(left, 'columns') else [left.name]
right_columns = right.columns if hasattr(right, 'columns') else [right.name]
if left_on is None and right_on is None:
left_on = right_on = list(set(left_columns) & set(right_columns))
left_on_spans = [o for o in left_on if isinstance(o, tuple)]
right_on_spans = [o for o in right_on if isinstance(o, tuple)]
left_on = [c for c in left_on if not isinstance(c, tuple)] # flatten_sequence(left_on)
right_on = [c for c in right_on if not isinstance(c, tuple)] # flatten_sequence(right_on)
left_names, right_names = make_merged_names(
left_columns, right.columns,
left_on=left_on,
right_on=right_on,
left_columns=left_columns, right_columns=right_columns, suffixes=suffixes)
left_names_map = dict(zip(left_columns, left_names))
right_names_map = dict(zip(right_columns, right_names))
categoricals = {}
for left_col, right_col in zip(left_on, right_on):
left_cat = getattr(left[left_col] if hasattr(left, 'columns') else left, 'cat', None)
right_cat = getattr(right[right_col] if hasattr(right, 'columns') else right, 'cat', None)
if left_cat is not None or right_cat is not None:
if (left_cat and right_cat and not (left_cat.categories is right_cat.categories)) or (
(left_cat is None) != (right_cat is None)):
left[left_col] = left[left_col].astype('category')
right[right_col] = right[right_col].astype('category')
cat_merge = union_categoricals([left[left_col], right[right_col]])
if hasattr(left, 'columns'):
left[left_col] = cat_merge[:len(left)]
else:
left = cat_merge[:len(left)]
if hasattr(right, 'columns'):
right[right_col] = cat_merge[len(left):]
else:
right = cat_merge[len(left):]
categoricals[left_names_map[left_col]] = left[left_col].cat.categories
categoricals[right_names_map[right_col]] = right[right_col].cat.categories
if hasattr(left, 'columns'):
left[left_col] = left[left_col].cat.codes
else:
left = left.cat.codes
if hasattr(right, 'columns'):
right[right_col] = right[right_col].cat.codes
else:
right = right.cat.codes
if len(left_on_spans) == 0:
merged = pd.merge(left, right, left_on=left_on, right_on=right_on, suffixes=suffixes, how=how, **kwargs)
else:
if how != 'inner':
left['_left_index'] = np.arange(len(left))
right['_right_index'] = np.arange(len(right))
merged = pd.merge(left, right, left_on=left_on, right_on=right_on, suffixes=suffixes, how='inner', **kwargs)
for i, (left_span_names, right_span_names) in enumerate(zip(left_on_spans, right_on_spans)):
(left_begin, left_end), (right_begin, right_end) = make_merged_names(
left_span_names, right_span_names, left_on=left_on, right_on=right_on,
left_columns=left.columns, right_columns=right_columns, suffixes=suffixes)
merged[f'overlap_size_{i}'] = np.minimum(merged[left_end], merged[right_end]) - np.maximum(merged[left_begin], merged[right_begin])
if span_policy != "none":
results = []
chunk_size = 1000000
for chunk_i in range(0, len(merged), chunk_size):
if span_policy == "partial_strict":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({right_end} > {left_begin} and {left_end} > {right_begin})'))
elif span_policy == "partial":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({right_end} >= {left_begin} and {left_end} >= {right_begin})'))
elif span_policy == "exact":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({left_begin} == {right_begin} and {left_end} == {right_end})'))
else:
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(span_policy))
if len(results):
merged = pd.concat(results, sort=False, ignore_index=True)
else:
merged = merged.iloc[:0]
elif span_policy == "none":
pass
else:
raise Exception(f"Unrecognized policy {span_policy}")
if how != 'inner':
if how in ('left', 'outer'):
missing = left[~left['_left_index'].isin(merged['_left_index'])].copy()
missing = missing.rename(left_names_map, axis=1)
for col in right.columns:
if hasattr(right[col], 'cat') and right_names_map[col] not in missing.columns:
missing[right_names_map[col]] = pd.Categorical([None] * len(missing),
categories=right[col].cat.categories)
for col in placeholder_columns:
if col not in left_on and right_names_map.get(col, col) not in left.columns:
missing[right_names_map.get(col, col)] = 0 # -np.arange(len(missing)) - 1
merged = pd.concat([merged, missing.rename(dict(zip(left.columns, left_names)), axis=1)], sort=False,
ignore_index=True)
if how in ('right', 'outer'):
missing = right[~right['_right_index'].isin(merged['_right_index'])].copy()
missing = missing.rename(right_names_map, axis=1)
for col in left.columns:
if hasattr(left[col], 'cat') and left_names_map[col] not in missing.columns:
missing[left_names_map[col]] = pd.Categorical([None] * len(missing),
categories=left[col].cat.categories)
for col in placeholder_columns:
if col not in right_on and left_names_map.get(col, col) not in right.columns:
missing[left_names_map.get(col, col)] = 0 # -np.arange(len(missing)) - 1
merged = pd.concat([merged, missing.rename(dict(zip(right.columns, right_names)), axis=1)], sort=False,
ignore_index=True)
merged = merged.sort_values(['_left_index', '_right_index'])
del merged['_left_index']
del merged['_right_index']
merged = merged.reset_index(drop=True)
for col, categories in categoricals.items():
merged[col] = pd.Categorical.from_codes(merged[col].fillna(-1).astype(int), categories=categories)
return merged
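def _example_merge_with_spans():
    # Minimal usage sketch; column names and values are illustrative. The
    # ("begin", "end") tuple in `on` is treated as a span and matched by
    # overlap rather than by exact equality.
    left = pd.DataFrame({"doc_id": [0, 0], "begin": [0, 10], "end": [5, 20], "label": ["A", "B"]})
    right = pd.DataFrame({"doc_id": [0], "begin": [3], "end": [12], "source": ["x"]})
    # Keep pairs of rows from the same doc_id whose spans strictly overlap.
    return merge_with_spans(left, right, on=["doc_id", ("begin", "end")],
                            how="inner", span_policy="partial_strict")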
def make_id_from_merged(*indices_arrays, same_ids=False, apply_on=None):
"""
Compute new ids from connected components by looking at `indices_arrays`
Parameters
----------
indices_arrays: collections.Sequence
1d array of positive integers
same_ids: bool
        Do the multiple arrays represent the same ids ? (a 3 in one column should therefore be
        connected to a 3 in another, even if they are not on the same row)
    apply_on: list of (int, any)
        Return the new ids matching the old ones:
        for each (index, vector) in apply_on, return the new ids of the values in
        `vector`, interpreted with the same numbering as the array at position
        `index` in `indices_arrays`
Returns
-------
list of np.ndarray
"""
if not same_ids:
indices_arrays, unique_objects = zip(*(factorize_rows(array, return_categories=True) for array in indices_arrays))
else:
indices_arrays, unique_objects = factorize_rows(indices_arrays, return_categories=True)
unique_objects = [unique_objects] * len(indices_arrays)
offset = max(indices_array.max() for indices_array in indices_arrays) + 1
N = offset * (len(indices_arrays) + 1)
if same_ids:
N = offset
offset = 0
offseted_ids = [s + i * offset for i, s in enumerate(indices_arrays)]
left_ids, right_ids = zip(*[(offseted_ids[i], offseted_ids[j])
for i in range(0, len(indices_arrays) - 1)
for j in range(i + 1, len(indices_arrays))])
left_ids = np.concatenate(left_ids)
right_ids = np.concatenate(right_ids)
_, matches = connected_components(csr_matrix((np.ones(len(left_ids)), (left_ids, right_ids)), shape=(N, N)))
matches = pd.factorize(matches)[0]
if apply_on is None:
return [
matches[s]
for s in offseted_ids
]
else:
return [
matches[factorize_rows(s, categories=unique_objects[i], return_categories=False) + i * offset]
for i, s in apply_on
]
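def _example_make_id_from_merged():
    # Minimal sketch; the id values below are illustrative. Rows 0 and 1 share
    # left id 0 and rows 1 and 2 share right id 6, so the first three rows end
    # up in one connected component while the last row gets its own id.
    left = pd.Series([0, 0, 1, 2], name="left_id")
    right = pd.Series([5, 6, 6, 7], name="right_id")
    new_left, new_right = make_id_from_merged(left, right)
    return new_left, new_right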
def df_to_csr(rows, cols, data=None, n_rows=None, n_cols=None):
"""
Transforms a dataframe into a csr_matrix
Parameters
----------
data: pd.Series
        Data column (a column full of True will be used if None)
rows: pd.Series
Column containing row indices (can be Categorical and then codes will be used)
cols: pd.Series
Column containing column indices (can be Categorical and then codes will be used)
n_rows: int
n_cols: int
Returns
-------
csr_matrix
"""
if data is None:
data = np.ones(len(rows), dtype=bool)
if hasattr(rows, 'cat'):
n_rows = len(rows.cat.categories)
rows, rows_cat = rows.cat.codes, rows.cat.categories
else:
n_rows = n_rows or (rows.max() + 1 if len(rows) > 0 else 0)
if hasattr(cols, 'cat'):
n_cols = len(cols.cat.categories)
cols, cols_cat = cols.cat.codes, cols.cat.categories
else:
n_cols = n_cols or (cols.max() + 1 if len(cols) > 0 else 0)
return csr_matrix((np.asarray(data), (np.asarray(rows), np.asarray(cols))), shape=(n_rows, n_cols))
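def _example_df_to_csr():
    # Minimal sketch: build a sparse indicator matrix from two index columns
    # (column names and shapes are illustrative).
    df = pd.DataFrame({"doc": [0, 0, 2], "term": [1, 3, 0]})
    mat = df_to_csr(df["doc"], df["term"], n_rows=3, n_cols=4)
    return mat.toarray()  # boolean 3x4 array, True at (0, 1), (0, 3) and (2, 0)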
def df_to_flatarray(rows, data, n_rows=None):
"""
Transforms a dataframe into a flat array
Parameters
----------
data: pd.Series
        Data column (values written at the corresponding row indices)
rows: pd.Series
Column containing row indices (can be Categorical and then codes will be used)
n_rows: int
Returns
-------
np.ndarray
"""
if hasattr(rows, 'cat'):
n_rows = len(rows.cat.categories)
rows, rows_cat = rows.cat.codes, rows.cat.categories
else:
n_rows = n_rows or (rows.max() + 1)
res = np.zeros(n_rows, dtype=data.dtype)
res[rows] = np.asarray(data)
return res
def csr_to_df(csr, row_categories=None, col_categories=None, row_name=None, col_name=None, value_name=None):
"""
Convert a csr_matrix to a dataframe
Parameters
----------
csr: csr_matrix
row_categories: any
Categories to rebuild the real object from their row indices
col_categories: any
Categories to rebuild the real object from their col indices
row_name: str
What name to give to the column built from the row indices
col_name: str
What name to give to the column built from the col indices
value_name:
What name to give to the column built from the values
If None, no value column will be built
Returns
-------
pd.DataFrame
"""
csr = csr.tocoo()
rows, cols, values = csr.row, csr.col, csr.data
if isinstance(row_categories, pd.DataFrame):
rows_df = row_categories.iloc[rows]
elif isinstance(row_categories, pd.Series):
rows_df = pd.DataFrame({row_categories.name: row_categories.iloc[rows]})
elif isinstance(row_categories, pd.CategoricalDtype):
rows_df = pd.DataFrame({row_name: pd.Categorical.from_codes(rows, dtype=row_categories)})
else:
rows_df = pd.DataFrame({row_name: rows})
if isinstance(col_categories, pd.DataFrame):
cols_df = col_categories.iloc[cols]
elif isinstance(col_categories, pd.Series):
cols_df = pd.DataFrame({col_categories.name: col_categories.iloc[cols]})
elif isinstance(col_categories, pd.CategoricalDtype):
cols_df = pd.DataFrame({col_name: pd.Categorical.from_codes(cols, dtype=col_categories)})
else:
cols_df = pd.DataFrame({col_name: cols})
res = (rows_df.reset_index(drop=True), cols_df.reset_index(drop=True))
if value_name is not None:
res = res + (pd.DataFrame({value_name: values}),)
return pd.concat(res, axis=1)
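def _example_csr_to_df():
    # Minimal sketch: go back from a sparse matrix to a long-format frame,
    # keeping the nonzero values in a "value" column (names illustrative).
    mat = csr_matrix(np.array([[0, 2], [1, 0]]))
    return csr_to_df(mat, row_name="row", col_name="col", value_name="value")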
def factorize_rows(rows, categories=None, group_nans=True, subset=None, freeze_categories=True, return_categories=True):
if not isinstance(rows, list):
was_list = False
all_rows = [rows]
else:
all_rows = rows
was_list = True
del rows
not_null_subset = (subset if subset is not None else all_rows[0].columns if hasattr(all_rows[0], 'columns') else [all_rows[0].name])
cat_arrays = [[] for _ in not_null_subset]
for rows in (categories, *all_rows) if categories is not None else all_rows:
for (col_name, col), dest in zip(([(0, rows)] if len(rows.shape) == 1 else rows[subset].items() if subset is not None else rows.items()), cat_arrays):
dest.append(np.asarray(col))
cat_arrays = [np.concatenate(arrays) for arrays in cat_arrays]
is_not_nan = None
if not group_nans:
is_not_nan = ~pd.isna(np.stack(cat_arrays, axis=1)).any(1)
cat_arrays = [arrays[is_not_nan] for arrays in cat_arrays]
if len(cat_arrays) > 1:
relative_values, unique_values = pd.factorize(fast_zip(cat_arrays))
else:
relative_values, unique_values = pd.factorize(cat_arrays[0])
if freeze_categories and categories is not None:
relative_values[relative_values >= len(categories)] = -1
if not group_nans:
new_relative_values = np.full(is_not_nan.shape, fill_value=-1, dtype=relative_values.dtype)
new_relative_values[is_not_nan] = relative_values
new_relative_values[~is_not_nan] = len(unique_values) + np.arange((~is_not_nan).sum())
relative_values = new_relative_values
offset = len(categories) if categories is not None else 0
res = []
for rows in all_rows:
new_rows = relative_values[offset:offset + len(rows)]
if isinstance(rows, (pd.DataFrame, pd.Series)):
new_rows = pd.Series(new_rows)
new_rows.index = rows.index
new_rows.name = "+".join(not_null_subset)
res.append(new_rows)
offset += len(rows)
if categories is None and return_categories:
if isinstance(all_rows[0], pd.DataFrame):
if len(cat_arrays) > 1:
categories = pd.DataFrame(dict(zip(not_null_subset, [np.asarray(l) for l in zip(*unique_values)])))
else:
categories = pd.DataFrame({not_null_subset[0]: unique_values})
categories = categories.astype({k: dtype for k, dtype in next(rows for rows in all_rows if len(rows)).dtypes.items() if k in not_null_subset})
elif isinstance(all_rows[0], pd.Series):
categories = pd.Series(unique_values)
categories.name = all_rows[0].name
categories = categories.astype(next(rows.dtype for rows in all_rows if len(rows)))
else:
categories = np.asarray([l for l in zip(*unique_values)])
if not was_list:
res = res[0]
if not return_categories:
return res
return res, categories
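def _example_factorize_rows():
    # Minimal sketch: assign one integer code per distinct (doc, begin) pair;
    # column names and values are illustrative.
    df = pd.DataFrame({"doc": [0, 0, 1], "begin": [0, 5, 0]})
    codes, cats = factorize_rows(df, subset=["doc", "begin"], return_categories=True)
    # `codes` is a Series of integer ids (0, 1, 2 here) and `cats` holds the
    # distinct rows, so later frames can be encoded consistently by passing
    # categories=cats.
    return codes, cats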
def normalize_vocabularies(dfs, vocabularies=None, train_vocabularies=True, unk=None, verbose=0):
"""
Categorize the columns of the dataframes so that they share the same
categories if they share the same columns
    If a column's name ends with '_id', it is not categorized since it is not something we want to train on
Parameters
----------
dfs: list of pd.DataFrame
DataFrame whose columns will be categorized
vocabularies: dict or None
Existing vocabulary to use if any
train_vocabularies: bool or dict of (str, bool)
Which category to extend/create in the voc ?
unk: dict of (str, any)
Which filler should we put for an unknown object if we cannot train the corresponding voc ?
verbose: int
Returns
-------
list of pd.DataFrame, dict
"""
# Define label vocabulary
if unk is None:
unk = {}
if vocabularies is None:
vocabularies = {}
voc_order = list(vocabularies.keys())
if train_vocabularies is False:
train_vocabularies = defaultdict(lambda: False)
else:
train_vocabularies_ = defaultdict(lambda: True)
if isinstance(train_vocabularies, dict):
train_vocabularies_.update(train_vocabularies)
train_vocabularies = train_vocabularies_
del train_vocabularies_
for col_name in vocabularies:
if col_name not in train_vocabularies:
train_vocabularies[col_name] = False
for df in dfs:
for col_name in df:
if not col_name.endswith('_id') and not is_numeric_dtype(df[col_name].dtype):
                # Touch the entry so the defaultdict records a decision for this
                # column, keeping any value that was already set.
                train_vocabularies[col_name] = bool(train_vocabularies[col_name])
for col_name, will_train in train_vocabularies.items():
if will_train and verbose:
print(f"Will train vocabulary for {col_name}")
for df in dfs:
for col_name in df:
if hasattr(df[col_name], 'cat') and col_name not in vocabularies and not col_name.endswith('_id'):
if verbose:
print(f"Discovered existing vocabulary ({len(df[col_name].cat.categories)} entities) for {col_name}")
vocabularies[col_name] = list(df[col_name].dtype.categories)
for voc_name, train_voc in train_vocabularies.items():
if train_voc:
voc = list(vocabularies.get(voc_name, []))
if voc_name in unk and unk[voc_name] not in voc:
voc.append(unk[voc_name])
if hasattr(voc, 'categories'):
voc = list(voc.categories)
for df in dfs:
if voc_name in df:
voc.extend(df[voc_name].astype("category").cat.categories)
voc = pd.factorize(voc)[1]
dtype = pd.CategoricalDtype(pd.factorize(voc)[1])
for df in dfs:
if voc_name in df:
df[voc_name] = df[voc_name].astype(dtype)
vocabularies[voc_name] = voc
if voc_name in unk:
df[voc_name].fillna(unk[voc_name], inplace=True)
else:
voc = vocabularies.get(voc_name)
if not hasattr(voc, 'categories'):
voc = pd.CategoricalDtype(voc)
for df in dfs:
if voc_name in df:
df[voc_name] = df[voc_name].astype(voc)
if verbose:
unk_msg = f"unk {unk[voc_name]}" if voc_name in unk else "no unk"
print(f"Normalized {voc_name}, with given vocabulary and {unk_msg}")
if voc_name in unk:
df[voc_name].fillna(unk[voc_name], inplace=True)
# Reorder vocabularies to keep same order as the vocabulary passed in parameters
vocabularies = dict((*((c, vocabularies[c]) for c in voc_order if c in vocabularies),
*((c, vocabularies[c]) for c in vocabularies if c not in voc_order)))
# Reorder dataframes according to vocabulary order
dfs = [
df[[*(c for c in vocabularies if c in df.columns), *(c for c in df.columns if c not in vocabularies)]]
for df in dfs
]
return dfs, vocabularies
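def _example_normalize_vocabularies():
    # Minimal sketch: make two frames share one categorical dtype for a common
    # string column; column and label names are illustrative.
    train = pd.DataFrame({"label": ["PER", "LOC"]})
    test = pd.DataFrame({"label": ["LOC", "ORG"]})
    (train, test), vocs = normalize_vocabularies([train, test], unk={"label": "O"})
    # Both frames now use the same category set and vocs["label"] lists it,
    # including the "O" filler used for unknown values.
    return train, test, vocs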
class FasterGroupBy:
def __init__(self, groupby_object, dtypes, name=None):
self.groupby_object = groupby_object
self.dtypes = dtypes
self.name = name
def _retype(self, res):
if self.name is None:
return res.astype(self.dtypes)
return (res.astype(self.dtypes) if self.dtypes is not None else res).reset_index().rename({0: self.name}, axis=1)
def agg(self, *args, **kwargs):
return self._retype(self.groupby_object.agg(*args, **kwargs))
def apply(self, *args, **kwargs):
return self._retype(self.groupby_object.apply(*args, **kwargs))
def __getitem__(self, item):
return FasterGroupBy(self.groupby_object[item], self.dtypes.get(item, None), item if not isinstance(item, (list, tuple)) else None)
class NLStructAccessor(object):
def __init__(self, pandas_obj):
self._obj = pandas_obj
def factorize(self, subset=None, categories=None, group_nans=False,
return_categories=False, freeze_categories=True):
return factorize_rows(self._obj,
subset=subset,
categories=categories,
group_nans=group_nans,
return_categories=return_categories, freeze_categories=freeze_categories)
def flatten(self, *args, **kwargs):
return flatten(self._obj, *args, **kwargs)
def to_flatarray(self, row_column, data_column, n_rows=None):
return df_to_flatarray(self._obj[row_column], self._obj[data_column], n_rows=n_rows)
def to_csr(self, row_column, col_column, data_column=None, n_rows=None, n_cols=None):
return df_to_csr(self._obj[row_column], self._obj[col_column], self._obj[data_column] if data_column is not None else None,
n_rows=n_rows, n_cols=n_cols)
def groupby(self, by, *args, decategorize=None, as_index=False, observed=True, **kwargs):
if not as_index:
if decategorize is None:
decategorize = by
new_dtypes = {k: v if not hasattr(v, 'categories') else v.categories.dtype for k, v in self._obj.dtypes[decategorize].items()}
return FasterGroupBy(self._obj.astype(new_dtypes).groupby(by=by, *args, as_index=as_index, observed=observed, **kwargs), self._obj.dtypes[decategorize])
else:
return self._obj.groupby(by=by, *args, as_index=as_index, **kwargs)
def groupby_assign(self, by, agg, as_index=False, observed=True, **kwargs):
res = self._obj.assign(_index=np.arange(len(self._obj)))
res = res.drop(columns=list(agg.keys())).merge(
# .astype({key: "category" for key in mentions_cluster_ids})
res.groupby(by, observed=observed, **kwargs)
.agg({**agg, "_index": tuple}).reset_index(drop=True)
.nlstruct.flatten("_index"),
how='left',
on='_index',
).drop(columns=["_index"])
if as_index:
res = res.set_index(by)
return res
pd.api.extensions.register_dataframe_accessor("nlstruct")(NLStructAccessor)
pd.api.extensions.register_series_accessor("nlstruct")(NLStructAccessor)
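# With the accessors registered above, the helpers are reachable directly from
# any DataFrame or Series; a quick sketch (column names illustrative):
#   df = pd.DataFrame({"doc": [0, 1], "token": [["a", "b"], ["c"]]})
#   flat = df.nlstruct.flatten(index_name="token_id")   # one row per token
#   codes = flat.nlstruct.factorize(subset=["doc"])     # integer codes per doc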
|
py | 1a5285422d7d4124e987636ba4d13451f24b9b43 | '''
Function:
    Qianqian music downloader: http://music.taihe.com/
Author:
    Charles
WeChat official account:
    Charles的皮卡丘
Disclaimer:
    This code is for learning and exchange only and must not be used for commercial or illegal purposes.
'''
import os
import click
import requests
from contextlib import closing
'''
Input:
    -mode: search (search mode) / download (download mode)
    --search mode:
    ----songname: song name to search for
    --download mode:
    ----need_down_list: list of song names to download
    ----savepath: directory in which to save the downloaded songs
Return:
    -search mode:
    --search_results: search results
    -download mode:
    --downed_list: list of song names that were downloaded successfully
'''
class qianqian():
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'referer': 'http://music.baidu.com/'
}
self.search_url = "http://musicapi.qianqian.com/v1/restserver/ting"
self.player_url = 'http://music.baidu.com/data/music/links'
self.search_results = {}
    '''External entry point'''
def get(self, mode='search', **kwargs):
if mode == 'search':
songname = kwargs.get('songname')
self.search_results = self.__searchBySongname(songname)
return self.search_results
elif mode == 'download':
need_down_list = kwargs.get('need_down_list')
downed_list = []
savepath = kwargs.get('savepath') if kwargs.get('savepath') is not None else './results'
if need_down_list is not None:
for download_name in need_down_list:
songid = self.search_results.get(download_name)
params = {"songIds": songid}
res = requests.get(self.player_url, params=params, headers=self.headers)
if not res.json().get('data').get('songList'):
continue
download_url = res.json().get('data').get('songList')[0].get('songLink')
if not download_url:
continue
res = self.__download(download_name, download_url, savepath)
if res:
downed_list.append(download_name)
return downed_list
else:
raise ValueError('mode in qianqian().get must be <search> or <download>...')
    '''Download a song'''
def __download(self, download_name, download_url, savepath):
if not os.path.exists(savepath):
os.mkdir(savepath)
download_name = download_name.replace('<', '').replace('>', '').replace('\\', '').replace('/', '') \
.replace('?', '').replace(':', '').replace('"', '').replace(':', '') \
.replace('|', '').replace('?', '').replace('*', '')
savename = 'qianqian_{}'.format(download_name)
count = 0
while os.path.isfile(os.path.join(savepath, savename+'.mp3')):
count += 1
savename = 'qianqian_{}_{}'.format(download_name, count)
savename += '.mp3'
try:
            print('[qianqian-INFO]: Downloading --> %s' % savename.split('.')[0])
with closing(requests.get(download_url, headers=self.headers, stream=True, verify=False)) as res:
total_size = int(res.headers['content-length'])
if res.status_code == 200:
label = '[FileSize]:%0.2f MB' % (total_size/(1024*1024))
with click.progressbar(length=total_size, label=label) as progressbar:
with open(os.path.join(savepath, savename), "wb") as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
progressbar.update(1024)
else:
raise RuntimeError('Connect error...')
return True
except:
return False
    '''Search by song name'''
def __searchBySongname(self, songname):
params = {
"query": songname,
"method": "baidu.ting.search.common",
"format": "json",
"page_no": 1,
"page_size": 15
}
res = requests.get(self.search_url, params=params, headers=self.headers)
results = {}
for song in res.json()['song_list']:
songid = song.get('song_id')
singers = song.get('author').replace("<em>", "").replace("</em>", "")
album = song.get('album_title').replace("<em>", "").replace("</em>", "")
download_name = '%s--%s--%s' % (song.get('title').replace("<em>", "").replace("</em>", ""), singers, album)
count = 0
while download_name in results:
count += 1
download_name = '%s(%d)--%s--%s' % (song.get('title'), count, singers, album)
results[download_name] = songid
return results
'''For testing purposes'''
if __name__ == '__main__':
qianqian_downloader = qianqian()
res = qianqian_downloader.get(mode='search', songname='尾戒')
qianqian_downloader.get(mode='download', need_down_list=list(res.keys())[:2]) |
py | 1a52864af301ec1aaf53cb5aaaa053527159fd2e | # License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.binning import QuantileDiscretizer
ks.set_option("compute.default_index_type", "distributed-sequence")
@pytest.fixture
def data():
n_bins = 4
X = pd.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"C": ["a", "b", "c", "d", "e", "f"],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = pd.DataFrame(
{
"A": {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
"B": {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
"C": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f"},
"D": {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
"F": {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
"A__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B__bin": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"D__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F__bin": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_int16():
n_bins = 4
X = pd.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"C": ["a", "b", "c", "d", "e", "f"],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X[list("ABDF")] = X[list("ABDF")].astype(np.int16)
X_expected = pd.DataFrame(
{
"A": {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
"B": {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
"C": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f"},
"D": {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
"F": {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
"A__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B__bin": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"D__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F__bin": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
X_expected[list("ABDF")] = X_expected[list("ABDF")].astype(np.int16)
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_inplace():
n_bins = 4
X = pd.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"C": ["a", "b", "c", "d", "e", "f"],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = pd.DataFrame(
{
"A": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"C": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f"},
"D": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
obj = QuantileDiscretizer(n_bins, inplace=True).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_no_num():
X = pd.DataFrame({"C": ["a", "b", "c", "d", "e", "f"]})
X_expected = pd.DataFrame({"C": ["a", "b", "c", "d", "e", "f"]})
n_bins = 3
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_num():
n_bins = 4
X = pd.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = pd.DataFrame(
{
"A": {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
"B": {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
"D": {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
"F": {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
"A__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B__bin": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"D__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F__bin": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_num_inplace():
n_bins = 4
X = pd.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = pd.DataFrame(
{
"A": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"D": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
obj = QuantileDiscretizer(n_bins, inplace=True).fit(X)
return obj, X, X_expected
###
@pytest.fixture
def data_ks():
n_bins = 4
X = ks.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"C": ["a", "b", "c", "d", "e", "f"],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = pd.DataFrame(
{
"A": {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
"B": {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
"C": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f"},
"D": {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
"F": {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
"A__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B__bin": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"D__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F__bin": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_int16_ks():
n_bins = 4
X = ks.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"C": ["a", "b", "c", "d", "e", "f"],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X[list("ABDF")] = X[list("ABDF")].astype(np.int16)
X_expected = pd.DataFrame(
{
"A": {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
"B": {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
"C": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f"},
"D": {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
"F": {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
"A__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B__bin": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"D__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F__bin": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
X_expected[list("ABDF")] = X_expected[list("ABDF")].astype(np.int16)
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_inplace_ks():
n_bins = 4
X = ks.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"C": ["a", "b", "c", "d", "e", "f"],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = pd.DataFrame(
{
"A": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"C": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f"},
"D": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
obj = QuantileDiscretizer(n_bins, inplace=True).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_no_num_ks():
n_bins = 3
X = ks.DataFrame({"C": ["a", "b", "c", "d", "e", "f"]})
X_expected = pd.DataFrame({"C": ["a", "b", "c", "d", "e", "f"]})
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_num_ks():
n_bins = 4
X = ks.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = pd.DataFrame(
{
"A": {0: 7.25, 1: 71.2833, 2: 7.925, 3: 53.1, 4: 8.05, 5: 8.4583},
"B": {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0},
"D": {0: 22.0, 1: 38.0, 2: 26.0, 3: 35.0, 4: 35.0, 5: 31.2},
"F": {0: 3, 1: 1, 2: 2, 3: 1, 4: 2, 5: 3},
"A__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B__bin": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"D__bin": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F__bin": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
obj = QuantileDiscretizer(n_bins).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_num_inplace_ks():
n_bins = 4
X = ks.DataFrame(
{
"A": [7.25, 71.2833, 7.925, 53.1, 8.05, 8.4583],
"B": [1, 1, 0, 1, 0, 0],
"D": [22.0, 38.0, 26.0, 35.0, 35.0, 31.2],
"F": [3, 1, 2, 1, 2, 3],
}
)
X_expected = pd.DataFrame(
{
"A": {0: "0.0", 1: "3.0", 2: "0.0", 3: "3.0", 4: "1.0", 5: "2.0"},
"B": {0: "2.0", 1: "2.0", 2: "0.0", 3: "2.0", 4: "0.0", 5: "0.0"},
"D": {0: "0.0", 1: "3.0", 2: "0.0", 3: "2.0", 4: "2.0", 5: "1.0"},
"F": {0: "3.0", 1: "0.0", 2: "1.0", 3: "0.0", 4: "1.0", 5: "3.0"},
}
)
obj = QuantileDiscretizer(n_bins, inplace=True).fit(X)
return obj, X, X_expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
X_new = X_new.to_pandas()
assert_frame_equal(X_new, X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
def test_no_num_pd(data_no_num):
obj, X, X_expected = data_no_num
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_no_num_ks(data_no_num_ks):
obj, X, X_expected = data_no_num_ks
X_new = obj.transform(X)
X_new = X_new.to_pandas()
assert_frame_equal(X_new, X_expected)
def test_no_num_pd_np(data_no_num):
obj, X, X_expected = data_no_num
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_no_num_ks_np(data_no_num_ks):
obj, X, X_expected = data_no_num_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
def test_num_pd(data_num):
obj, X, X_expected = data_num
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_num_ks(data_num_ks):
obj, X, X_expected = data_num_ks
X_new = obj.transform(X)
X_new = X_new.to_pandas()
assert_frame_equal(X_new, X_expected)
def test_num_pd_np(data_num):
obj, X, X_expected = data_num
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_num_ks_np(data_num_ks):
obj, X, X_expected = data_num_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
# # inplace
def test_inplace_pd(data_inplace):
obj, X, X_expected = data_inplace
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_inplace_ks(data_inplace_ks):
obj, X, X_expected = data_inplace_ks
X_new = obj.transform(X)
X_new = X_new.to_pandas()
assert_frame_equal(X_new, X_expected)
def test_inplace_pd_np(data_inplace):
obj, X, X_expected = data_inplace
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_inplace_ks_np(data_inplace_ks):
obj, X, X_expected = data_inplace_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
def test_inplace_num_pd(data_num_inplace):
obj, X, X_expected = data_num_inplace
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_inplace_num_ks(data_num_inplace_ks):
obj, X, X_expected = data_num_inplace_ks
X_new = obj.transform(X)
X_new = X_new.to_pandas()
assert_frame_equal(X_new, X_expected)
def test_inplace_num_pd_np(data_num_inplace):
obj, X, X_expected = data_num_inplace
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_inplace_num_ks_np(data_num_inplace_ks):
obj, X, X_expected = data_num_inplace_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(
X_numpy_new, columns=X_expected.columns, index=X_expected.index
)
assert_frame_equal(X_new, X_expected.astype(object))
def test_init():
with pytest.raises(TypeError):
_ = QuantileDiscretizer(n_bins="a")
with pytest.raises(TypeError):
_ = QuantileDiscretizer(n_bins=2, inplace="a")
|
py | 1a52877b6d69374c4a9d4dd564e4e8360dda4b8f | # Written by Dr Daniel Buscombe, Marda Science LLC
#
# MIT License
#
# Copyright (c) 2020, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
os.environ["TF_DETERMINISTIC_OPS"] = "1"
##calcs
import tensorflow as tf #numerical operations on gpu
import numpy as np
import matplotlib.pyplot as plt
SEED=42
np.random.seed(SEED)
AUTO = tf.data.experimental.AUTOTUNE # used in tf.data.Dataset API
tf.random.set_seed(SEED)
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print('GPU name: ', tf.config.experimental.list_physical_devices('GPU'))
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
TARGET_SIZE = 1024
BATCH_SIZE = 4
@tf.autograph.experimental.do_not_convert
#-----------------------------------
def read_seg_tfrecord_multiclass(example):
"""
"read_seg_tfrecord_multiclass(example)"
This function reads an example from a TFrecord file into a single image and label
This is the "multiclass" version for imagery, where the classes are mapped as follows:
INPUTS:
* TFRecord example object
OPTIONAL INPUTS: None
GLOBAL INPUTS: TARGET_SIZE
OUTPUTS:
* image [tensor array]
* class_label [tensor array]
"""
features = {
"image": tf.io.FixedLenFeature([], tf.string), # tf.string = bytestring (not text string)
"label": tf.io.FixedLenFeature([], tf.string), # shape [] means scalar
}
# decode the TFRecord
example = tf.io.parse_single_example(example, features)
image = tf.image.decode_png(example['image'], channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [TARGET_SIZE,TARGET_SIZE, 3])
#image = tf.reshape(tf.image.rgb_to_grayscale(image), [TARGET_SIZE,TARGET_SIZE, 1])
label = tf.image.decode_png(example['label'], channels=1)
label = tf.cast(label, tf.uint8)#/ 255.0
label = tf.reshape(label, [TARGET_SIZE,TARGET_SIZE, 1])
cond = tf.equal(label, tf.ones(tf.shape(label),dtype=tf.uint8)*7)
label = tf.where(cond, tf.ones(tf.shape(label),dtype=tf.uint8)*6, label)
label = tf.one_hot(tf.cast(label, tf.uint8), 6) #6 = 5 classes (undamaged, minor, major, destroyed, unclass) + null (0)
label = tf.squeeze(label)
image = tf.reshape(image, (image.shape[0], image.shape[1], image.shape[2]))
#image = tf.image.per_image_standardization(image)
return image, label
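# Note on the label handling above: raw label value 7 is folded into 6, and since
# tf.one_hot is called with depth 6, an index of 6 falls outside [0, 6) and is
# encoded as an all-zero vector. After tf.squeeze the label tensor has shape
# (TARGET_SIZE, TARGET_SIZE, 6); a quick sketch with illustrative inputs:
#   image, label = read_seg_tfrecord_multiclass(serialized_example)
#   label.shape  # -> (1024, 1024, 6) with TARGET_SIZE = 1024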
#-----------------------------------
def get_batched_dataset(filenames):
"""
"get_batched_dataset(filenames)"
This function defines a workflow for the model to read data from
tfrecord files by defining the degree of parallelism, batch size, pre-fetching, etc
and also formats the imagery properly for model training
INPUTS:
* filenames [list]
OPTIONAL INPUTS: None
GLOBAL INPUTS: BATCH_SIZE, AUTO
OUTPUTS: tf.data.Dataset object
"""
option_no_order = tf.data.Options()
option_no_order.experimental_deterministic = True
dataset = tf.data.Dataset.list_files(filenames)
dataset = dataset.with_options(option_no_order)
dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=16, num_parallel_calls=AUTO)
dataset = dataset.map(read_seg_tfrecord_multiclass, num_parallel_calls=AUTO)
#dataset = dataset.cache() # This dataset fits in RAM
dataset = dataset.repeat()
#dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) # drop_remainder will be needed on TPU
dataset = dataset.prefetch(AUTO) #
return dataset
# from tensorflow.python.client import device_lib
#
# def get_available_devices():
# local_device_protos = device_lib.list_local_devices()
# return [x.name for x in local_device_protos if x.device_type == 'GPU' or x.device_type == 'CPU']
#==================================================================
for storm in ['matthew', 'michael', 'florence', 'harvey']:
imdir = '/media/marda/TWOTB1/xBD/hurricanes/images/'+storm
lab_path = '/media/marda/TWOTB1/xBD/hurricanes/labels2D/'+storm
tfrecord_dir = '/media/marda/TWOTB1/xBD/hurricanes/tfrecords/'+storm+'/imseg'
# # Run inference on CPU
# with tf.device('/cpu:0'):
##test
filenames = sorted(tf.io.gfile.glob(tfrecord_dir+'/*.jpg'))
dataset = get_batched_dataset(filenames)
B = []
for imgs,lbls in dataset.take(1):
for count,(im,lab) in enumerate(zip(imgs,lbls)):
print(np.shape(lab))
lab= np.argmax(lab,axis=-1)
B.append(np.bincount(lab.flatten(),minlength=6))
plt.subplot(int(BATCH_SIZE/2),int(BATCH_SIZE/2),count+1)
plt.imshow(im)
del im
plt.imshow(lab, alpha=0.5, cmap='bwr')
plt.axis('off')
del lab
plt.show()
np.sum(np.vstack(B),axis=0)
|
py | 1a5287a8674c0d88ac89f0b0f949c1a2149f6102 | ############################################################
# log
############################################################
# Contains the custom logger object to be used.
import logging
import sys
import os
def setup_custom_logger():
"""Setups the custom logger to be used globally.
The logger object can be referenced via 'root' in logging.getLogger().
Returns:
The logger object to be used in the script.
"""
logging.basicConfig(filename=os.getcwd() + '\\output.log',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
log = logging.getLogger()
log.setLevel(logging.INFO)
stdout_handler = logging.StreamHandler(sys.stdout)
log.addHandler(stdout_handler)
return log
def get_logger():
"""Returns the logger object to be used.
"""
log = setup_custom_logger() if not logging.getLogger('root').hasHandlers() \
else logging.getLogger('root')
return log
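# Minimal usage sketch (the message below is illustrative):
#   log = get_logger()
#   log.info("pipeline started")  # written to output.log and echoed to stdout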
|
py | 1a52887a535a50fcdead0e0b92fba79786737004 | #!/usr/bin/env python2
from db.models import *
import sys
allTweets = Tweet.select().count()
byClassification = Tweet.select(Tweet.classification, fn.COUNT(Tweet.id).alias('num_tweets')).group_by(Tweet.classification)
for classification in byClassification:
print str(classification.classification)+": "+str(classification.num_tweets)
|
py | 1a5288f8fa81521725a01544637d01a32fddb136 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from read_excel_MIK import read_excel_MIK
from read_excel_Seichitech import read_excel_Seichitech
from openpyxl import load_workbook
import pandas
class read_write_excel:
"""
For read/write and parse excel file
"""
def __init__(self, filename):
'''
support both MIK and Seichitech platform
'''
self.filename = filename
fd = pandas.ExcelFile(self.filename)
sheet_names = fd.sheet_names
fd.close()
if "Result Data" in sheet_names and "Graph" in sheet_names and "Raw Data" in sheet_names:
self.fd = read_excel_MIK(self.filename)
else:
self.fd = read_excel_Seichitech(self.filename)
def read_config(self):
'''
        read test type, max_x, max_y, and boundary range information
'''
return self.fd.read_config()
def read_report_time(self):
'''
read report time for line test
'''
return self.fd.read_report_time()
def read_target(self):
'''
get target from excel
'''
return self.fd.read_target()
def read_measure(self):
'''
get measure data from excel
'''
return self.fd.read_measure()
def read_target_and_measure(self):
'''
read target and measure at the same time
'''
target_list = self.read_target()
measure_list_mm, measure_list_pixel = self.read_measure()
return target_list, measure_list_mm, measure_list_pixel
def write_excel(self, write_data):
'''
write output to new sheet
'''
df = pandas.DataFrame(write_data)
book = load_workbook(self.filename)
sheet_names = book.sheetnames
for name in sheet_names:
if "analysis_output" in name:
book.remove(book[name])
writer = pandas.ExcelWriter(self.filename, engine = 'openpyxl')
writer.book = book
df.to_excel(writer, sheet_name='analysis_output', index=False)
work_sheet = book["analysis_output"]
for col in work_sheet.columns:
max_length = 0
column = col[0].column # Get the column name
for cell in col:
try: # Necessary to avoid error on empty cells
if len(str(cell.value)) > max_length:
max_length = len(cell.value)
except:
pass
adjusted_width = (max_length + 2) * 1.2
work_sheet.column_dimensions[column].width = adjusted_width
writer.save()
writer.close()
def destroy(self):
'''
destroy excel fd
'''
self.fd.destroy()
if __name__ == "__main__":
"""
This is for test purpose
"""
fd = read_write_excel("../H_Line/Result.xlsx")
test_type, max_x, max_y, boundary_range = fd.read_config()
target_list, measure_list = fd.read_target_and_measure()
# print("This is %s, max_x = %f, max_y = %f, boundary_range = %f" % ( test_type, max_x, max_y, boundary_range ))
# for i in range(len(target_list)):
# print("\nNO.%d target line ---------------------- (%f, %f) -> (%f, %f)" % ( i + 1, target_list[i][0], target_list[i][1], target_list[i][2], target_list[i][3] ))
# for j in range(len(measure_list[i])):
# print("\tNO.%d measured point ---------------------------- (%f, %f)" % ( j + 1, measure_list[i][j][0], measure_list[i][j][1] ))
# print("\n")
test_dict = {'a':[1,2,3], 'b':[2,3,4], 'c':[3,4,5]}
fd.write_excel(test_dict)
fd = read_write_excel("../POINTS/20180918175024.xlsx")
test_type, max_x, max_y, boundary_range = fd.read_config()
target_list, measure_list = fd.read_target_and_measure()
# print("This is %s, max_x = %f, max_y = %f, boundary_range = %f" % ( test_type, max_x, max_y, boundary_range ))
# for i in range(len(target_list)):
# print("\nNO.%d target point ---------------------- (%f, %f)" % ( i + 1, target_list[i][0], target_list[i][1] ))
# for j in range(len(measure_list[i])):
# print("\tRepeat %d" % ( j + 1 ))
# for k in range(len(measure_list[i][j])):
# print("\t\tNO.%d measured point ---------------------------- (%f, %f)" % ( k + 1, measure_list[i][j][k][0], measure_list[i][j][k][1] ))
# print("\n")
|
py | 1a5289983ff83edbf32e69b6fc3025ff3443aa93 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ntp_auth
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP authentication.
description:
- Manages NTP authentication.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If C(state=absent), the module will remove the given key configuration if it exists.
- If C(state=absent) and C(authentication=on), authentication will be turned off.
options:
key_id:
description:
- Authentication key identifier (numeric).
md5string:
description:
- MD5 String.
auth_type:
description:
- Whether the given md5string is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
default: text
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
choices: [ 'false', 'true' ]
default: 'false'
authentication:
description:
- Turns NTP authentication on or off.
choices: ['on', 'off']
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP authentication configuration
- nxos_ntp_auth:
key_id: 32
md5string: hello
auth_type: text
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
if 'show run' not in command:
command = {
'command': command,
'output': 'json',
}
else:
command = {
'command': command,
'output': 'text',
}
return run_commands(module, [command])
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_ntp_auth(module):
command = 'show ntp authentication-status'
body = execute_show_command(command, module)[0]
ntp_auth_str = body['authentication']
if 'enabled' in ntp_auth_str:
ntp_auth = True
else:
ntp_auth = False
return ntp_auth
def get_ntp_trusted_key(module):
trusted_key_list = []
command = 'show run | inc ntp.trusted-key'
trusted_key_str = execute_show_command(command, module)[0]
if trusted_key_str:
trusted_keys = trusted_key_str.splitlines()
else:
trusted_keys = []
for line in trusted_keys:
if line:
trusted_key_list.append(str(line.split()[2]))
return trusted_key_list
def get_ntp_auth_key(key_id, module):
authentication_key = {}
command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
auth_regex = (r".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
r"md5\s(?P<md5string>\S+)\s(?P<atype>\S+).*")
body = execute_show_command(command, module)[0]
try:
match_authentication = re.match(auth_regex, body, re.DOTALL)
group_authentication = match_authentication.groupdict()
authentication_key['key_id'] = group_authentication['key_id']
authentication_key['md5string'] = group_authentication['md5string']
if group_authentication['atype'] == '7':
authentication_key['auth_type'] = 'encrypt'
else:
authentication_key['auth_type'] = 'text'
except (AttributeError, TypeError):
authentication_key = {}
return authentication_key
def get_ntp_auth_info(key_id, module):
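    """Collect the existing NTP authentication state for key_id: key details,
    whether the key is trusted, and whether authentication is on or off."""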
auth_info = get_ntp_auth_key(key_id, module)
trusted_key_list = get_ntp_trusted_key(module)
auth_power = get_ntp_auth(module)
if key_id in trusted_key_list:
auth_info['trusted_key'] = 'true'
else:
auth_info['trusted_key'] = 'false'
if auth_power:
auth_info['authentication'] = 'on'
else:
auth_info['authentication'] = 'off'
return auth_info
def auth_type_to_num(auth_type):
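    """Map the auth_type option to the NX-OS encryption flag: '7' for encrypt, '0' for text."""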
if auth_type == 'encrypt':
return '7'
else:
return '0'
def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
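    """Build the commands to configure the authentication key, the trusted-key
    setting, and the NTP authentication state."""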
ntp_auth_cmds = []
if key_id and md5string:
auth_type_num = auth_type_to_num(auth_type)
ntp_auth_cmds.append(
'ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if trusted_key == 'true':
ntp_auth_cmds.append(
'ntp trusted-key {0}'.format(key_id))
elif trusted_key == 'false':
ntp_auth_cmds.append(
'no ntp trusted-key {0}'.format(key_id))
if authentication == 'on':
ntp_auth_cmds.append(
'ntp authenticate')
elif authentication == 'off':
ntp_auth_cmds.append(
'no ntp authenticate')
return ntp_auth_cmds
def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
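    """Build the commands to remove the authentication key and, if requested,
    turn NTP authentication off."""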
auth_remove_cmds = []
if key_id:
auth_type_num = auth_type_to_num(auth_type)
auth_remove_cmds.append(
'no ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if authentication:
auth_remove_cmds.append(
'no ntp authenticate')
return auth_remove_cmds
def main():
argument_spec = dict(
key_id=dict(type='str'),
md5string=dict(type='str'),
auth_type=dict(choices=['text', 'encrypt'], default='text'),
trusted_key=dict(choices=['true', 'false'], default='false'),
authentication=dict(choices=['on', 'off']),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
key_id = module.params['key_id']
md5string = module.params['md5string']
auth_type = module.params['auth_type']
trusted_key = module.params['trusted_key']
authentication = module.params['authentication']
state = module.params['state']
if key_id:
if not trusted_key and not md5string:
module.fail_json(msg='trusted_key or md5string MUST be specified')
args = dict(key_id=key_id, md5string=md5string,
auth_type=auth_type, trusted_key=trusted_key,
authentication=authentication)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_ntp_auth_info(key_id, module)
end_state = existing
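    # delta holds only the proposed settings that differ from the device's current state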
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'present':
if delta:
command = set_ntp_auth_key(
key_id, md5string, delta.get('auth_type'),
delta.get('trusted_key'), delta.get('authentication'))
if command:
commands.append(command)
elif state == 'absent':
auth_toggle = None
if existing.get('authentication') == 'on':
auth_toggle = True
if not existing.get('key_id'):
key_id = None
command = remove_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, auth_toggle)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
end_state = get_ntp_auth_info(key_id, module)
delta = dict(set(end_state.items()).difference(existing.items()))
if delta or (len(existing) != len(end_state)):
changed = True
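            # Drop the leading 'configure' command from the reported updates, if present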
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()