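# py/orbit/lattice package initializer: re-exports the core lattice classes
# (AccActionsContainer, AccNode, AccLattice, AccNodeBunchTracker) at package level.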
from AccActionsContainer import AccActionsContainer
from AccNode import AccNode
from AccLattice import AccLattice
from AccNodeBunchTracker import AccNodeBunchTracker
__all__ = ["AccActionsContainer", "AccNode", "AccLattice", "AccNodeBunchTracker"]
| {
"content_hash": "cd957208faadf406ed8efab94713ae40",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 36.77777777777778,
"alnum_prop": 0.7371601208459214,
"repo_name": "azukov/py-orbit",
"id": "cb7fcfd5273f01ec7c42f2ff2184bebc076ee750",
"size": "730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/orbit/lattice/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1754740"
},
{
"name": "Dockerfile",
"bytes": "232"
},
{
"name": "Makefile",
"bytes": "13194"
},
{
"name": "Python",
"bytes": "1025246"
},
{
"name": "Shell",
"bytes": "2982"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_utils import importutils as i_utils
from cloudkitty import config # noqa
from cloudkitty import service
from cloudkitty import storage
from cloudkitty import utils as ck_utils
from cloudkitty import write_orchestrator
CONF = cfg.CONF
CONF.import_opt('period', 'cloudkitty.collector', 'collect')
CONF.import_opt('backend', 'cloudkitty.config', 'output')
CONF.import_opt('basepath', 'cloudkitty.config', 'output')
STORAGES_NAMESPACE = 'cloudkitty.storage.backends'
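# DBCommand wires the configured storage and output backends together and backs
# the 'generate' and 'tenants_list' CLI sub-commands defined below.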
class DBCommand(object):
def __init__(self):
self._storage = None
self._output = None
self._load_storage_backend()
self._load_output_backend()
def _load_storage_backend(self):
self._storage = storage.get_storage()
def _load_output_backend(self):
backend = i_utils.import_class(CONF.output.backend)
self._output = backend
def generate(self):
if not CONF.command.tenant:
if not CONF.command.begin:
CONF.command.begin = ck_utils.get_month_start()
if not CONF.command.end:
CONF.command.end = ck_utils.get_next_month()
tenants = self._storage.get_tenants(CONF.command.begin,
CONF.command.end)
else:
tenants = [CONF.command.tenant]
for tenant in tenants:
wo = write_orchestrator.WriteOrchestrator(self._output,
tenant,
self._storage,
CONF.output.basepath)
wo.init_writing_pipeline()
if not CONF.command.begin:
wo.restart_month()
wo.process()
def tenants_list(self):
if not CONF.command.begin:
CONF.command.begin = ck_utils.get_month_start()
if not CONF.command.end:
CONF.command.end = ck_utils.get_next_month()
tenants = self._storage.get_tenants(CONF.command.begin,
CONF.command.end)
print('Tenant list:')
for tenant in tenants:
print(tenant)
def call_generate(command_object):
command_object.generate()
def call_tenants_list(command_object):
command_object.tenants_list()
def add_command_parsers(subparsers):
parser = subparsers.add_parser('generate')
parser.set_defaults(func=call_generate)
parser.add_argument('--tenant', nargs='?')
parser.add_argument('--begin', nargs='?')
parser.add_argument('--end', nargs='?')
parser = subparsers.add_parser('tenants_list')
parser.set_defaults(func=call_tenants_list)
parser.add_argument('--begin', nargs='?')
parser.add_argument('--end', nargs='?')
command_opt = cfg.SubCommandOpt('command',
title='Command',
help='Available commands',
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
service.prepare_service()
command_object = DBCommand()
CONF.command.func(command_object)
| {
"content_hash": "677966bdf46998a01c5c401c12440dd4",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 75,
"avg_line_length": 33.197916666666664,
"alnum_prop": 0.5842485095701286,
"repo_name": "openstack/cloudkitty",
"id": "a090d98eb8d6fc52d05326d86f4e347c4be8f779",
"size": "3820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkitty/cli/writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "4904"
},
{
"name": "Python",
"bytes": "1046196"
},
{
"name": "Shell",
"bytes": "16361"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
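# Each build_*_request helper below assembles the raw HttpRequest (URL, query
# parameters, headers) for a single REST operation on the redisEnterprise resource.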
def build_create_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Cache/redisEnterprise")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class RedisEnterpriseOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.redisenterprise.RedisEnterpriseManagementClient`'s
:attr:`redis_enterprise` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
def _create_initial(
self, resource_group_name: str, cluster_name: str, parameters: Union[_models.Cluster, IO], **kwargs: Any
) -> _models.Cluster:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Cluster] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Cluster")
request = build_create_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Cluster", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("Cluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}"
}
@overload
def begin_create(
self,
resource_group_name: str,
cluster_name: str,
parameters: _models.Cluster,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Cluster]:
"""Creates or updates an existing (overwrite/recreate, with potential downtime) cache cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: The name of the RedisEnterprise cluster. Required.
:type cluster_name: str
:param parameters: Parameters supplied to the Create RedisEnterprise operation. Required.
:type parameters: ~azure.mgmt.redisenterprise.models.Cluster
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.redisenterprise.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create(
self,
resource_group_name: str,
cluster_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Cluster]:
"""Creates or updates an existing (overwrite/recreate, with potential downtime) cache cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: The name of the RedisEnterprise cluster. Required.
:type cluster_name: str
:param parameters: Parameters supplied to the Create RedisEnterprise operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.redisenterprise.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create(
self, resource_group_name: str, cluster_name: str, parameters: Union[_models.Cluster, IO], **kwargs: Any
) -> LROPoller[_models.Cluster]:
"""Creates or updates an existing (overwrite/recreate, with potential downtime) cache cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: The name of the RedisEnterprise cluster. Required.
:type cluster_name: str
:param parameters: Parameters supplied to the Create RedisEnterprise operation. Is either a
model type or an IO type. Required.
:type parameters: ~azure.mgmt.redisenterprise.models.Cluster or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.redisenterprise.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Cluster] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("Cluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}"
}
def _update_initial(
self, resource_group_name: str, cluster_name: str, parameters: Union[_models.ClusterUpdate, IO], **kwargs: Any
) -> Optional[_models.Cluster]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.Cluster]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ClusterUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("Cluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}"
}
@overload
def begin_update(
self,
resource_group_name: str,
cluster_name: str,
parameters: _models.ClusterUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Cluster]:
"""Updates an existing RedisEnterprise cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: The name of the RedisEnterprise cluster. Required.
:type cluster_name: str
:param parameters: Parameters supplied to the Update RedisEnterprise operation. Required.
:type parameters: ~azure.mgmt.redisenterprise.models.ClusterUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.redisenterprise.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
cluster_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.Cluster]:
"""Updates an existing RedisEnterprise cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: The name of the RedisEnterprise cluster. Required.
:type cluster_name: str
:param parameters: Parameters supplied to the Update RedisEnterprise operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.redisenterprise.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self, resource_group_name: str, cluster_name: str, parameters: Union[_models.ClusterUpdate, IO], **kwargs: Any
) -> LROPoller[_models.Cluster]:
"""Updates an existing RedisEnterprise cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: The name of the RedisEnterprise cluster. Required.
:type cluster_name: str
:param parameters: Parameters supplied to the Update RedisEnterprise operation. Is either a
model type or an IO type. Required.
:type parameters: ~azure.mgmt.redisenterprise.models.ClusterUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.redisenterprise.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Cluster] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("Cluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}"
}
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, cluster_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}"
}
@distributed_trace
def begin_delete(self, resource_group_name: str, cluster_name: str, **kwargs: Any) -> LROPoller[None]:
"""Deletes a RedisEnterprise cache cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: The name of the RedisEnterprise cluster. Required.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
cluster_name=cluster_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}"
}
@distributed_trace
def get(self, resource_group_name: str, cluster_name: str, **kwargs: Any) -> _models.Cluster:
"""Gets information about a RedisEnterprise cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: The name of the RedisEnterprise cluster. Required.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster or the result of cls(response)
:rtype: ~azure.mgmt.redisenterprise.models.Cluster
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.Cluster] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Cluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise/{clusterName}"
}
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Cluster"]:
"""Lists all RedisEnterprise clusters in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Cluster or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.redisenterprise.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ClusterList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ClusterList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redisEnterprise"
}
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Cluster"]:
"""Gets all RedisEnterprise clusters in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Cluster or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.redisenterprise.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ClusterList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ClusterList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Cache/redisEnterprise"}
| {
"content_hash": "dc57477ccf100da99512e80cd301768f",
"timestamp": "",
"source": "github",
"line_count": 1034,
"max_line_length": 139,
"avg_line_length": 46.05512572533849,
"alnum_prop": 0.6397177715713656,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ad80398bc0e98e1597d5174bf0cc1f0604aaeaa9",
"size": "48121",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/redisenterprise/azure-mgmt-redisenterprise/azure/mgmt/redisenterprise/operations/_redis_enterprise_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import datetime
from webob.datetime_utils import UTC
import webmachine.exc
def b03(res, req, resp):
"Options?"
if req.method == 'OPTIONS':
for (header, value) in res.options(req, resp):
resp[header] = value
return True
return False
def b04(res, req, resp):
"Request entity too large?"
return not res.valid_entity_length(req, resp)
def b05(res, req, resp):
"Unknown Content-Type?"
return not res.known_content_type(req, resp)
def b06(res, req, resp):
"Unknown or unsupported Content-* header?"
return not res.valid_content_headers(req, resp)
def b07(res, req, resp):
"Forbidden?"
return res.forbidden(req, resp)
def b08(res, req, resp):
"Authorized?"
auth = res.is_authorized(req, resp)
if auth is True:
return True
elif isinstance(auth, basestring):
resp["WWW-Authenticate"] = auth
return False
def b09(res, req, resp):
"Malformed?"
return res.malformed_request(req, resp)
def b10(res, req, resp):
"Is method allowed?"
if req.method in res.allowed_methods(req, resp):
return True
return False
def b11(res, req, resp):
"URI too long?"
return res.uri_too_long(req, resp)
def b12(res, req, resp):
"Known method?"
return req.method in res.known_methods(req, resp)
def b13(res, req, resp):
"Service available?"
return res.ping(req, resp) and res.service_available(req, resp)
def c03(res, req, resp):
"Accept exists?"
return "HTTP_ACCEPT" in req.META
def c04(res, req, resp):
"Acceptable media type available?"
ctypes = [ctype for (ctype, func) in res.content_types_provided(req, resp)]
ctype = req.accept.best_match(ctypes)
if ctype is None:
return False
resp.content_type = ctype
return True
def d04(res, req, resp):
"Accept-Language exists?"
return "HTTP_ACCEPT_LANGUAGE" in req.META
def d05(res, req, resp):
"Accept-Language available?"
langs = res.languages_provided(req, resp)
if langs is not None:
lang = req.accept_language.best_match(langs)
if lang is None:
return False
resp.content_language = lang
return True
def e05(res, req, resp):
"Accept-Charset exists?"
return "HTTP_ACCEPT_CHARSET" in req.META
def e06(res, req, resp):
"Acceptable charset available?"
charsets = res.charsets_provided(req, resp)
if charsets is not None:
charset = req.accept_charset.best_match(charsets)
if charset is None:
return False
resp._charset = charset
return True
def f06(res, req, resp):
"Accept-Encoding exists?"
return "HTTP_ACCEPT_ENCODING" in req.META
def f07(res, req, resp):
"Acceptable encoding available?"
encodings = res.encodings_provided(req, resp)
if encodings is not None:
encodings = [enc for (enc, func) in encodings]
enc = req.accept_encoding.best_match(encodings)
if enc is None:
return False
resp.content_encoding = enc
return True
def g07(res, req, resp):
"Resource exists?"
# Set variances now that conneg is done
hdr = []
if len(res.content_types_provided(req, resp) or []) > 1:
hdr.append("Accept")
if len(res.charsets_provided(req, resp) or []) > 1:
hdr.append("Accept-Charset")
if len(res.encodings_provided(req, resp) or []) > 1:
hdr.append("Accept-Encoding")
if len(res.languages_provided(req, resp) or []) > 1:
hdr.append("Accept-Language")
hdr.extend(res.variances(req, resp))
resp.vary = hdr
return res.resource_exists(req, resp)
def g08(res, req, resp):
"If-Match exists?"
return "HTTP_IF_MATCH" in req.META
def g09(res, req, resp):
"If-Match: * exists?"
return '*' in req.if_match
def g11(res, req, resp):
"Etag in If-Match?"
return res.generate_etag(req, resp) in req.if_match
def h07(res, req, resp):
"If-Match: * exists?"
# Need to recheck that if-match was an actual header
# because WebOb says that '*' will match no header.
return 'HTTP_IF_MATCH' in req.META and '*' in req.if_match
def h10(res, req, resp):
"If-Unmodified-Since exists?"
return "HTTP_IF_MODIFIED_SINCE" in req.META
def h11(res, req, resp):
"If-Unmodified-Since is a valid date?"
return req.if_unmodified_since is not None
def h12(res, req, resp):
"Last-Modified > If-Unmodified-Since?"
if not req.if_unmodified_since:
return True
resp.last_modified = res.last_modified(req, resp)
return resp.last_modified > req.if_unmodified_since
def i04(res, req, resp):
"Apply to a different URI?"
uri = res.moved_permanently(req, resp)
if not uri:
return False
resp.location = uri
return True
def i07(res, req, resp):
"PUT?"
return req.method == "PUT"
def i12(res, req, resp):
"If-None-Match exists?"
return "HTTP_IF_NONE_MATCH" in req.META
def i13(res, req, resp):
"If-None-Match: * exists?"
return '*' in req.if_none_match
def j18(res, req, resp):
"GET/HEAD?"
return req.method in ["GET", "HEAD"]
def k05(res, req, resp):
"Resource moved permanently?"
uri = res.moved_permanently(req, resp)
if not uri:
return False
resp.location = uri
return True
def k07(res, req, resp):
"Resource previously existed?"
return res.previously_existed(req, resp)
def k13(res, req, resp):
"Etag in If-None-Match?"
resp.etag = res.generate_etag(req, resp)
return resp.etag in req.if_none_match
def l05(res, req, resp):
"Resource moved temporarily?"
uri = res.moved_temporarily(req, resp)
if not uri:
return False
resp.location = uri
return True
def l07(res, req, resp):
"POST?"
return req.method == "POST"
def l13(res, req, resp):
"If-Modified-Since exists?"
return "HTTP_IF_MODIFIED_SINCE" in req.META
def l14(res, req, resp):
"If-Modified-Since is a valid date?"
return req.if_modified_since is not None
def l15(res, req, resp):
"If-Modified-Since > Now?"
return req.if_modified_since > datetime.datetime.now(UTC)
def l17(res, req, resp):
"Last-Modified > If-Modified-Since?"
resp.last_modified = res.last_modified(req, resp)
if not (req.if_modified_since and resp.last_modified):
return True
return resp.last_modified > req.if_modified_since
def m05(res, req, resp):
"POST?"
return req.method == "POST"
def m07(res, req, resp):
"Server permits POST to missing resource?"
return res.allow_missing_post(req, resp)
def m16(res, req, resp):
"DELETE?"
return req.method == "DELETE"
def m20(res, req, resp):
"""Delete enacted immediayly?
Also where DELETE is forced."""
return res.delete_resource(req, resp)
def m20b(res, req, resp):
""" Delete completed """
return res.delete_completed(req, resp)
def n05(res, req, resp):
"Server permits POST to missing resource?"
return res.allow_missing_post(req, resp)
def n11(res, req, resp):
"Redirect?"
if res.post_is_create(req, resp):
handle_request_body(res, req, resp)
else:
if not res.process_post(req, resp):
raise webmachine.exc.HTTPInternalServerError("Failed to process POST.")
return False
resp.location = res.created_location(req, resp)
if resp.location:
return True
return False
def n16(res, req, resp):
"POST?"
return req.method == "POST"
def o14(res, req, resp):
"Is conflict?"
if not res.is_conflict(req, resp):
handle_response_body(res, req, resp)
return False
return True
def o16(res, req, resp):
"PUT?"
return req.method == "PUT"
def o18(res, req, resp):
"Multiple representations? (Build GET/HEAD body)"
if req.method not in ["GET", "HEAD"]:
return res.multiple_choices(req, resp)
handle_response_body(res, req, resp)
return res.multiple_choices(req, resp)
def o20(res, req, resp):
"Response includes entity?"
return bool(resp._container)
def p03(res, req, resp):
"Conflict?"
if res.is_conflict(req, resp):
return True
handle_request_body(res, req, resp)
return False
def p11(res, req, resp):
"New resource?"
if not resp.location:
return False
return True
def first_match(func, req, resp, expect):
for (key, value) in func(req, resp):
if key == expect:
return value
return None
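# Body handlers: dispatch to the callback the resource registered for the
# negotiated content type.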
def handle_request_body(res, req, resp):
ctype = req.content_type or "application/octet-stream"
mtype = ctype.split(";", 1)[0]
func = first_match(res.content_types_accepted, req, resp, mtype)
if func is None:
raise webmachine.exc.HTTPUnsupportedMediaType()
func(req, resp)
def handle_response_body(res, req, resp):
resp.etag = res.generate_etag(req, resp)
resp.last_modified = res.last_modified(req, resp)
resp.expires = res.expires(req, resp)
# Generate the body
func = first_match(res.content_types_provided, req, resp, resp.content_type)
if func is None:
raise webmachine.exc.HTTPInternalServerError()
body = func(req, resp)
if not resp.content_type:
resp.content_type = "text/plain"
# Handle our content encoding.
encoding = resp.content_encoding
if encoding:
func = first_match(res.encodings_provided, req, resp, encoding)
if func is None:
raise webmachine.exc.HTTPInternalServerError()
resp.body = func(resp.body)
resp['Content-Encoding'] = encoding
if not isinstance(body, basestring) and hasattr(body, '__iter__'):
resp._container = body
resp._is_string = False
else:
resp._container = [body]
resp._is_string = True
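# Decision table: each key is a decision function; its value is the pair taken
# on a (True, False) outcome. An integer is a final HTTP status code, a callable
# is the next decision node in the webmachine flowchart.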
TRANSITIONS = {
b03: (200, c03), # Options?
b04: (413, b03), # Request entity too large?
b05: (415, b04), # Unknown Content-Type?
b06: (501, b05), # Unknown or unsupported Content-* header?
b07: (403, b06), # Forbidden?
b08: (b07, 401), # Authorized?
b09: (400, b08), # Malformed?
b10: (b09, 405), # Is method allowed?
b11: (414, b10), # URI too long?
b12: (b11, 501), # Known method?
b13: (b12, 503), # Service available?
c03: (c04, d04), # Accept exists?
c04: (d04, 406), # Acceptable media type available?
d04: (d05, e05), # Accept-Language exists?
d05: (e05, 406), # Accept-Language available?
e05: (e06, f06), # Accept-Charset exists?
e06: (f06, 406), # Acceptable charset available?
f06: (f07, g07), # Accept-Encoding exists?
f07: (g07, 406), # Acceptable encoding available?
g07: (g08, h07), # Resource exists?
g08: (g09, h10), # If-Match exists?
g09: (h10, g11), # If-Match: * exists?
g11: (h10, 412), # Etag in If-Match?
h07: (412, i07), # If-Match: * exists?
h10: (h11, i12), # If-Unmodified-Since exists?
h11: (h12, i12), # If-Unmodified-Since is valid date?
h12: (412, i12), # Last-Modified > If-Unmodified-Since?
i04: (301, p03), # Apply to a different URI?
i07: (i04, k07), # PUT?
i12: (i13, l13), # If-None-Match exists?
i13: (j18, k13), # If-None-Match: * exists?
j18: (304, 412), # GET/HEAD?
k05: (301, l05), # Resource moved permanently?
k07: (k05, l07), # Resource previously existed?
k13: (j18, l13), # Etag in If-None-Match?
l05: (307, m05), # Resource moved temporarily?
l07: (m07, 404), # POST?
l13: (l14, m16), # If-Modified-Since exists?
l14: (l15, m16), # If-Modified-Since is valid date?
l15: (m16, l17), # If-Modified-Since > Now?
l17: (m16, 304), # Last-Modified > If-Modified-Since?
m05: (n05, 410), # POST?
m07: (n11, 404), # Server permits POST to missing resource?
m16: (m20, n16), # DELETE?
m20: (m20b, 500), # Delete enacted immediately?
m20b: (o20, 202), # Delete completed?
n05: (n11, 410), # Server permits POST to missing resource?
n11: (303, p11), # Redirect?
n16: (n11, o16), # POST?
o14: (409, p11), # Conflict?
o16: (o14, o18), # PUT?
o18: (300, 200), # Multiple representations?
o20: (o18, 204), # Response includes entity?
p03: (409, p11), # Conflict?
p11: (201, o20) # New resource?
}
| {
"content_hash": "0075effd9f43838557ac200950d11cd5",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 83,
"avg_line_length": 28.90823529411765,
"alnum_prop": 0.6274621520429757,
"repo_name": "benoitc/dj-webmachine",
"id": "ef53f09fba1daf32b7dc89801a0b8a9e9f868c75",
"size": "12420",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webmachine/decisions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1515"
},
{
"name": "HTML",
"bytes": "2446"
},
{
"name": "JavaScript",
"bytes": "18922"
},
{
"name": "Python",
"bytes": "120249"
}
],
"symlink_target": ""
} |
import os
import sys
import argparse
from PIL import Image
from PIL import ImageDraw
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
sys.path.append(os.path.join(g_render4cnn_root_folder, 'render_pipeline'))
if __name__ == '__main__':
# Feel free to change input image file, result file, and model obj file for rendering
# --------------------------------
#### CHANGE BEG ####
output_result_file = os.path.join(BASE_DIR, 'est-view-topk.txt')
model_obj_file = os.path.join(g_render4cnn_root_folder, 'demo_render', 'sample_model', 'model.obj')
original_image_file = os.path.join(BASE_DIR, 'chair_image.jpg')
rendered_image_file_prefix = os.path.join(BASE_DIR, 'chair_in_estimated_view')
io_redirect = '' #' > /dev/null 2>&1'
#### CHANGE END ####
# --------------------------------
# Display estimated viewpoints that are read from result file
# suppress tilt for chairs
estimated_viewpoints = [[float(x) for x in line.rstrip().split(' ')] for line in open(output_result_file,'r')]
v = estimated_viewpoints[0]
    topk = len(v) // 4
print("Estimated views and confidence: ")
for k in range(topk):
a,e,t,c = v[4*k : 4*k+4]
print('rank:%d, confidence:%f, azimuth:%d, elevation:%d' % (k+1, c, a, e))
# Render images in estimated views
for k in range(topk):
a,e = v[4*k : 4*k+2]
python_cmd = 'python %s -m %s -a %s -e %s -t %s -d %s -o %s' % (os.path.join(g_render4cnn_root_folder, 'demo_render', 'render_class_view.py'),
model_obj_file, str(a), str(e), str(0), str(2.0), rendered_image_file_prefix+str(k)+'.png')
print ">> Running rendering command: \n \t %s" % (python_cmd)
os.system('%s %s' % (python_cmd, io_redirect))
print("Show both original aeroplane photo and rendered aeroplane in estimated view:")
im1 = Image.open(original_image_file)
im2s = []
for k in range(topk):
im2 = Image.open(rendered_image_file_prefix+str(k)+'.png')
bbox = im2.getbbox()
im2 = im2.crop(bbox)
draw = ImageDraw.Draw(im2)
draw.text((0,0), 'rank: %d, confidence: %f\nazimuth=%f, elevation=%f' % (k+1, v[4*k+3], v[4*k], v[4*k+1]), (0,255,0))
im2s.append(im2)
im1.show()
for k in range(topk):
im2s[k].show()
| {
"content_hash": "e414bbc47bd57384bf66658f006b5f1a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 151,
"avg_line_length": 40.38333333333333,
"alnum_prop": 0.5955427156417664,
"repo_name": "ShapeNet/RenderForCNN",
"id": "dd83df813fa7ba8c86a33450f786271d2b42b5e1",
"size": "2466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_view/run_visualize_3dview_topk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8544"
},
{
"name": "C++",
"bytes": "159496"
},
{
"name": "Matlab",
"bytes": "202245"
},
{
"name": "Python",
"bytes": "62143"
},
{
"name": "Shell",
"bytes": "3082"
}
],
"symlink_target": ""
} |
from os.path import join
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from scipy._build_utils.system_info import get_info
from scipy._build_utils import (uses_blas64, blas_ilp64_pre_build_hook,
combine_dict)
config = Configuration('odr', parent_package, top_path)
libodr_files = ['d_odr.f',
'd_mprec.f',
'dlunoc.f',
'd_lpk.f']
if uses_blas64():
blas_info = get_info('blas_ilp64_opt')
pre_build_hook = blas_ilp64_pre_build_hook(blas_info)
else:
blas_info = get_info('blas_opt')
pre_build_hook = None
odrpack_src = [join('odrpack', x) for x in libodr_files]
config.add_library('odrpack', sources=odrpack_src,
_pre_build_hook=pre_build_hook)
sources = ['__odrpack.c']
cfg = combine_dict(blas_info, numpy_nodepr_api,
libraries=['odrpack'],
include_dirs=['.'])
ext = config.add_extension('__odrpack',
sources=sources,
depends=(['odrpack.h'] + odrpack_src),
**cfg
)
ext._pre_build_hook = pre_build_hook
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| {
"content_hash": "dd444e35fccaf9fc8ed0ccd32b0ecdcd",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 31.127659574468087,
"alnum_prop": 0.5721120984278879,
"repo_name": "perimosocordiae/scipy",
"id": "0c00548f81a9b3432cebf97b4f14e8a4851ae442",
"size": "1463",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "scipy/odr/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4620237"
},
{
"name": "C++",
"bytes": "959068"
},
{
"name": "Cython",
"bytes": "1059810"
},
{
"name": "Dockerfile",
"bytes": "16894"
},
{
"name": "Fortran",
"bytes": "5211680"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "143727"
},
{
"name": "Python",
"bytes": "15434780"
},
{
"name": "R",
"bytes": "3059"
},
{
"name": "Shell",
"bytes": "18009"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""Python client for InfluxDB v0.8."""
import warnings
import json
import socket
import requests
import requests.exceptions
from six.moves import xrange
from six.moves.urllib.parse import urlparse
from influxdb import chunked_json
session = requests.Session()
class InfluxDBClientError(Exception):
"""Raised when an error occurs in the request."""
def __init__(self, content, code=-1):
"""Initialize an InfluxDBClientError handler."""
super(InfluxDBClientError, self).__init__(
"{0}: {1}".format(code, content))
self.content = content
self.code = code
class InfluxDBClient(object):
"""Define the standard InfluxDBClient for influxdb v0.8.
The ``InfluxDBClient`` object holds information necessary to connect
to InfluxDB. Requests can be made to InfluxDB directly through the client.
:param host: hostname to connect to InfluxDB, defaults to 'localhost'
:type host: string
    :param port: port to connect to InfluxDB, defaults to 8086
:type port: int
:param username: user to connect, defaults to 'root'
:type username: string
:param password: password of the user, defaults to 'root'
:type password: string
    :param database: database name to connect to, defaults to None
:type database: string
    :param ssl: use https instead of http to connect to InfluxDB, defaults to
False
:type ssl: boolean
    :param verify_ssl: verify SSL certificates for HTTPS requests, defaults to
False
:type verify_ssl: boolean
:param retries: number of retries your client will try before aborting,
defaults to 3. 0 indicates try until success
:type retries: int
:param timeout: number of seconds Requests will wait for your client to
establish a connection, defaults to None
:type timeout: int
    :param use_udp: use UDP to connect to InfluxDB, defaults to False
    :type use_udp: boolean
    :param udp_port: UDP port to connect to InfluxDB, defaults to 4444
:type udp_port: int
"""
def __init__(self,
host='localhost',
port=8086,
username='root',
password='root',
database=None,
ssl=False,
verify_ssl=False,
timeout=None,
retries=3,
use_udp=False,
udp_port=4444):
"""Construct a new InfluxDBClient object."""
self._host = host
self._port = port
self._username = username
self._password = password
self._database = database
self._timeout = timeout
self._retries = retries
self._verify_ssl = verify_ssl
self._use_udp = use_udp
self._udp_port = udp_port
if use_udp:
self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._scheme = "http"
if ssl is True:
self._scheme = "https"
self._baseurl = "{0}://{1}:{2}".format(
self._scheme,
self._host,
self._port)
self._headers = {
'Content-type': 'application/json',
'Accept': 'text/plain'}
@staticmethod
def from_dsn(dsn, **kwargs):
r"""Return an instaance of InfluxDBClient from given data source name.
Returns an instance of InfluxDBClient from the provided data source
name. Supported schemes are "influxdb", "https+influxdb",
"udp+influxdb". Parameters for the InfluxDBClient constructor may be
also be passed to this function.
Examples:
>> cli = InfluxDBClient.from_dsn('influxdb://username:password@\
... localhost:8086/databasename', timeout=5)
>> type(cli)
<class 'influxdb.client.InfluxDBClient'>
>> cli = InfluxDBClient.from_dsn('udp+influxdb://username:pass@\
... localhost:8086/databasename', timeout=5, udp_port=159)
        >> print('{0._baseurl} - {0._use_udp} {0._udp_port}'.format(cli))
http://localhost:8086 - True 159
:param dsn: data source name
:type dsn: string
:param **kwargs: additional parameters for InfluxDBClient.
:type **kwargs: dict
:note: parameters provided in **kwargs may override dsn parameters.
:note: when using "udp+influxdb" the specified port (if any) will be
used for the TCP connection; specify the udp port with the additional
udp_port parameter (cf. examples).
:raise ValueError: if the provided DSN has any unexpected value.
"""
init_args = {}
conn_params = urlparse(dsn)
scheme_info = conn_params.scheme.split('+')
if len(scheme_info) == 1:
scheme = scheme_info[0]
modifier = None
else:
modifier, scheme = scheme_info
if scheme != 'influxdb':
raise ValueError('Unknown scheme "{0}".'.format(scheme))
if modifier:
if modifier == 'udp':
init_args['use_udp'] = True
elif modifier == 'https':
init_args['ssl'] = True
else:
raise ValueError('Unknown modifier "{0}".'.format(modifier))
if conn_params.hostname:
init_args['host'] = conn_params.hostname
if conn_params.port:
init_args['port'] = conn_params.port
if conn_params.username:
init_args['username'] = conn_params.username
if conn_params.password:
init_args['password'] = conn_params.password
if conn_params.path and len(conn_params.path) > 1:
init_args['database'] = conn_params.path[1:]
init_args.update(kwargs)
return InfluxDBClient(**init_args)
# Change member variables
def switch_database(self, database):
"""Change client database.
:param database: the new database name to switch to
:type database: string
"""
self._database = database
def switch_db(self, database):
"""Change client database.
DEPRECATED.
"""
warnings.warn(
"switch_db is deprecated, and will be removed "
"in future versions. Please use "
"``InfluxDBClient.switch_database(database)`` instead.",
FutureWarning)
return self.switch_database(database)
def switch_user(self, username, password):
"""Change client username.
:param username: the new username to switch to
:type username: string
:param password: the new password to switch to
:type password: string
"""
self._username = username
self._password = password
def request(self, url, method='GET', params=None, data=None,
expected_response_code=200):
"""Make a http request to API."""
url = "{0}/{1}".format(self._baseurl, url)
if params is None:
params = {}
auth = {
'u': self._username,
'p': self._password
}
params.update(auth)
if data is not None and not isinstance(data, str):
data = json.dumps(data)
retry = True
_try = 0
# Try to send the request more than once by default (see #103)
while retry:
try:
response = session.request(
method=method,
url=url,
params=params,
data=data,
headers=self._headers,
verify=self._verify_ssl,
timeout=self._timeout
)
break
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout):
_try += 1
if self._retries != 0:
retry = _try < self._retries
else:
raise requests.exceptions.ConnectionError
if response.status_code == expected_response_code:
return response
else:
raise InfluxDBClientError(response.content, response.status_code)
def write(self, data):
"""Provide as convenience for influxdb v0.9.0, this may change."""
self.request(
url="write",
method='POST',
params=None,
data=data,
expected_response_code=200
)
return True
# Writing Data
#
# Assuming you have a database named foo_production you can write data
# by doing a POST to /db/foo_production/series?u=some_user&p=some_password
# with a JSON body of points.
def write_points(self, data, time_precision='s', *args, **kwargs):
"""Write to multiple time series names.
An example data blob is:
data = [
{
"points": [
[
12
]
],
"name": "cpu_load_short",
"columns": [
"value"
]
}
]
:param data: A list of dicts in InfluxDB 0.8.x data format.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
def list_chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
batch_size = kwargs.get('batch_size')
if batch_size and batch_size > 0:
for item in data:
name = item.get('name')
columns = item.get('columns')
point_list = item.get('points', [])
for batch in list_chunks(point_list, batch_size):
item = [{
"points": batch,
"name": name,
"columns": columns
}]
self._write_points(
data=item,
time_precision=time_precision)
return True
return self._write_points(data=data,
time_precision=time_precision)
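    # Illustrative batching call (a sketch only -- the batch size is an
    # assumption): writing the example blob above in chunks of 1000 points.
    #
    #   client.write_points(data, time_precision='s', batch_size=1000)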
def write_points_with_precision(self, data, time_precision='s'):
"""Write to multiple time series names.
DEPRECATED.
"""
warnings.warn(
"write_points_with_precision is deprecated, and will be removed "
"in future versions. Please use "
"``InfluxDBClient.write_points(time_precision='..')`` instead.",
FutureWarning)
return self._write_points(data=data, time_precision=time_precision)
def _write_points(self, data, time_precision):
if time_precision not in ['s', 'm', 'ms', 'u']:
raise Exception(
"Invalid time precision is given. (use 's', 'm', 'ms' or 'u')")
if self._use_udp and time_precision != 's':
raise Exception(
"InfluxDB only supports seconds precision for udp writes"
)
url = "db/{0}/series".format(self._database)
params = {
'time_precision': time_precision
}
if self._use_udp:
self.send_packet(data)
else:
self.request(
url=url,
method='POST',
params=params,
data=data,
expected_response_code=200
)
return True
# One Time Deletes
def delete_points(self, name):
"""Delete an entire series."""
url = "db/{0}/series/{1}".format(self._database, name)
self.request(
url=url,
method='DELETE',
expected_response_code=204
)
return True
# Regularly Scheduled Deletes
def create_scheduled_delete(self, json_body):
"""Create schedule delete from database.
2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.
See also: src/api/http/api.go:l57
"""
raise NotImplementedError()
# get list of deletes
# curl http://localhost:8086/db/site_dev/scheduled_deletes
#
# remove a regularly scheduled delete
# curl -X DELETE http://localhost:8086/db/site_dev/scheduled_deletes/:id
def get_list_scheduled_delete(self):
"""Get list of scheduled deletes.
2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.
See also: src/api/http/api.go:l57
"""
raise NotImplementedError()
def remove_scheduled_delete(self, delete_id):
"""Remove scheduled delete.
2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.
See also: src/api/http/api.go:l57
"""
raise NotImplementedError()
def query(self, query, time_precision='s', chunked=False):
"""Query data from the influxdb v0.8 database.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
"""
return self._query(query, time_precision=time_precision,
chunked=chunked)
# Querying Data
#
# GET db/:name/series. It takes five parameters
def _query(self, query, time_precision='s', chunked=False):
if time_precision not in ['s', 'm', 'ms', 'u']:
raise Exception(
"Invalid time precision is given. (use 's', 'm', 'ms' or 'u')")
if chunked is True:
chunked_param = 'true'
else:
chunked_param = 'false'
# Build the URL of the series to query
url = "db/{0}/series".format(self._database)
params = {
'q': query,
'time_precision': time_precision,
'chunked': chunked_param
}
response = self.request(
url=url,
method='GET',
params=params,
expected_response_code=200
)
if chunked:
try:
decoded = chunked_json.loads(response.content.decode())
except UnicodeDecodeError:
decoded = chunked_json.loads(response.content.decode('utf-8'))
return list(decoded)
return response.json()
# Creating and Dropping Databases
#
# ### create a database
# curl -X POST http://localhost:8086/db -d '{"name": "site_development"}'
#
# ### drop a database
# curl -X DELETE http://localhost:8086/db/site_development
def create_database(self, database):
"""Create a database on the InfluxDB server.
:param database: the name of the database to create
:type database: string
:rtype: boolean
"""
url = "db"
data = {'name': database}
self.request(
url=url,
method='POST',
data=data,
expected_response_code=201
)
return True
def delete_database(self, database):
"""Drop a database on the InfluxDB server.
:param database: the name of the database to delete
:type database: string
:rtype: boolean
"""
url = "db/{0}".format(database)
self.request(
url=url,
method='DELETE',
expected_response_code=204
)
return True
# ### get list of databases
# curl -X GET http://localhost:8086/db
def get_list_database(self):
"""Get the list of databases."""
url = "db"
response = self.request(
url=url,
method='GET',
expected_response_code=200
)
return response.json()
def get_database_list(self):
"""Get the list of databases.
DEPRECATED.
"""
warnings.warn(
"get_database_list is deprecated, and will be removed "
"in future versions. Please use "
"``InfluxDBClient.get_list_database`` instead.",
FutureWarning)
return self.get_list_database()
def delete_series(self, series):
"""Drop a series on the InfluxDB server.
:param series: the name of the series to delete
:type series: string
:rtype: boolean
"""
url = "db/{0}/series/{1}".format(
self._database,
series
)
self.request(
url=url,
method='DELETE',
expected_response_code=204
)
return True
def get_list_series(self):
"""Get a list of all time series in a database."""
response = self._query('list series')
return [series[1] for series in response[0]['points']]
def get_list_continuous_queries(self):
"""Get a list of continuous queries."""
response = self._query('list continuous queries')
return [query[2] for query in response[0]['points']]
# Security
# get list of cluster admins
# curl http://localhost:8086/cluster_admins?u=root&p=root
# add cluster admin
# curl -X POST http://localhost:8086/cluster_admins?u=root&p=root \
# -d '{"name": "paul", "password": "i write teh docz"}'
# update cluster admin password
# curl -X POST http://localhost:8086/cluster_admins/paul?u=root&p=root \
# -d '{"password": "new pass"}'
# delete cluster admin
# curl -X DELETE http://localhost:8086/cluster_admins/paul?u=root&p=root
# Database admins, with a database name of site_dev
# get list of database admins
# curl http://localhost:8086/db/site_dev/admins?u=root&p=root
# add database admin
# curl -X POST http://localhost:8086/db/site_dev/admins?u=root&p=root \
# -d '{"name": "paul", "password": "i write teh docz"}'
# update database admin password
# curl -X POST http://localhost:8086/db/site_dev/admins/paul?u=root&p=root\
# -d '{"password": "new pass"}'
# delete database admin
# curl -X DELETE \
# http://localhost:8086/db/site_dev/admins/paul?u=root&p=root
def get_list_cluster_admins(self):
"""Get list of cluster admins."""
response = self.request(
url="cluster_admins",
method='GET',
expected_response_code=200
)
return response.json()
def add_cluster_admin(self, new_username, new_password):
"""Add cluster admin."""
data = {
'name': new_username,
'password': new_password
}
self.request(
url="cluster_admins",
method='POST',
data=data,
expected_response_code=200
)
return True
def update_cluster_admin_password(self, username, new_password):
"""Update cluster admin password."""
url = "cluster_admins/{0}".format(username)
data = {
'password': new_password
}
self.request(
url=url,
method='POST',
data=data,
expected_response_code=200
)
return True
def delete_cluster_admin(self, username):
"""Delete cluster admin."""
url = "cluster_admins/{0}".format(username)
self.request(
url=url,
method='DELETE',
expected_response_code=200
)
return True
def set_database_admin(self, username):
"""Set user as database admin."""
return self.alter_database_admin(username, True)
def unset_database_admin(self, username):
"""Unset user as database admin."""
return self.alter_database_admin(username, False)
def alter_database_admin(self, username, is_admin):
"""Alter the database admin."""
url = "db/{0}/users/{1}".format(self._database, username)
data = {'admin': is_admin}
self.request(
url=url,
method='POST',
data=data,
expected_response_code=200
)
return True
def get_list_database_admins(self):
"""Get list of database admins.
2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.
See also: src/api/http/api.go:l57
"""
raise NotImplementedError()
def add_database_admin(self, new_username, new_password):
"""Add cluster admin.
2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.
See also: src/api/http/api.go:l57
"""
raise NotImplementedError()
def update_database_admin_password(self, username, new_password):
"""Update database admin password.
2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.
See also: src/api/http/api.go:l57
"""
raise NotImplementedError()
def delete_database_admin(self, username):
"""Delete database admin.
2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.
See also: src/api/http/api.go:l57
"""
raise NotImplementedError()
###
# Limiting User Access
# Database users
# get list of database users
# curl http://localhost:8086/db/site_dev/users?u=root&p=root
# add database user
# curl -X POST http://localhost:8086/db/site_dev/users?u=root&p=root \
# -d '{"name": "paul", "password": "i write teh docz"}'
# update database user password
# curl -X POST http://localhost:8086/db/site_dev/users/paul?u=root&p=root \
# -d '{"password": "new pass"}'
# delete database user
# curl -X DELETE http://localhost:8086/db/site_dev/users/paul?u=root&p=root
def get_database_users(self):
"""Get list of database users."""
url = "db/{0}/users".format(self._database)
response = self.request(
url=url,
method='GET',
expected_response_code=200
)
return response.json()
def add_database_user(self, new_username, new_password, permissions=None):
"""Add database user.
:param permissions: A ``(readFrom, writeTo)`` tuple
"""
url = "db/{0}/users".format(self._database)
data = {
'name': new_username,
'password': new_password
}
if permissions:
try:
data['readFrom'], data['writeTo'] = permissions
except (ValueError, TypeError):
raise TypeError(
"'permissions' must be (readFrom, writeTo) tuple"
)
self.request(
url=url,
method='POST',
data=data,
expected_response_code=200
)
return True
def update_database_user_password(self, username, new_password):
"""Update password."""
return self.alter_database_user(username, new_password)
def alter_database_user(self, username, password=None, permissions=None):
"""Alter a database user and/or their permissions.
:param permissions: A ``(readFrom, writeTo)`` tuple
:raise TypeError: if permissions cannot be read.
:raise ValueError: if neither password nor permissions provided.
"""
url = "db/{0}/users/{1}".format(self._database, username)
if not password and not permissions:
raise ValueError("Nothing to alter for user {0}.".format(username))
data = {}
if password:
data['password'] = password
if permissions:
try:
data['readFrom'], data['writeTo'] = permissions
except (ValueError, TypeError):
raise TypeError(
"'permissions' must be (readFrom, writeTo) tuple"
)
self.request(
url=url,
method='POST',
data=data,
expected_response_code=200
)
        if username == self._username and password:
            self._password = password
return True
def delete_database_user(self, username):
"""Delete database user."""
url = "db/{0}/users/{1}".format(self._database, username)
self.request(
url=url,
method='DELETE',
expected_response_code=200
)
return True
# update the user by POSTing to db/site_dev/users/paul
def update_permission(self, username, json_body):
"""Update read/write permission.
2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.
See also: src/api/http/api.go:l57
"""
raise NotImplementedError()
def send_packet(self, packet):
"""Send a UDP packet along the wire."""
data = json.dumps(packet)
byte = data.encode('utf-8')
self.udp_socket.sendto(byte, (self._host, self._udp_port))
| {
"content_hash": "ac58be1d8201619c4141f346c026ab79",
"timestamp": "",
"source": "github",
"line_count": 842,
"max_line_length": 79,
"avg_line_length": 30.72209026128266,
"alnum_prop": 0.5583732797278491,
"repo_name": "tzonghao/influxdb-python",
"id": "965a91dbbc66ee82a6d663ceb8d8338a7dba4247",
"size": "25892",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "influxdb/influxdb08/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "297368"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import re
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.functional import SimpleLazyObject
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
def _lazy_re_compile(regex, flags=0):
"""Lazily compile a regex with flags."""
def _compile():
# Compile the regex if it was not passed pre-compiled.
if isinstance(regex, six.string_types):
return re.compile(regex, flags)
else:
assert not flags, "flags must be empty if regex is passed pre-compiled"
return regex
return SimpleLazyObject(_compile)
@deconstructible
class RegexValidator(object):
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, six.string_types):
raise TypeError("If the flags are set, regex must be a regular expression string.")
self.regex = _lazy_re_compile(self.regex, self.flags)
def __call__(self, value):
"""
Validates that the input matches the regular expression
if inverse_match is False, otherwise raises ValidationError.
"""
if not (self.inverse_match is not bool(self.regex.search(
force_text(value)))):
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
def __ne__(self, other):
return not (self == other)
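# Illustrative usage (a sketch only -- the pattern and message below are
# assumptions, not validators defined by this module):
#
#   hex_color = RegexValidator(
#       regex=r'^#(?:[0-9a-fA-F]{3}){1,2}$',
#       message='Enter a valid hex color.',
#   )
#   hex_color('#ff8800')      # passes silently
#   hex_color('not-a-color')  # raises ValidationError
#
# With inverse_match=True the validator raises when the pattern does match.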
@deconstructible
class URLValidator(RegexValidator):
ul = '\u00a1-\uffff' # unicode letters range (must be a unicode string, not a raw string)
# IP patterns
ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later)
# Host patterns
hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?'
# Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1
domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*'
tld_re = (
r'\.' # dot
r'(?!-)' # can't start with a dash
r'(?:[a-z' + ul + '-]{2,63}' # domain label
r'|xn--[a-z0-9]{1,59})' # or punycode label
r'(?<!-)' # can't end with a dash
r'\.?' # may have a trailing dot
)
host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately
r'(?:\S+(?::\S*)?@)?' # user:pass authentication
r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')'
r'(?::\d{2,5})?' # port
r'(?:[/?#][^\s]*)?' # resource path
r'\Z', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
def __init__(self, schemes=None, **kwargs):
super(URLValidator, self).__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
value = force_text(value)
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super(URLValidator, self).__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
try:
scheme, netloc, path, query, fragment = urlsplit(value)
except ValueError: # for example, "Invalid IPv6 URL"
raise ValidationError(self.message, code=self.code)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
# Now verify IPv6 in the netloc part
host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
if host_match:
potential_ip = host_match.groups()[0]
try:
validate_ipv6_address(potential_ip)
except ValidationError:
raise ValidationError(self.message, code=self.code)
url = value
# The maximum length of a full host name is 253 characters per RFC 1034
# section 3.1. It's defined to be 255 bytes or less, but this includes
# one byte for the length of the name and one byte for the trailing dot
# that's used to indicate absolute names in DNS.
if len(urlsplit(value).netloc) > 253:
raise ValidationError(self.message, code=self.code)
integer_validator = RegexValidator(
_lazy_re_compile(r'^-?\d+\Z'),
message=_('Enter a valid integer.'),
code='invalid',
)
def validate_integer(value):
return integer_validator(value)
@deconstructible
class EmailValidator(object):
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = _lazy_re_compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE)
domain_regex = _lazy_re_compile(
# max length for domain name labels is 63 characters per RFC 1034
r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
re.IGNORECASE)
literal_regex = _lazy_re_compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]\Z',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
value = force_text(value)
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.validate_domain_part(domain_part):
return
except UnicodeError:
pass
raise ValidationError(self.message, code=self.code)
def validate_domain_part(self, domain_part):
if self.domain_regex.match(domain_part):
return True
literal_match = self.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
try:
validate_ipv46_address(ip_address)
return True
except ValidationError:
pass
return False
def __eq__(self, other):
return (
isinstance(other, EmailValidator) and
(self.domain_whitelist == other.domain_whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
validate_email = EmailValidator()
slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(
slug_re,
_("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
'invalid'
)
slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z', re.U)
validate_unicode_slug = RegexValidator(
slug_unicode_re,
_("Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens."),
'invalid'
)
ipv4_re = _lazy_re_compile(r'^(25[0-5]|2[0-4][0-9]|[0-1]?[0-9]?[0-9])(\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9]?[0-9])){3}\Z')
validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid')
def validate_ipv6_address(value):
if not is_valid_ipv6_address(value):
raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
try:
validate_ipv4_address(value)
except ValidationError:
try:
validate_ipv6_address(value)
except ValidationError:
raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
ip_address_validator_map = {
'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
"""
    Depending on the given parameters, return the appropriate validators for
    the GenericIPAddressField.
    This code is here because it is exactly the same for the model and the form field.
"""
if protocol != 'both' and unpack_ipv4:
raise ValueError(
"You can only use `unpack_ipv4` if `protocol` is set to 'both'")
try:
return ip_address_validator_map[protocol.lower()]
except KeyError:
raise ValueError("The protocol '%s' is unknown. Supported: %s"
% (protocol, list(ip_address_validator_map)))
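# Illustrative lookup (a sketch only -- the protocol value is an assumption):
#
#   validators, error_message = ip_address_validators('ipv4', unpack_ipv4=False)
#   # validators == [validate_ipv4_address]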
def int_list_validator(sep=',', message=None, code='invalid', allow_negative=False):
regexp = _lazy_re_compile(r'^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z' % {
'neg': '(-)?' if allow_negative else '',
'sep': re.escape(sep),
})
return RegexValidator(regexp, message=message, code=code)
validate_comma_separated_integer_list = int_list_validator(
message=_('Enter only digits separated by commas.'),
)
@deconstructible
class BaseValidator(object):
message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value, message=None):
self.limit_value = limit_value
if message:
self.message = message
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value}
if self.compare(cleaned, self.limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.limit_value == other.limit_value and
self.message == other.message and
self.code == other.code
)
def compare(self, a, b):
return a is not b
def clean(self, x):
return x
@deconstructible
class MaxValueValidator(BaseValidator):
message = _('Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
def compare(self, a, b):
return a > b
@deconstructible
class MinValueValidator(BaseValidator):
message = _('Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
def compare(self, a, b):
return a < b
@deconstructible
class MinLengthValidator(BaseValidator):
message = ungettext_lazy(
'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'min_length'
def compare(self, a, b):
return a < b
def clean(self, x):
return len(x)
@deconstructible
class MaxLengthValidator(BaseValidator):
message = ungettext_lazy(
'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'max_length'
def compare(self, a, b):
return a > b
def clean(self, x):
return len(x)
@deconstructible
class DecimalValidator(object):
"""
Validate that the input does not exceed the maximum number of digits
expected, otherwise raise ValidationError.
"""
messages = {
'max_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'
),
'max_decimal_places': ungettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'
),
'max_whole_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'
),
}
def __init__(self, max_digits, decimal_places):
self.max_digits = max_digits
self.decimal_places = decimal_places
def __call__(self, value):
digit_tuple, exponent = value.as_tuple()[1:]
decimals = abs(exponent)
# digit_tuple doesn't include any leading zeros.
digits = len(digit_tuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(
self.messages['max_digits'],
code='max_digits',
params={'max': self.max_digits},
)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(
self.messages['max_decimal_places'],
code='max_decimal_places',
params={'max': self.decimal_places},
)
if (self.max_digits is not None and self.decimal_places is not None and
whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(
self.messages['max_whole_digits'],
code='max_whole_digits',
params={'max': (self.max_digits - self.decimal_places)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.max_digits == other.max_digits and
self.decimal_places == other.decimal_places
)
@deconstructible
class FileExtensionValidator(object):
message = _(
"File extension '%(extension)s' is not allowed. "
"Allowed extensions are: '%(allowed_extensions)s'."
)
code = 'invalid_extension'
def __init__(self, allowed_extensions=None, message=None, code=None):
self.allowed_extensions = allowed_extensions
if message is not None:
self.message = message
if code is not None:
self.code = code
def __call__(self, value):
extension = os.path.splitext(value.name)[1][1:].lower()
if self.allowed_extensions is not None and extension not in self.allowed_extensions:
raise ValidationError(
self.message,
code=self.code,
params={
'extension': extension,
'allowed_extensions': ', '.join(self.allowed_extensions)
}
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.allowed_extensions == other.allowed_extensions and
self.message == other.message and
self.code == other.code
)
def get_available_image_extensions():
try:
from PIL import Image
except ImportError:
return []
else:
Image.init()
return [ext.lower()[1:] for ext in Image.EXTENSION.keys()]
validate_image_file_extension = FileExtensionValidator(
allowed_extensions=get_available_image_extensions(),
)
| {
"content_hash": "79960d4784d0759708992f25a3b1dca5",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 119,
"avg_line_length": 35.42574257425743,
"alnum_prop": 0.5771380659586361,
"repo_name": "guettli/django",
"id": "067a3743eb8e6d52e82c25380eead256a457ae7e",
"size": "17890",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django/core/validators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53138"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12149968"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
import numpy as np
import pytest
from PIL import Image
from imgavg import imgavg
def test_happy_path(args):
with tempfile.TemporaryDirectory() as temp_dir:
args["output"] = os.path.join(temp_dir, "out.png")
result = imgavg.average(args)
im = Image.open("tests/assets/happy_path/1.png")
expected = np.array(im).astype(np.uint8)
assert np.array_equal(result, expected)
@pytest.mark.xfail(raises=imgavg.InconsistentImageError)
def test_inconsistent_image_sizes(args):
"""you cant mix and match array shapes"""
args["folder"] = "tests/assets/inconsistent_images/"
imgavg.average(args)
@pytest.mark.xfail(raises=imgavg.InsufficientImagesError)
def test_no_images_in_folder(args):
"""you cant average images if there are no images"""
with tempfile.TemporaryDirectory() as temp_dir:
args["folder"] = temp_dir
imgavg.average(args)
@pytest.mark.xfail(raises=imgavg.InsufficientImagesError)
def test_not_enough_images(args):
"""one picture isnt enough to average"""
with tempfile.TemporaryDirectory() as temp_dir:
shutil.copy("tests/assets/happy_path/1.png", os.path.join(temp_dir, "1.png"))
args["folder"] = temp_dir
imgavg.average(args)
| {
"content_hash": "0f71a829f5e2cc714116bb8f6d1a59d2",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 85,
"avg_line_length": 29.113636363636363,
"alnum_prop": 0.7033567525370804,
"repo_name": "DoWhileGeek/imgavg",
"id": "db4e0accbf630a70622e7c628d992e806326b000",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_imgavg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5927"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
dajaxice_autodiscover()
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^django_test/','PersonalSite.views.django_test',name='django_test'),
    url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.html')),
url(r'^presentation/','PersonalSite.views.presentation',name='presentation'),
url(r'^test$','PersonalSite.views.test',name='test'),
# Admin
url(r'^admin/', include(admin.site.urls)),
# Ajax urls
(dajaxice_config.dajaxice_url,include('dajaxice.urls')),
# blog
url(r'^blog/',include('blog.urls')),
url(r'^project/',include('projects.urls')),
# project short cuts
(r'^xkcd-clock/$', RedirectView.as_view(url='/project/xkcd-clock/')),
url(r'^$','PersonalSite.views.homepage', name='homepage'),
url(r'','PersonalSite.views.homepage_redirect',name='homepage_redirect'),
) + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
| {
"content_hash": "b106b8d15a4c2711a394a4618f1cbc15",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 81,
"avg_line_length": 30,
"alnum_prop": 0.7281481481481481,
"repo_name": "joeyuan19/flaming-bear",
"id": "c6767eb8aa69b35d1ae9cff5353c526d35fb8f9b",
"size": "1350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PersonalSite/PersonalSite/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119739"
},
{
"name": "HTML",
"bytes": "122359"
},
{
"name": "JavaScript",
"bytes": "306560"
},
{
"name": "Python",
"bytes": "39481"
},
{
"name": "Shell",
"bytes": "21829"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import random
import functools
import numpy as np
import paddle
from imgtool import process_image
random.seed(0)
DATA_DIR = "./data/Stanford_Online_Products/"
TRAIN_LIST = './data/Stanford_Online_Products/Ebay_train.txt'
VAL_LIST = './data/Stanford_Online_Products/Ebay_test.txt'
def init_sop(mode):
if mode == 'train':
train_data = {}
train_image_list = []
train_list = open(TRAIN_LIST, "r").readlines()
for i, item in enumerate(train_list):
items = item.strip().split()
if items[0] == 'image_id':
continue
path = items[3]
label = int(items[1]) - 1
train_image_list.append((path, label))
if label not in train_data:
train_data[label] = []
train_data[label].append(path)
random.shuffle(train_image_list)
print("{} dataset size: {}".format(mode, len(train_data)))
return train_data, train_image_list
else:
val_data = {}
val_image_list = []
test_image_list = []
val_list = open(VAL_LIST, "r").readlines()
for i, item in enumerate(val_list):
items = item.strip().split()
if items[0] == 'image_id':
continue
path = items[3]
label = int(items[1])
val_image_list.append((path, label))
test_image_list.append(path)
if label not in val_data:
val_data[label] = []
val_data[label].append(path)
print("{} dataset size: {}".format(mode, len(val_data)))
if mode == 'val':
return val_data, val_image_list
else:
return test_image_list
def common_iterator(data, settings):
batch_size = settings.train_batch_size
samples_each_class = settings.samples_each_class
assert (batch_size % samples_each_class == 0)
class_num = batch_size // samples_each_class
def train_iterator():
count = 0
labs = list(data.keys())
lab_num = len(labs)
ind = list(range(0, lab_num))
while True:
random.shuffle(ind)
ind_sample = ind[:class_num]
for ind_i in ind_sample:
lab = labs[ind_i]
data_list = data[lab]
data_ind = list(range(0, len(data_list)))
random.shuffle(data_ind)
anchor_ind = data_ind[:samples_each_class]
for anchor_ind_i in anchor_ind:
anchor_path = DATA_DIR + data_list[anchor_ind_i]
yield anchor_path, lab
count += 1
if count >= settings.total_iter_num + 1:
return
return train_iterator
def triplet_iterator(data, settings):
batch_size = settings.train_batch_size
assert (batch_size % 3 == 0)
def train_iterator():
total_count = settings.train_batch_size * (settings.total_iter_num + 1)
count = 0
labs = list(data.keys())
lab_num = len(labs)
ind = list(range(0, lab_num))
while True:
random.shuffle(ind)
ind_pos, ind_neg = ind[:2]
lab_pos = labs[ind_pos]
pos_data_list = data[lab_pos]
data_ind = list(range(0, len(pos_data_list)))
random.shuffle(data_ind)
anchor_ind, pos_ind = data_ind[:2]
lab_neg = labs[ind_neg]
neg_data_list = data[lab_neg]
neg_ind = random.randint(0, len(neg_data_list) - 1)
anchor_path = DATA_DIR + pos_data_list[anchor_ind]
yield anchor_path, lab_pos
pos_path = DATA_DIR + pos_data_list[pos_ind]
yield pos_path, lab_pos
neg_path = DATA_DIR + neg_data_list[neg_ind]
yield neg_path, lab_neg
count += 3
if count >= total_count:
return
return train_iterator
def arcmargin_iterator(data, settings):
def train_iterator():
total_count = settings.train_batch_size * (settings.total_iter_num + 1)
count = 0
while True:
for items in data:
path, label = items
path = DATA_DIR + path
yield path, label
count += 1
if count >= total_count:
return
return train_iterator
def image_iterator(data, mode):
def val_iterator():
for items in data:
path, label = items
path = DATA_DIR + path
yield path, label
def test_iterator():
for item in data:
path = item
path = DATA_DIR + path
yield [path]
if mode == 'val':
return val_iterator
else:
return test_iterator
def createreader(settings, mode):
def metric_reader():
if mode == 'train':
train_data, train_image_list = init_sop('train')
loss_name = settings.loss_name
if loss_name in ["softmax", "arcmargin"]:
return arcmargin_iterator(train_image_list, settings)()
elif loss_name == 'triplet':
return triplet_iterator(train_data, settings)()
else:
return common_iterator(train_data, settings)()
elif mode == 'val':
val_data, val_image_list = init_sop('val')
return image_iterator(val_image_list, 'val')()
else:
test_image_list = init_sop('test')
return image_iterator(test_image_list, 'test')()
image_shape = settings.image_shape.split(',')
assert(image_shape[1] == image_shape[2])
image_size = int(image_shape[2])
keep_order = False if mode != 'train' or settings.loss_name in ['softmax', 'arcmargin'] else True
image_mapper = functools.partial(process_image,
mode=mode, color_jitter=False, rotate=False, crop_size=image_size)
reader = paddle.reader.xmap_readers(
image_mapper, metric_reader, 8, 1000, order=keep_order)
return reader
def train(settings):
return createreader(settings, "train")
def test(settings):
return createreader(settings, "val")
def infer(settings):
return createreader(settings, "test")
| {
"content_hash": "299f34e9983ae81a3d8976d6b3a33faf",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 101,
"avg_line_length": 33.78835978835979,
"alnum_prop": 0.5490134669589728,
"repo_name": "kuke/models",
"id": "ac8f257ecbadbb08454ffc616b1c587455cc92b6",
"size": "6386",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/PaddleCV/metric_learning/reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
} |
from addresses.models import Person, Organisation
from django import forms
class PersonForm(forms.ModelForm):
"""
ModelForm for creating/updating Person instances. All field properties are inherited from
the Person model.
"""
class Meta:
model = Person
fields = (
'title',
'firstname',
'middlename',
'surname',
'telephone',
'email',
'organisation',
'address_line1',
'address_line2',
'address_line3',
'address_city',
'address_county',
'address_postcode',
)
class OrganisationForm(forms.ModelForm):
"""
ModelForm for creating/updating Organisation instances. All field properties are inherited from
the Organisation model.
"""
class Meta:
model = Organisation
fields = (
'org_name',
'telephone',
'email',
'address_line1',
'address_line2',
'address_line3',
'address_city',
'address_county',
'address_postcode',
)
| {
"content_hash": "c47041f6e11fc0643d5e656fc21d8eec",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 99,
"avg_line_length": 23.4,
"alnum_prop": 0.5196581196581197,
"repo_name": "Alexceptional/Adm-Web-Assignment",
"id": "c32fae6537417cce1ea0556bc8312de78c550848",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "addresses/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "121872"
},
{
"name": "HTML",
"bytes": "159133"
},
{
"name": "JavaScript",
"bytes": "702355"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "19940"
},
{
"name": "Ruby",
"bytes": "1030"
},
{
"name": "Shell",
"bytes": "1044"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
def addr2lineQuery(binary, addr):
line_query = ["addr2line", "-e", binary, addr]
assert not os.path.exists("tmpaddrquery.out")
fout = open("tmpaddrquery.out", "w")
subprocess.call(line_query, stdout=fout, stderr=fout)
assert os.path.exists("tmpaddrquery.out")
line_result = open("tmpaddrquery.out", "r").readlines()
assert len(line_result) == 1
line_info = line_result[0].strip()
os.remove("tmpaddrquery.out")
assert not os.path.exists("tmpaddrquery.out")
return line_info[line_info.rfind("/"):].strip()
def main():
if len(sys.argv) != 3:
print
print "usage: python generateScheduleInfo.py [binary]",
print " [output of relaxed serializer]"
print
print "purpose: attempts to parse the output of the relaxed",
print "serializer and generate human readable information about",
print "where context switches occurred in an execution. Outputs ",
print "to switch_summary.log"
print
sys.exit(0)
assert os.path.exists(sys.argv[1])
assert os.path.exists(sys.argv[2])
binary = sys.argv[1]
trace = sys.argv[2]
readable_trace = []
switch_list = []
fin = open(trace, "r").readlines()
for x in fin:
if "Preemption(" in x:
switch_list.append(x.strip())
elif "NonPreemptive" in x:
switch_list.append(x.strip())
if "0x" in x:
addr = x.find("0x")
q = x[addr:]
closeBracket = False
if ")" in q:
closeBracket = True
q.strip(")")
debuginfo = addr2lineQuery(binary, q)
if "??" in debuginfo:
readable_trace.append(x)
else:
if closeBracket:
readable_trace.append(x[:addr] + debuginfo + ")\n")
else:
readable_trace.append(x[:addr] + debuginfo + "\n")
else:
readable_trace.append(x)
outfile = open("readable_trace.log", "w")
for x in readable_trace:
outfile.write(x)
outfile.close()
outfile = open("switch_summary.log", "w")
while len(switch_list) > 0:
item = switch_list.pop(0)
item_list = item.split(":")
assert len(item_list) == 4
string = ""
if "Preemption" in item_list[0]:
string += "Preemptive Context Switch from thread "
else:
assert "NonPreemptive" in item_list[0]
string += "Non-preemptive Context Switch from thread "
string += item_list[1] + " to thread " + item_list[2] + " at "
line_info = addr2lineQuery(binary, item_list[3])
string += line_info + "\n"
outfile.write(string)
print string,
outfile.close()
if __name__ == "__main__":
main()
| {
"content_hash": "ee7cb47a4814febdcfa137454f1707a4",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 75,
"avg_line_length": 27.571428571428573,
"alnum_prop": 0.5557858376511227,
"repo_name": "mdsalman729/flexpret_project",
"id": "2477257dcc579f43f9fdfec19c5c430ec211ff35",
"size": "3141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emulator/concurrit-poplsyntax/concurrit-poplsyntax/bench/pfscan/inputs/in2/simpl/generateScheduleInfo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "585665"
},
{
"name": "C",
"bytes": "3585015"
},
{
"name": "C++",
"bytes": "4891351"
},
{
"name": "CSS",
"bytes": "145407"
},
{
"name": "HTML",
"bytes": "431236"
},
{
"name": "JavaScript",
"bytes": "267745"
},
{
"name": "Makefile",
"bytes": "57803"
},
{
"name": "Perl",
"bytes": "12789"
},
{
"name": "Python",
"bytes": "287349"
},
{
"name": "Scala",
"bytes": "115368"
},
{
"name": "Shell",
"bytes": "152527"
},
{
"name": "Tcl",
"bytes": "7348"
},
{
"name": "Verilog",
"bytes": "587703"
}
],
"symlink_target": ""
} |
import os
import sys
import re
from distutils.dep_util import newer_group, newer
from glob import glob
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
config = Configuration('isolve',parent_package,top_path)
lapack_opt = get_info('lapack_opt')
if not lapack_opt:
raise NotFoundError,'no lapack/blas resources found'
# iterative methods
methods = ['BiCGREVCOM.f.src',
'BiCGSTABREVCOM.f.src',
'CGREVCOM.f.src',
'CGSREVCOM.f.src',
# 'ChebyREVCOM.f.src',
'GMRESREVCOM.f.src',
# 'JacobiREVCOM.f.src',
'QMRREVCOM.f.src',
# 'SORREVCOM.f.src'
]
Util = ['STOPTEST2.f.src','getbreak.f.src']
sources = Util + methods + ['_iterative.pyf.src']
config.add_extension('_iterative',
sources = [join('iterative',x) for x in sources],
extra_info = lapack_opt
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| {
"content_hash": "a9e8dab8f03f9d06135e7f7eae6aa75c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 29.377777777777776,
"alnum_prop": 0.5832072617246596,
"repo_name": "huard/scipy-work",
"id": "5cfbbffc1014694abd3c83891c4f2891df754a81",
"size": "1396",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/sparse/linalg/isolve/setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from datapackage_pipelines_knesset.common.base_processors.base import BaseProcessor
from datapackage_pipelines.utilities.resources import PROP_STREAMING
class BaseResourceProcessor(BaseProcessor):
"""Base class for processing a single resource"""
def __init__(self, *args, **kwargs):
super(BaseResourceProcessor, self).__init__(*args, **kwargs)
# the descriptor of the selected resource (only 1 resource is processed by this processor)
self._resource_descriptor = None
# the selected resource number
self._resource_number = None
def _get_schema(self, resource_descriptor):
# can be extended to provide a hard-coded schema
# or to modify the schema from the input resource descriptor
return resource_descriptor.get("schema", {"fields": []})
def _get_output_resource_name(self):
return self._parameters.get("resource")
def _get_output_resource_path(self):
return "data/{}.csv".format(self._get_output_resource_name())
def _is_matching_resource_descriptor(self, resource_number, resource_descriptor):
# see the comment on _is_matching_resource_number
return resource_descriptor["name"] == self._get_output_resource_name()
def _is_matching_resource_number(self, resource_number, resource_descriptor=None):
# this is called from both _filter_resource_descriptors and filter_resources
# the first one that matches will store the resource number
# for example, if resource_descriptor matched an input resource -
        # it will use the same number for matching the output resource
if self._resource_number is None:
if not resource_descriptor:
resource_descriptor = self._get_resource_descriptor(resource_number)
if self._is_matching_resource_descriptor(resource_number, resource_descriptor):
self._resource_number = resource_number
return True
else:
return False
else:
return self._resource_number == resource_number
def _filter_resource_descriptors(self, resource_descriptors):
filtered_descriptors = []
for resource_number, resource_descriptor in enumerate(resource_descriptors):
if self._is_matching_resource_number(resource_number, resource_descriptor):
resource_descriptor = self._filter_resource_descriptor(resource_number, resource_descriptor)
filtered_descriptors.append(resource_descriptor)
return filtered_descriptors
def _filter_resource_descriptor(self, resource_number, resource_descriptor):
# allows to modify the resource descriptor
# if you just need to modify the schema - you should extend _get_schema instead
self._schema = self._get_schema(resource_descriptor)
resource_descriptor = dict(resource_descriptor, **{"name": self._get_output_resource_name(),
"path": self._get_output_resource_path(),
"schema": self._schema,
PROP_STREAMING: True})
self._resource_descriptor = resource_descriptor
return resource_descriptor
def _filter_resources(self, resources):
# modified to only call filter methods for the matching resource
# other resources are yielded as-is without any processing
for resource_number, resource_data in enumerate(resources):
if self._is_matching_resource_number(resource_number):
yield self._filter_resource(resource_number, resource_data)
else:
yield resource_data
def _filter_resource(self, resource_number, resource_data):
# this method is called only for the matching resource
# it should be extended to provide code to run before or after iterating over the data
self._delay_limit_initialize()
yield from super(BaseResourceProcessor, self)._filter_resource(resource_number, resource_data)
def _filter_row(self, resource_number, row):
# this method is called only the matching resource's rows
for row in super(BaseResourceProcessor, self)._filter_row(resource_number, row):
if self._delay_limit_check():
self._incr_stat("delay limit skipped rows")
else:
yield row
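# --- Illustrative sketch (not from the original module): a minimal subclass
# wiring together the extension points described in the comments above. The
# "processed" field added here is purely hypothetical; the resource name still
# comes from the pipeline parameters as in the base class.
class ExampleResourceProcessor(BaseResourceProcessor):
    def _get_schema(self, resource_descriptor):
        schema = super(ExampleResourceProcessor, self)._get_schema(resource_descriptor)
        schema["fields"].append({"name": "processed", "type": "boolean"})
        return schema
    def _filter_row(self, resource_number, row):
        for row in super(ExampleResourceProcessor, self)._filter_row(resource_number, row):
            row["processed"] = True
            yield row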
| {
"content_hash": "84fd4fe1fe46a2d868c03a6379ac2a1e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 108,
"avg_line_length": 52.76470588235294,
"alnum_prop": 0.6528428093645485,
"repo_name": "hasadna/knesset-data-pipelines",
"id": "0306dabceece55976e0d3df394a5e1479bd220f5",
"size": "4485",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "datapackage_pipelines_knesset/common/base_processors/base_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1469"
},
{
"name": "Jupyter Notebook",
"bytes": "4163927"
},
{
"name": "Python",
"bytes": "294483"
},
{
"name": "Shell",
"bytes": "1601"
}
],
"symlink_target": ""
} |
import logging
from datetime import datetime
from django.contrib.auth.models import User
from django.db import DatabaseError
from desktop.conf import LDAP
from models import UserProfile, get_profile
from views import import_ldap_users
import ldap_access
LOG = logging.getLogger(__name__)
class LdapSynchronizationMiddleware(object):
"""
Synchronize against LDAP authority.
"""
USER_CACHE_NAME = 'ldap_use_group_sync_cache'
def process_request(self, request):
user = request.user
if not user or not user.is_authenticated():
return
if not User.objects.filter(username=user.username, userprofile__creation_method=str(UserProfile.CreationMethod.EXTERNAL)).exists():
LOG.warn("User %s is not an Ldap user" % user.username)
return
# Cache should be cleared when user logs out.
if self.USER_CACHE_NAME not in request.session:
if LDAP.LDAP_SERVERS.get():
connection = ldap_access.get_connection_from_server(next(LDAP.LDAP_SERVERS.__iter__()))
else:
connection = ldap_access.get_connection_from_server()
import_ldap_users(connection, user.username, sync_groups=True, import_by_dn=False)
request.session[self.USER_CACHE_NAME] = True
request.session.modified = True
class UpdateLastActivityMiddleware(object):
"""
Middleware to track the last activity of a user.
"""
def process_request(self, request):
user = request.user
if not user or not user.is_authenticated():
return
profile = get_profile(user)
profile.last_activity = datetime.now()
try:
profile.save()
except DatabaseError:
      LOG.exception('Error saving profile information')
| {
"content_hash": "fb584023c2111d8380b87c9d8766002a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 135,
"avg_line_length": 26.19047619047619,
"alnum_prop": 0.7066666666666667,
"repo_name": "vmax-feihu/hue",
"id": "6ba9f27a4358345b138e5ae5245797306e2638ad",
"size": "2442",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "apps/useradmin/src/useradmin/middleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2397157"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "441429"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "24042046"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "3025185"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Makefile",
"bytes": "98329"
},
{
"name": "Mako",
"bytes": "2291836"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "38118517"
},
{
"name": "Scala",
"bytes": "214557"
},
{
"name": "Shell",
"bytes": "54721"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "250165"
},
{
"name": "XSLT",
"bytes": "510395"
}
],
"symlink_target": ""
} |
"""Support for Ambient Weather Station Service."""
import asyncio
import logging
from aioambient import Client
from aioambient.errors import WebsocketError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_NAME,
ATTR_LOCATION,
CONF_API_KEY,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
from .config_flow import configured_instances
from .const import (
ATTR_LAST_DATA,
CONF_APP_KEY,
DATA_CLIENT,
DOMAIN,
TOPIC_UPDATE,
TYPE_BINARY_SENSOR,
TYPE_SENSOR,
)
_LOGGER = logging.getLogger(__name__)
DATA_CONFIG = "config"
DEFAULT_SOCKET_MIN_RETRY = 15
DEFAULT_WATCHDOG_SECONDS = 5 * 60
TYPE_24HOURRAININ = "24hourrainin"
TYPE_BAROMABSIN = "baromabsin"
TYPE_BAROMRELIN = "baromrelin"
TYPE_BATT1 = "batt1"
TYPE_BATT10 = "batt10"
TYPE_BATT2 = "batt2"
TYPE_BATT3 = "batt3"
TYPE_BATT4 = "batt4"
TYPE_BATT5 = "batt5"
TYPE_BATT6 = "batt6"
TYPE_BATT7 = "batt7"
TYPE_BATT8 = "batt8"
TYPE_BATT9 = "batt9"
TYPE_BATTOUT = "battout"
TYPE_CO2 = "co2"
TYPE_DAILYRAININ = "dailyrainin"
TYPE_DEWPOINT = "dewPoint"
TYPE_EVENTRAININ = "eventrainin"
TYPE_FEELSLIKE = "feelsLike"
TYPE_HOURLYRAININ = "hourlyrainin"
TYPE_HUMIDITY = "humidity"
TYPE_HUMIDITY1 = "humidity1"
TYPE_HUMIDITY10 = "humidity10"
TYPE_HUMIDITY2 = "humidity2"
TYPE_HUMIDITY3 = "humidity3"
TYPE_HUMIDITY4 = "humidity4"
TYPE_HUMIDITY5 = "humidity5"
TYPE_HUMIDITY6 = "humidity6"
TYPE_HUMIDITY7 = "humidity7"
TYPE_HUMIDITY8 = "humidity8"
TYPE_HUMIDITY9 = "humidity9"
TYPE_HUMIDITYIN = "humidityin"
TYPE_LASTRAIN = "lastRain"
TYPE_MAXDAILYGUST = "maxdailygust"
TYPE_MONTHLYRAININ = "monthlyrainin"
TYPE_RELAY1 = "relay1"
TYPE_RELAY10 = "relay10"
TYPE_RELAY2 = "relay2"
TYPE_RELAY3 = "relay3"
TYPE_RELAY4 = "relay4"
TYPE_RELAY5 = "relay5"
TYPE_RELAY6 = "relay6"
TYPE_RELAY7 = "relay7"
TYPE_RELAY8 = "relay8"
TYPE_RELAY9 = "relay9"
TYPE_SOILHUM1 = "soilhum1"
TYPE_SOILHUM10 = "soilhum10"
TYPE_SOILHUM2 = "soilhum2"
TYPE_SOILHUM3 = "soilhum3"
TYPE_SOILHUM4 = "soilhum4"
TYPE_SOILHUM5 = "soilhum5"
TYPE_SOILHUM6 = "soilhum6"
TYPE_SOILHUM7 = "soilhum7"
TYPE_SOILHUM8 = "soilhum8"
TYPE_SOILHUM9 = "soilhum9"
TYPE_SOILTEMP1F = "soiltemp1f"
TYPE_SOILTEMP10F = "soiltemp10f"
TYPE_SOILTEMP2F = "soiltemp2f"
TYPE_SOILTEMP3F = "soiltemp3f"
TYPE_SOILTEMP4F = "soiltemp4f"
TYPE_SOILTEMP5F = "soiltemp5f"
TYPE_SOILTEMP6F = "soiltemp6f"
TYPE_SOILTEMP7F = "soiltemp7f"
TYPE_SOILTEMP8F = "soiltemp8f"
TYPE_SOILTEMP9F = "soiltemp9f"
TYPE_SOLARRADIATION = "solarradiation"
TYPE_SOLARRADIATION_LX = "solarradiation_lx"
TYPE_TEMP10F = "temp10f"
TYPE_TEMP1F = "temp1f"
TYPE_TEMP2F = "temp2f"
TYPE_TEMP3F = "temp3f"
TYPE_TEMP4F = "temp4f"
TYPE_TEMP5F = "temp5f"
TYPE_TEMP6F = "temp6f"
TYPE_TEMP7F = "temp7f"
TYPE_TEMP8F = "temp8f"
TYPE_TEMP9F = "temp9f"
TYPE_TEMPF = "tempf"
TYPE_TEMPINF = "tempinf"
TYPE_TOTALRAININ = "totalrainin"
TYPE_UV = "uv"
TYPE_WEEKLYRAININ = "weeklyrainin"
TYPE_WINDDIR = "winddir"
TYPE_WINDDIR_AVG10M = "winddir_avg10m"
TYPE_WINDDIR_AVG2M = "winddir_avg2m"
TYPE_WINDGUSTDIR = "windgustdir"
TYPE_WINDGUSTMPH = "windgustmph"
TYPE_WINDSPDMPH_AVG10M = "windspdmph_avg10m"
TYPE_WINDSPDMPH_AVG2M = "windspdmph_avg2m"
TYPE_WINDSPEEDMPH = "windspeedmph"
TYPE_YEARLYRAININ = "yearlyrainin"
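# Note: each SENSOR_TYPES value below is a 4-tuple of
# (friendly name, unit of measurement, entity type, device class), where the
# entity type is TYPE_SENSOR or TYPE_BINARY_SENSOR and the device class may be
# None when no Home Assistant device class applies.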
SENSOR_TYPES = {
TYPE_24HOURRAININ: ("24 Hr Rain", "in", TYPE_SENSOR, None),
TYPE_BAROMABSIN: ("Abs Pressure", "inHg", TYPE_SENSOR, "pressure"),
TYPE_BAROMRELIN: ("Rel Pressure", "inHg", TYPE_SENSOR, "pressure"),
TYPE_BATT10: ("Battery 10", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT1: ("Battery 1", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT2: ("Battery 2", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT3: ("Battery 3", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT4: ("Battery 4", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT5: ("Battery 5", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT6: ("Battery 6", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT7: ("Battery 7", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT8: ("Battery 8", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT9: ("Battery 9", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATTOUT: ("Battery", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_CO2: ("co2", "ppm", TYPE_SENSOR, None),
TYPE_DAILYRAININ: ("Daily Rain", "in", TYPE_SENSOR, None),
TYPE_DEWPOINT: ("Dew Point", "°F", TYPE_SENSOR, "temperature"),
TYPE_EVENTRAININ: ("Event Rain", "in", TYPE_SENSOR, None),
TYPE_FEELSLIKE: ("Feels Like", "°F", TYPE_SENSOR, "temperature"),
TYPE_HOURLYRAININ: ("Hourly Rain Rate", "in/hr", TYPE_SENSOR, None),
TYPE_HUMIDITY10: ("Humidity 10", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY1: ("Humidity 1", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY2: ("Humidity 2", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY3: ("Humidity 3", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY4: ("Humidity 4", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY5: ("Humidity 5", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY6: ("Humidity 6", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY7: ("Humidity 7", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY8: ("Humidity 8", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY9: ("Humidity 9", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY: ("Humidity", "%", TYPE_SENSOR, "humidity"),
TYPE_HUMIDITYIN: ("Humidity In", "%", TYPE_SENSOR, "humidity"),
TYPE_LASTRAIN: ("Last Rain", None, TYPE_SENSOR, "timestamp"),
TYPE_MAXDAILYGUST: ("Max Gust", "mph", TYPE_SENSOR, None),
TYPE_MONTHLYRAININ: ("Monthly Rain", "in", TYPE_SENSOR, None),
TYPE_RELAY10: ("Relay 10", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY1: ("Relay 1", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY2: ("Relay 2", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY3: ("Relay 3", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY4: ("Relay 4", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY5: ("Relay 5", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY6: ("Relay 6", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY7: ("Relay 7", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY8: ("Relay 8", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_RELAY9: ("Relay 9", None, TYPE_BINARY_SENSOR, "connectivity"),
TYPE_SOILHUM10: ("Soil Humidity 10", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM1: ("Soil Humidity 1", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM2: ("Soil Humidity 2", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM3: ("Soil Humidity 3", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM4: ("Soil Humidity 4", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM5: ("Soil Humidity 5", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM6: ("Soil Humidity 6", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM7: ("Soil Humidity 7", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM8: ("Soil Humidity 8", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILHUM9: ("Soil Humidity 9", "%", TYPE_SENSOR, "humidity"),
TYPE_SOILTEMP10F: ("Soil Temp 10", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP1F: ("Soil Temp 1", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP2F: ("Soil Temp 2", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP3F: ("Soil Temp 3", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP4F: ("Soil Temp 4", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP5F: ("Soil Temp 5", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP6F: ("Soil Temp 6", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP7F: ("Soil Temp 7", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP8F: ("Soil Temp 8", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP9F: ("Soil Temp 9", "°F", TYPE_SENSOR, "temperature"),
TYPE_SOLARRADIATION: ("Solar Rad", "W/m^2", TYPE_SENSOR, None),
TYPE_SOLARRADIATION_LX: ("Solar Rad (lx)", "lx", TYPE_SENSOR, "illuminance"),
TYPE_TEMP10F: ("Temp 10", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP1F: ("Temp 1", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP2F: ("Temp 2", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP3F: ("Temp 3", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP4F: ("Temp 4", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP5F: ("Temp 5", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP6F: ("Temp 6", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP7F: ("Temp 7", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP8F: ("Temp 8", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMP9F: ("Temp 9", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMPF: ("Temp", "°F", TYPE_SENSOR, "temperature"),
TYPE_TEMPINF: ("Inside Temp", "°F", TYPE_SENSOR, "temperature"),
TYPE_TOTALRAININ: ("Lifetime Rain", "in", TYPE_SENSOR, None),
TYPE_UV: ("uv", "Index", TYPE_SENSOR, None),
TYPE_WEEKLYRAININ: ("Weekly Rain", "in", TYPE_SENSOR, None),
TYPE_WINDDIR: ("Wind Dir", "°", TYPE_SENSOR, None),
TYPE_WINDDIR_AVG10M: ("Wind Dir Avg 10m", "°", TYPE_SENSOR, None),
    TYPE_WINDDIR_AVG2M: ("Wind Dir Avg 2m", "°", TYPE_SENSOR, None),
TYPE_WINDGUSTDIR: ("Gust Dir", "°", TYPE_SENSOR, None),
TYPE_WINDGUSTMPH: ("Wind Gust", "mph", TYPE_SENSOR, None),
TYPE_WINDSPDMPH_AVG10M: ("Wind Avg 10m", "mph", TYPE_SENSOR, None),
TYPE_WINDSPDMPH_AVG2M: ("Wind Avg 2m", "mph", TYPE_SENSOR, None),
TYPE_WINDSPEEDMPH: ("Wind Speed", "mph", TYPE_SENSOR, None),
TYPE_YEARLYRAININ: ("Yearly Rain", "in", TYPE_SENSOR, None),
}
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_APP_KEY): cv.string,
vol.Required(CONF_API_KEY): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
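# Illustrative configuration.yaml snippet that CONFIG_SCHEMA above is meant to
# validate, assuming DOMAIN == "ambient_station" and that CONF_API_KEY /
# CONF_APP_KEY map to "api_key" / "app_key"; the values are placeholders:
#
#   ambient_station:
#     api_key: YOUR_AMBIENT_API_KEY
#     app_key: YOUR_AMBIENT_APP_KEY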
async def async_setup(hass, config):
"""Set up the Ambient PWS component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
# Store config for use during entry setup:
hass.data[DOMAIN][DATA_CONFIG] = conf
if conf[CONF_APP_KEY] in configured_instances(hass):
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_API_KEY: conf[CONF_API_KEY], CONF_APP_KEY: conf[CONF_APP_KEY]},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up the Ambient PWS as config entry."""
session = aiohttp_client.async_get_clientsession(hass)
try:
ambient = AmbientStation(
hass,
config_entry,
Client(
config_entry.data[CONF_API_KEY],
config_entry.data[CONF_APP_KEY],
session,
),
)
hass.loop.create_task(ambient.ws_connect())
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = ambient
except WebsocketError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady
    async def _async_disconnect_websocket(event):
        """Disconnect from the websocket when Home Assistant stops."""
        await ambient.client.websocket.disconnect()
    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, _async_disconnect_websocket
    )
return True
async def async_unload_entry(hass, config_entry):
"""Unload an Ambient PWS config entry."""
ambient = hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
hass.async_create_task(ambient.ws_disconnect())
tasks = [
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in ("binary_sensor", "sensor")
]
await asyncio.gather(*tasks)
return True
async def async_migrate_entry(hass, config_entry):
"""Migrate old entry."""
version = config_entry.version
_LOGGER.debug("Migrating from version %s", version)
# 1 -> 2: Unique ID format changed, so delete and re-import:
if version == 1:
dev_reg = await hass.helpers.device_registry.async_get_registry()
        dev_reg.async_clear_config_entry(config_entry.entry_id)
en_reg = await hass.helpers.entity_registry.async_get_registry()
        en_reg.async_clear_config_entry(config_entry.entry_id)
version = config_entry.version = 2
hass.config_entries.async_update_entry(config_entry)
_LOGGER.info("Migration to version %s successful", version)
return True
class AmbientStation:
"""Define a class to handle the Ambient websocket."""
def __init__(self, hass, config_entry, client):
"""Initialize."""
self._config_entry = config_entry
self._entry_setup_complete = False
self._hass = hass
self._watchdog_listener = None
self._ws_reconnect_delay = DEFAULT_SOCKET_MIN_RETRY
self.client = client
self.monitored_conditions = []
self.stations = {}
async def _attempt_connect(self):
"""Attempt to connect to the socket (retrying later on fail)."""
try:
await self.client.websocket.connect()
except WebsocketError as err:
_LOGGER.error("Error with the websocket connection: %s", err)
self._ws_reconnect_delay = min(2 * self._ws_reconnect_delay, 480)
async_call_later(self._hass, self._ws_reconnect_delay, self.ws_connect)
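    # Note: with DEFAULT_SOCKET_MIN_RETRY = 15, the delay used above doubles
    # after every failed attempt and is capped at 480 seconds
    # (15 -> 30 -> 60 -> 120 -> 240 -> 480 -> 480 -> ...); it is reset back to
    # the minimum once a subscription succeeds (see on_subscribed below).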
async def ws_connect(self):
"""Register handlers and connect to the websocket."""
async def _ws_reconnect(event_time):
"""Forcibly disconnect from and reconnect to the websocket."""
_LOGGER.debug("Watchdog expired; forcing socket reconnection")
await self.client.websocket.disconnect()
await self._attempt_connect()
def on_connect():
"""Define a handler to fire when the websocket is connected."""
_LOGGER.info("Connected to websocket")
_LOGGER.debug("Watchdog starting")
if self._watchdog_listener is not None:
self._watchdog_listener()
self._watchdog_listener = async_call_later(
self._hass, DEFAULT_WATCHDOG_SECONDS, _ws_reconnect
)
def on_data(data):
"""Define a handler to fire when the data is received."""
mac_address = data["macAddress"]
if data != self.stations[mac_address][ATTR_LAST_DATA]:
_LOGGER.debug("New data received: %s", data)
self.stations[mac_address][ATTR_LAST_DATA] = data
async_dispatcher_send(self._hass, TOPIC_UPDATE)
_LOGGER.debug("Resetting watchdog")
self._watchdog_listener()
self._watchdog_listener = async_call_later(
self._hass, DEFAULT_WATCHDOG_SECONDS, _ws_reconnect
)
def on_disconnect():
"""Define a handler to fire when the websocket is disconnected."""
_LOGGER.info("Disconnected from websocket")
def on_subscribed(data):
"""Define a handler to fire when the subscription is set."""
for station in data["devices"]:
if station["macAddress"] in self.stations:
continue
_LOGGER.debug("New station subscription: %s", data)
self.monitored_conditions = [
k for k in station["lastData"] if k in SENSOR_TYPES
]
# If the user is monitoring brightness (in W/m^2),
# make sure we also add a calculated sensor for the
# same data measured in lx:
if TYPE_SOLARRADIATION in self.monitored_conditions:
self.monitored_conditions.append(TYPE_SOLARRADIATION_LX)
self.stations[station["macAddress"]] = {
ATTR_LAST_DATA: station["lastData"],
ATTR_LOCATION: station.get("info", {}).get("location"),
ATTR_NAME: station.get("info", {}).get(
"name", station["macAddress"]
),
}
# If the websocket disconnects and reconnects, the on_subscribed
# handler will get called again; in that case, we don't want to
# attempt forward setup of the config entry (because it will have
# already been done):
if not self._entry_setup_complete:
for component in ("binary_sensor", "sensor"):
self._hass.async_create_task(
self._hass.config_entries.async_forward_entry_setup(
self._config_entry, component
)
)
self._entry_setup_complete = True
self._ws_reconnect_delay = DEFAULT_SOCKET_MIN_RETRY
self.client.websocket.on_connect(on_connect)
self.client.websocket.on_data(on_data)
self.client.websocket.on_disconnect(on_disconnect)
self.client.websocket.on_subscribed(on_subscribed)
await self._attempt_connect()
async def ws_disconnect(self):
"""Disconnect from the websocket."""
await self.client.websocket.disconnect()
class AmbientWeatherEntity(Entity):
"""Define a base Ambient PWS entity."""
def __init__(
self, ambient, mac_address, station_name, sensor_type, sensor_name, device_class
):
"""Initialize the sensor."""
self._ambient = ambient
self._device_class = device_class
self._async_unsub_dispatcher_connect = None
self._mac_address = mac_address
self._sensor_name = sensor_name
self._sensor_type = sensor_type
self._state = None
self._station_name = station_name
@property
def available(self):
"""Return True if entity is available."""
# Since the solarradiation_lx sensor is created only if the
# user shows a solarradiation sensor, ensure that the
# solarradiation_lx sensor shows as available if the solarradiation
# sensor is available:
if self._sensor_type == TYPE_SOLARRADIATION_LX:
return (
self._ambient.stations[self._mac_address][ATTR_LAST_DATA].get(
TYPE_SOLARRADIATION
)
is not None
)
return (
self._ambient.stations[self._mac_address][ATTR_LAST_DATA].get(
self._sensor_type
)
is not None
)
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._mac_address)},
"name": self._station_name,
"manufacturer": "Ambient Weather",
}
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._station_name}_{self._sensor_name}"
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def unique_id(self):
"""Return a unique, unchanging string that represents this sensor."""
return f"{self._mac_address}_{self._sensor_type}"
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.async_schedule_update_ha_state(True)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, TOPIC_UPDATE, update
)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
| {
"content_hash": "f4f4762e2cfc2a2f55bde55399e9f289",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 88,
"avg_line_length": 37.990512333965846,
"alnum_prop": 0.6270915538684382,
"repo_name": "qedi-r/home-assistant",
"id": "7a805d6b86763d443a0514395e689dd70f165ae4",
"size": "20048",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ambient_station/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
import copy
from tacker import context
from tacker import objects
from tacker.tests.unit.db.base import SqlTestCase
from tacker.tests.unit.objects import fakes
from tacker.tests import uuidsentinel
class TestInstantiatedVnfInfo(SqlTestCase):
def setUp(self):
super(TestInstantiatedVnfInfo, self).setUp()
self.context = context.get_admin_context()
self.vnf_package = self._create_and_upload_vnf_package()
self.vnf_instance = self._create_vnf_instance()
self.resource_handle_info = self._create_resource_handle()
self.ext_link_port_info = self._create_ext_link_port_info()
self.ext_virtual_link_info = self._create_ext_virtual_link_info()
self.vnf_link_ports_info = self._create_vnf_link_ports()
self.ip_addresses_info = self._create_ip_addresses_info()
self.ip_over_ethernet = self._create_ip_over_ethernet_info()
self.cp_protocol_info = self._create_cp_protocol_info()
self.vnf_external_cp_info = self._create_vnf_external_cp_info()
self.vnfc_cp_info = self._create_vnfc_cp_info()
self.vnfc_resource_info = self._create_vnfc_resource_info()
self.virtual_link_resource_info = \
self._create_virtual_link_resource_info()
self.virtual_storage_resource_info = \
self._create_virtual_storage_resource_info()
self.ext_managed_virtual_link_info = \
self._create_ext_managed_virtual_link_info()
def _create_and_upload_vnf_package(self):
vnf_package = objects.VnfPackage(context=self.context,
**fakes.vnf_package_data)
vnf_package.create()
vnf_pack_vnfd = fakes.get_vnf_package_vnfd_data(
vnf_package.id, uuidsentinel.vnfd_id)
vnf_pack_vnfd_obj = objects.VnfPackageVnfd(
context=self.context, **vnf_pack_vnfd)
vnf_pack_vnfd_obj.create()
self.vnf_package_vnfd = vnf_pack_vnfd_obj
vnf_package.vnf_package = "ONBOARDED"
vnf_package.save()
return vnf_package
def _create_vnf_instance(self):
vnf_instance_data = fakes.get_vnf_instance_data(
self.vnf_package_vnfd.vnfd_id)
vnf_instance = objects.VnfInstance(context=self.context,
**vnf_instance_data)
vnf_instance.create()
return vnf_instance
def _create_vnf_external_cp_info(self):
vnf_external_cp_data = copy.deepcopy(fakes.vnf_external_cp_info)
vnf_external_cp_data.update(
{'cp_protocol_info': [self.cp_protocol_info]})
vnf_external_cp_info = objects.VnfExtCpInfo(
context=self.context, **vnf_external_cp_data)
return vnf_external_cp_info
def _create_resource_handle(self):
resource_handle_data = copy.deepcopy(fakes.resource_handle_info)
resource_handle_info = objects.ResourceHandle(
context=self.context, **resource_handle_data)
return resource_handle_info
def _create_ext_link_port_info(self):
ext_link_port_info = copy.deepcopy(fakes.ext_link_port_info)
ext_link_port_info.update(
{'resource_handle': self.resource_handle_info})
ext_link_port_info = objects.ExtLinkPortInfo(
context=self.context, **ext_link_port_info)
return ext_link_port_info
def _create_ext_virtual_link_info(self):
ext_virtual_link_info = copy.deepcopy(fakes.ext_virtual_link_info)
ext_virtual_link_info.update(
{'resource_handle_info': self.resource_handle_info,
'ext_link_ports': self.ext_link_port_info})
ext_virtual_link_info = objects.VnfExtCpInfo(
context=self.context, **ext_virtual_link_info)
return ext_virtual_link_info
def _create_vnf_link_ports(self):
vnf_link_ports_info = copy.deepcopy(fakes.vnf_link_ports)
vnf_link_ports_info.update(
{'resource_handle': self.resource_handle_info})
vnf_link_ports_info = objects.VnfLinkPortInfo(
context=self.context, **vnf_link_ports_info)
return vnf_link_ports_info
def _create_ext_managed_virtual_link_info(self):
ext_managed_virtual_link_info = copy.deepcopy(
fakes.ext_managed_virtual_link_info)
ext_managed_virtual_link_info.update(
{'network_resource': self.resource_handle_info,
'vnf_link_ports': [self.vnf_link_ports_info]})
ext_managed_virtual_link_info = objects.ExtManagedVirtualLinkInfo(
context=self.context, **ext_managed_virtual_link_info)
return ext_managed_virtual_link_info
def _create_ip_addresses_info(self):
ip_address_info = copy.deepcopy(fakes.ip_address_info)
ip_address_info = objects.IpAddress(
context=self.context, **ip_address_info)
return ip_address_info
def _create_ip_over_ethernet_info(self):
        ip_over_ethernet_info = copy.deepcopy(
            fakes.ip_over_ethernet_address_info)
        ip_over_ethernet_info.update(
            {'ip_addresses': [self.ip_addresses_info]})
        ip_over_ethernet_info = objects.IpOverEthernetAddressInfo(
            context=self.context, **ip_over_ethernet_info)
        return ip_over_ethernet_info
def _create_cp_protocol_info(self):
cp_protocol_info = copy.deepcopy(fakes.cp_protocol_info)
cp_protocol_info.update(
{'ip_over_ethernet': self.ip_over_ethernet})
cp_protocol_info = objects.CpProtocolInfo(
context=self.context, **cp_protocol_info)
return cp_protocol_info
def _create_vnfc_cp_info(self):
vnfc_cp_info = copy.deepcopy(fakes.vnfc_cp_info)
vnfc_cp_info.update(
{'cp_protocol_info': [self.cp_protocol_info]})
vnfc_cp_info = objects.VnfcCpInfo(
context=self.context, **vnfc_cp_info)
return vnfc_cp_info
def _create_vnfc_resource_info(self):
vnfc_resource_info = copy.deepcopy(fakes.vnfc_resource_info)
vnfc_resource_info.update(
{'compute_resource': self.resource_handle_info,
'vnf_link_ports': [self.vnf_link_ports_info],
'vnfc_cp_info': [self.vnfc_cp_info]})
vnfc_resource_info = objects.VnfcResourceInfo(
context=self.context, **vnfc_resource_info)
return vnfc_resource_info
def _create_virtual_link_resource_info(self):
vnf_virtual_link_resource_info = copy.deepcopy(
fakes.vnf_virtual_link_resource_info)
vnf_virtual_link_resource_info.update(
{'network_resource': self.resource_handle_info,
'vnf_link_ports': [self.vnf_link_ports_info]})
vnf_virtual_link_resource_info = objects.VnfVirtualLinkResourceInfo(
context=self.context, **vnf_virtual_link_resource_info)
return vnf_virtual_link_resource_info
def _create_virtual_storage_resource_info(self):
virtual_storage_resource_info = copy.deepcopy(
fakes.virtual_storage_resource_info)
virtual_storage_resource_info.update(
{'storage_resource': self.resource_handle_info})
virtual_storage_resource_info = objects.VirtualStorageResourceInfo(
context=self.context, **virtual_storage_resource_info)
return virtual_storage_resource_info
def test_save(self):
instantiated_vnf_info = copy.deepcopy(
fakes.get_instantiated_vnf_info())
instantiated_vnf_info.update(
{'ext_cp_info': [self.vnf_external_cp_info],
'vnf_instance_id': self.vnf_instance.id,
'ext_link_port_info': self.ext_link_port_info,
'ext_managed_virtual_link_info': [
self.ext_managed_virtual_link_info],
'vnfc_resource_info': [self.vnfc_resource_info],
'vnf_virtual_link_resource_info': [
self.virtual_link_resource_info],
'virtual_storage_resource_info': [
self.virtual_storage_resource_info]})
instantiated_vnf_info = objects.InstantiatedVnfInfo(
context=self.context, **instantiated_vnf_info)
instantiated_vnf_info.save()
self.assertIsNotNone(instantiated_vnf_info.created_at)
def test_resource_handle_obj_from_primitive_and_object_to_dict(self):
resource_handle = copy.deepcopy(fakes.resource_handle_info)
result = objects.ResourceHandle.obj_from_primitive(
resource_handle, self.context)
self.assertIsInstance(result, objects.ResourceHandle)
self.assertEqual('TEST', result.vim_level_resource_type)
resource_handle_dict = result.to_dict()
self.assertIsInstance(resource_handle_dict, dict)
self.assertEqual(
'TEST', resource_handle_dict['vim_level_resource_type'])
def test_virt_strg_res_info_obj_from_primitive_and_obj_to_dict(self):
virtual_storage_resource_info = copy.deepcopy(
fakes.virtual_storage_resource_info)
result = objects.VirtualStorageResourceInfo.obj_from_primitive(
virtual_storage_resource_info, self.context)
self.assertIsInstance(result,
objects.VirtualStorageResourceInfo)
virt_strg_res_info_dict = result.to_dict()
self.assertIsInstance(virt_strg_res_info_dict, dict)
def test_vnfc_cp_info_obj_from_primitive_and_obj_to_dict(self):
vnfc_cp_info = copy.deepcopy(fakes.vnfc_cp_info)
result = objects.VnfcCpInfo.obj_from_primitive(
vnfc_cp_info, self.context)
self.assertIsInstance(result, objects.VnfcCpInfo)
vnfc_cp_info = result.to_dict()
self.assertIsInstance(vnfc_cp_info, dict)
def test_vnfc_resource_info_obj_from_primitive_and_obj_to_dict(self):
vnfc_resource_info = copy.deepcopy(fakes.vnfc_resource_info)
result = objects.VnfcResourceInfo.obj_from_primitive(
vnfc_resource_info, self.context)
self.assertIsInstance(result, objects.VnfcResourceInfo)
self.assertEqual({'key': 'value'}, result.metadata)
vnfc_resource_info = result.to_dict()
self.assertIsInstance(vnfc_resource_info, dict)
def test_ext_mng_virt_link_obj_from_primitive_and_obj_to_dict(self):
ext_managed_virtual_link_info = copy.deepcopy(
fakes.ext_managed_virtual_link_info)
result = objects.ExtManagedVirtualLinkInfo.obj_from_primitive(
ext_managed_virtual_link_info, self.context)
self.assertIsInstance(result, objects.ExtManagedVirtualLinkInfo)
ext_mng_virt_link = result.to_dict()
self.assertIsInstance(ext_mng_virt_link, dict)
def test_ext_link_port_info_obj_from_primitive_and_obj_to_dict(self):
ext_link_port_info_data = copy.deepcopy(fakes.ext_link_port_info)
result = objects.ExtLinkPortInfo.obj_from_primitive(
ext_link_port_info_data, self.context)
self.assertIsInstance(result, objects.ExtLinkPortInfo)
ext_link_port_info = result.to_dict()
self.assertIsInstance(ext_link_port_info, dict)
def test_ext_virt_link_info_obj_from_primitive_and_obj_to_dict(self):
ext_virtual_link_info = copy.deepcopy(fakes.ext_virtual_link_info)
result = objects.ExtVirtualLinkInfo.obj_from_primitive(
ext_virtual_link_info, self.context)
self.assertIsInstance(result, objects.ExtVirtualLinkInfo)
ext_virt_link_info = result.to_dict()
self.assertIsInstance(ext_virt_link_info, dict)
def test_vnf_ext_cp_info_obj_from_primitive_and_obj_to_dict(self):
vnf_ext_cp_info = copy.deepcopy(fakes.vnf_ext_cp_info)
result = objects.VnfExtCpInfo.obj_from_primitive(
vnf_ext_cp_info, self.context)
self.assertIsInstance(result, objects.VnfExtCpInfo)
ext_virt_link_info = result.to_dict()
self.assertIsInstance(ext_virt_link_info, dict)
def test_instantiated_info_obj_from_primitive_and_obj_to_dict(self):
instantiated_vnf_info = copy.deepcopy(fakes.instantiated_vnf_info)
result = objects.InstantiatedVnfInfo.obj_from_primitive(
instantiated_vnf_info, self.context)
self.assertIsInstance(result, objects.InstantiatedVnfInfo)
instantiated_vnf_info_dict = result.to_dict()
self.assertIsInstance(instantiated_vnf_info_dict, dict)
| {
"content_hash": "a6a501ea6b8591273d84df6df1233b41",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 76,
"avg_line_length": 46.90530303030303,
"alnum_prop": 0.6550916579181135,
"repo_name": "stackforge/tacker",
"id": "fdcb24d377e11ebd3a1a3f855bc52e8cd452afb1",
"size": "12960",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tacker/tests/unit/objects/test_vnf_instantiated_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1143026"
},
{
"name": "Shell",
"bytes": "26584"
}
],
"symlink_target": ""
} |
import re
class version_info(object):
def __init__(self, verstr):
"""verstr - version string"""
m1 = re.match('(.*?)([RBIXSF-])(.*)', verstr)
self.type = m1.group(2)
        self.major = tuple(map(int, m1.group(1).split('.')))  # creates a tuple
after_type = m1.group(3).split('.')
self.minor = after_type[0]
if 'X' == self.type:
# assumes form similar to "45-D10", so extract the bits from this
xm = re.match("(\d+)-(\w)(\d+)", self.minor)
if xm is not None:
self.minor = tuple(
[int(xm.group(1)), xm.group(2), int(xm.group(3))])
if len(after_type) < 2:
self.build = None
else:
self.build = int(after_type[1])
            # X type, but not the hyphen form (e.g. "11.4X12.1"): just extract the build rev, or set None
else:
if len(after_type) < 2:
self.build = None
else:
self.build = int(after_type[1])
elif ('I' == self.type) or ('-' == self.type):
self.type = 'I'
try:
# assumes that we have a build/spin, but not numeric
self.build = after_type[1]
except:
self.build = None
else:
try:
self.build = int(after_type[1]) # assumes numeric build/spin
except:
self.build = after_type[0] # non-numeric
self.as_tuple = self.major + tuple([self.minor, self.build])
self.v_dict = {'major': self.major, 'type': self.type,
'minor': self.minor, 'build': self.build}
def __iter__(self):
for key in self.v_dict:
yield key, self.v_dict[key]
def __repr__(self):
retstr = "junos.version_info(major={major}, type={type}," \
" minor={minor}, build={build})".format(
major=self.major,
type=self.type,
minor=self.minor,
build=self.build
)
return retstr
def _cmp_tuple(self, other):
bylen = {
2: (self.as_tuple[0:2]),
4: self.as_tuple
}
return bylen[len(other)]
def __lt__(self, other):
return self._cmp_tuple(other) < other
def __le__(self, other):
return self._cmp_tuple(other) <= other
def __gt__(self, other):
return self._cmp_tuple(other) > other
def __ge__(self, other):
return self._cmp_tuple(other) >= other
def __eq__(self, other):
return self._cmp_tuple(other) == other
def __ne__(self, other):
return self._cmp_tuple(other) != other
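# --- Illustrative sketch (not part of the original module): a quick look at how
# version_info above splits a few representative Junos version strings. Guarded
# so it never runs on import; only the class defined above is assumed.
if __name__ == '__main__':
    for _verstr in ('15.1R2.9', '12.1X45-D10.2', '13.2-20131209'):
        _vi = version_info(_verstr)
        print("%s -> type=%s major=%s minor=%s build=%s"
              % (_verstr, _vi.type, _vi.major, _vi.minor, _vi.build))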
def _get_swver(dev, facts):
# See if we're VC Capable
if facts['vc_capable'] is True:
try:
return dev.rpc.cli("show version all-members", format='xml')
except:
pass
try:
return dev.rpc.cli("show version invoke-on all-routing-engines",
format='xml')
except:
return dev.rpc.get_software_information()
def facts_software_version(junos, facts):
"""
The following facts are required:
facts['master']
The following facts are assigned:
facts['hostname']
facts['version']
facts['version_<RE#>'] for each RE in dual-RE, cluster or VC system
facts['version_info'] for master RE
"""
x_swver = _get_swver(junos, facts)
if not facts.get('model'):
# try to extract the model from the version information
facts['model'] = x_swver.findtext('.//product-model')
# ------------------------------------------------------------------------
# extract the version information out of the RPC response
# ------------------------------------------------------------------------
f_master = facts.get('master', 'RE0')
if x_swver.tag == 'multi-routing-engine-results':
# we need to find/identify each of the routing-engine (CPU) versions.
if len(x_swver.xpath('./multi-routing-engine-item')) > 1:
facts['2RE'] = True
versions = []
if isinstance(f_master, list):
xpath = './multi-routing-engine-item[re-name="{0}"' \
']/software-information/host-name'.format(f_master[0].lower())
else:
xpath = './multi-routing-engine-item[re-name="{0}"' \
']/software-information/host-name'.format(f_master.lower())
facts['hostname'] = x_swver.findtext(xpath)
if facts['hostname'] is None:
# then there the re-name is not what we are expecting; we should
# handle this better, eh? For now, just assume there is one
# software-information element and take that host-name. @@@ hack.
facts['hostname'] = x_swver.findtext(
'.//software-information/host-name')
for re_sw in x_swver.xpath('.//software-information'):
re_name = re_sw.xpath('preceding-sibling::re-name')[0].text
# handle the cases where the "RE name" could be things like
# "FPC<n>" or "ndoe<n>", and normalize to "RE<n>".
re_name = re.sub(r'(\w+)(\d+)', 'RE\\2', re_name)
# First try the <junos-version> tag present in >= 15.1
swinfo = re_sw.findtext('junos-version', default=None)
if not swinfo:
# For < 15.1, get version from the "junos" package.
pkginfo = re_sw.xpath(
'package-information[normalize-space(name)="junos"]/comment'
)[0].text
try:
swinfo = re.findall(r'\[(.*)\]', pkginfo)[0]
except:
swinfo = "0.0I0.0"
versions.append((re_name.upper(), swinfo))
# now add the versions to the facts <dict>
for re_ver in versions:
facts['version_' + re_ver[0]] = re_ver[1]
if f_master is not None:
master = f_master[0] if isinstance(f_master, list) else f_master
if 'version_' + master in facts:
facts['version'] = facts['version_' + master]
else:
facts['version'] = versions[0][1]
else:
facts['version'] = versions[0][1]
else:
# single-RE
facts['hostname'] = x_swver.findtext('host-name')
# First try the <junos-version> tag present in >= 15.1
swinfo = x_swver.findtext('.//junos-version', default=None)
if not swinfo:
# For < 15.1, get version from the "junos" package.
pkginfo = x_swver.xpath(
'.//package-information[normalize-space(name)="junos"]/comment'
)[0].text
try:
swinfo = re.findall(r'\[(.*)\]', pkginfo)[0]
except:
swinfo = "0.0I0.0"
facts['version'] = swinfo
# ------------------------------------------------------------------------
# create a 'version_info' object based on the master version
# ------------------------------------------------------------------------
facts['version_info'] = version_info(facts['version'])
def version_yaml_representer(dumper, version):
return dumper.represent_mapping(u'tag:yaml.org,2002:map', version.v_dict)
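# Note: version_yaml_representer above is intended to be registered with
# PyYAML, e.g. yaml.add_representer(version_info, version_yaml_representer),
# so that version_info objects are dumped as plain mappings (illustrative; the
# registration itself is not part of this module).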
| {
"content_hash": "f472bd465843e89bfee8ddf629a48ff6",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 95,
"avg_line_length": 35.91787439613527,
"alnum_prop": 0.49710827168796234,
"repo_name": "mith1979/ansible_automation",
"id": "bb20f7f91998b03b714f4d7ed45a68ea4e89ea8b",
"size": "7435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/jnpr/junos/facts/swver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
import json
import os
import tempfile
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.forms.fields import Field
from django.utils.encoding import smart_unicode
import mock
from jinja2.utils import escape
from nose import SkipTest
from nose.tools import eq_, ok_
from PIL import Image
from pyquery import PyQuery as pq
from tower import strip_whitespace
import mkt
import mkt.site.tests
from lib.video.tests import files as video_files
from mkt.access.models import Group, GroupUser
from mkt.comm.models import CommunicationNote
from mkt.constants import comm, regions
from mkt.developers.models import ActivityLog
from mkt.reviewers.models import RereviewQueue
from mkt.site.fixtures import fixture
from mkt.site.helpers import absolutify
from mkt.site.tests import formset, initial
from mkt.site.tests.test_utils_ import get_image_path
from mkt.site.utils import app_factory
from mkt.translations.models import Translation
from mkt.users.models import UserProfile
from mkt.versions.models import Version
from mkt.webapps.models import AddonExcludedRegion as AER
from mkt.webapps.models import AddonDeviceType, AddonUser, Webapp
response_mock = mock.Mock()
response_mock.read.return_value = '''
{
"name": "Something Ballin!",
"description": "Goin' hard in the paint.",
"launch_path": "/ballin/4.eva",
"developer": {
"name": "Pro Balliner",
"url": "http://www.ballin4eva.xxx"
},
"icons": {
"128": "/ballin/icon.png"
},
"installs_allowed_from": [ "https://marketplace.firefox.com" ]
}
'''
response_mock.headers = {'Content-Type':
'application/x-web-app-manifest+json'}
def get_section_url(addon, section, edit=False):
args = [addon.app_slug, section]
if edit:
args.append('edit')
return reverse('mkt.developers.apps.section', args=args)
class TestEdit(mkt.site.tests.TestCase):
fixtures = fixture('group_admin', 'user_999', 'user_admin',
'user_admin_group', 'webapp_337141')
def setUp(self):
self.webapp = self.get_webapp()
self.url = self.webapp.get_dev_url()
self.user = UserProfile.objects.get(email='[email protected]')
self.login(self.user.email)
def get_webapp(self):
return Webapp.objects.no_cache().get(id=337141)
def get_url(self, section, edit=False):
return get_section_url(self.webapp, section, edit)
def get_dict(self, **kw):
fs = formset(self.cat_initial, initial_count=1)
result = {'name': 'new name', 'slug': 'test_slug',
'description': 'new description'}
result.update(**kw)
result.update(fs)
return result
def compare(self, data, instance=None):
"""Compare an app against a `dict` of expected values."""
mapping = {
'regions': 'get_region_ids'
}
if instance is None:
instance = self.get_webapp()
for k, v in data.iteritems():
k = mapping.get(k, k)
val = getattr(instance, k, '')
if callable(val):
val = val()
if val is None:
val = ''
eq_(unicode(val), unicode(v))
def compare_features(self, data, version=None):
"""
Compare an app's set of required features against a `dict` of expected
values.
"""
if not version:
version = self.get_webapp().current_version
features = version.features
for k, v in data.iteritems():
val = getattr(features, k)
if callable(val):
val = val()
eq_(unicode(val), unicode(v))
def check_form_url(self, section):
# Check form destinations and "Edit" button.
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('form').attr('action'), self.edit_url)
eq_(doc('h2 .button').attr('data-editurl'), self.edit_url)
# Check "Cancel" button.
r = self.client.get(self.edit_url)
eq_(pq(r.content)('form .addon-edit-cancel').attr('href'), self.url)
class TestEditListingWebapp(TestEdit):
fixtures = fixture('webapp_337141')
def test_redirect(self):
r = self.client.get(self.url.replace('edit', ''))
self.assert3xx(r, self.url)
def test_nav_links(self):
r = self.client.get(self.url)
doc = pq(r.content)('.edit-addon-nav')
eq_(doc.length, 2)
eq_(doc('.view-stats').length, 0)
def test_edit_with_no_current_version(self):
# Disable file for latest version, and then update app.current_version.
app = self.get_webapp()
app.versions.latest().all_files[0].update(status=mkt.STATUS_DISABLED)
app.update_version()
# Now try to display edit page.
r = self.client.get(self.url)
eq_(r.status_code, 200)
def test_edit_global_xss_name(self):
self.webapp.name = u'My app é <script>alert(5)</script>'
self.webapp.save()
content = smart_unicode(self.client.get(self.url).content)
ok_(not unicode(self.webapp.name) in content)
ok_(unicode(escape(self.webapp.name)) in content)
@mock.patch.object(settings, 'TASK_USER_ID', 999)
class TestEditBasic(TestEdit):
fixtures = TestEdit.fixtures
def setUp(self):
super(TestEditBasic, self).setUp()
self.cat = 'games'
self.dtype = mkt.DEVICE_TYPES.keys()[0]
self.webapp.update(categories=['games'])
AddonDeviceType.objects.create(addon=self.webapp,
device_type=self.dtype)
self.url = self.get_url('basic')
self.edit_url = self.get_url('basic', edit=True)
def get_webapp(self):
return Webapp.objects.get(id=337141)
def get_dict(self, **kw):
result = {'device_types': self.dtype, 'slug': 'NeW_SluG',
'description': 'New description with <em>html</em>!',
'manifest_url': self.webapp.manifest_url,
'categories': [self.cat]}
result.update(**kw)
return result
def test_form_url(self):
self.check_form_url('basic')
def test_appslug_visible(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#slug_edit').remove('a, em').text(),
absolutify(u'/\u2026/%s' % self.webapp.app_slug))
def test_edit_slug_success(self):
data = self.get_dict()
r = self.client.post(self.edit_url, data)
self.assertNoFormErrors(r)
eq_(r.status_code, 200)
webapp = self.get_webapp()
eq_(webapp.app_slug, data['slug'].lower())
def test_edit_slug_max_length(self):
r = self.client.post(self.edit_url, self.get_dict(slug='x' * 31))
self.assertFormError(
r, 'form', 'slug',
'Ensure this value has at most 30 characters (it has 31).')
def test_edit_slug_dupe(self):
Webapp.objects.create(app_slug='dupe')
r = self.client.post(self.edit_url, self.get_dict(slug='dupe'))
self.assertFormError(
r, 'form', 'slug',
'This slug is already in use. Please choose another.')
webapp = self.get_webapp()
# Nothing changed.
eq_(webapp.app_slug, self.webapp.app_slug)
def test_edit_xss_description(self):
self.webapp.description = ("This\n<b>IS</b>"
"<script>alert('awesome')</script>")
self.webapp.save()
r = self.client.get(self.url)
eq_(pq(r.content)('#addon-description span[lang]').html(),
"This<br/><b>IS</b><script>alert('awesome')"
'</script>')
def test_edit_xss_name(self):
self.webapp.name = u'My app é <script>alert(5)</script>'
self.webapp.save()
content = smart_unicode(self.client.get(self.url).content)
ok_(not unicode(self.webapp.name) in content)
ok_(unicode(escape(self.webapp.name)) in content)
def test_view_edit_manifest_url_empty(self):
# Empty manifest should throw an error.
r = self.client.post(self.edit_url, self.get_dict(manifest_url=''))
form = r.context['form']
assert 'manifest_url' in form.errors
assert 'This field is required' in form.errors['manifest_url'][0]
@mock.patch('mkt.developers.forms.update_manifests')
def test_view_edit_manifest_url(self, fetch):
assert not self.webapp.in_rereview_queue(), (
'App should not be in re-review queue')
# Should be able to see manifest URL listed.
r = self.client.get(self.url)
eq_(pq(r.content)('#manifest-url a').attr('href'),
self.webapp.manifest_url)
# Devs/admins can edit the manifest URL and should see a text field.
r = self.client.get(self.edit_url)
row = pq(r.content)('#manifest-url')
eq_(row.find('input[name=manifest_url]').length, 1)
eq_(row.find('input[name=manifest_url][readonly]').length, 0)
# POST with the new manifest URL.
url = 'https://ballin.com/ballin4eva.webapp'
r = self.client.post(self.edit_url, self.get_dict(manifest_url=url))
self.assertNoFormErrors(r)
self.webapp = self.get_webapp()
eq_(self.webapp.manifest_url, url)
eq_(self.webapp.app_domain, 'https://ballin.com')
eq_(self.webapp.current_version.version, '1.0')
eq_(self.webapp.versions.count(), 1)
assert self.webapp.in_rereview_queue(), (
'App should be in re-review queue')
# Ensure that we're refreshing the manifest.
fetch.delay.assert_called_once_with([self.webapp.pk])
@mock.patch('mkt.developers.forms.update_manifests')
def test_view_manifest_changed_dupe_app_domain(self, fetch):
self.create_switch('webapps-unique-by-domain')
app_factory(name='Super Duper',
app_domain='https://ballin.com')
self.login('admin')
# POST with new manifest URL.
url = 'https://ballin.com/ballin4eva.webapp'
r = self.client.post(self.edit_url, self.get_dict(manifest_url=url))
form = r.context['form']
assert 'manifest_url' in form.errors
assert 'one app per domain' in form.errors['manifest_url'][0]
eq_(self.get_webapp().manifest_url, self.webapp.manifest_url,
'Manifest URL should not have been changed!')
assert not fetch.delay.called, (
'Manifest should not have been refreshed!')
@mock.patch('mkt.developers.forms.update_manifests')
def test_view_manifest_changed_same_domain_diff_path(self, fetch):
self.create_switch('webapps-unique-by-domain')
self.login('admin')
# POST with new manifest URL for same domain but w/ different path.
data = self.get_dict(manifest_url=self.webapp.manifest_url + 'xxx')
r = self.client.post(self.edit_url, data)
self.assertNoFormErrors(r)
eq_(self.get_webapp().manifest_url, self.webapp.manifest_url + 'xxx',
'Manifest URL should have changed!')
assert not self.webapp.in_rereview_queue(), (
            'App should not be in re-review queue because an admin changed it')
# Ensure that we're refreshing the manifest.
fetch.delay.assert_called_once_with([self.webapp.pk])
def test_view_manifest_url_changed(self):
new_url = 'http://omg.org/yes'
self.webapp.manifest_url = new_url
self.webapp.save()
# If we change the `manifest_url` manually, the URL here should change.
r = self.client.get(self.url)
eq_(pq(r.content)('#manifest-url a').attr('href'), new_url)
def test_categories_listed(self):
r = self.client.get(self.url)
eq_(pq(r.content)('#addon-categories-edit').text(), unicode('Games'))
r = self.client.post(self.url)
eq_(pq(r.content)('#addon-categories-edit').text(), unicode('Games'))
def test_edit_categories_add(self):
new = 'books'
cats = [self.cat, new]
self.client.post(self.edit_url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories), sorted(cats))
def test_edit_categories_addandremove(self):
new = 'books'
cats = [new]
self.client.post(self.edit_url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories), sorted(cats))
@mock.patch('mkt.webapps.models.Webapp.save')
def test_edit_categories_required(self, save):
r = self.client.post(self.edit_url, self.get_dict(categories=[]))
eq_(r.context['cat_form'].errors['categories'][0],
unicode(Field.default_error_messages['required']))
assert not save.called
def test_edit_categories_xss(self):
new = '<script>alert("xss");</script>'
cats = [self.cat, new]
r = self.client.post(self.edit_url, self.get_dict(categories=cats))
assert '<script>alert' not in r.content
        assert '&lt;script&gt;alert' in r.content
def test_edit_categories_nonexistent(self):
r = self.client.post(self.edit_url, self.get_dict(categories=[100]))
eq_(r.context['cat_form'].errors['categories'],
['Select a valid choice. 100 is not one of the available '
'choices.'])
def test_edit_categories_max(self):
cats = [self.cat, 'books', 'social']
r = self.client.post(self.edit_url, self.get_dict(categories=cats))
eq_(r.context['cat_form'].errors['categories'],
['You can have only 2 categories.'])
def test_edit_check_description(self):
# Make sure bug 629779 doesn't return.
r = self.client.post(self.edit_url, self.get_dict())
eq_(r.status_code, 200)
eq_(self.get_webapp().description, self.get_dict()['description'])
def test_edit_slug_valid(self):
old_edit = self.edit_url
data = self.get_dict(slug='valid')
r = self.client.post(self.edit_url, data)
doc = pq(r.content)
assert doc('form').attr('action') != old_edit
def test_edit_as_developer(self):
self.login('[email protected]')
data = self.get_dict()
r = self.client.post(self.edit_url, data)
# Make sure we get errors when they are just regular users.
eq_(r.status_code, 403)
AddonUser.objects.create(addon=self.webapp, user_id=999,
role=mkt.AUTHOR_ROLE_DEV)
r = self.client.post(self.edit_url, data)
eq_(r.status_code, 200)
webapp = self.get_webapp()
eq_(unicode(webapp.app_slug), data['slug'].lower())
eq_(unicode(webapp.description), data['description'])
def test_l10n(self):
self.webapp.update(default_locale='en-US')
url = self.webapp.get_dev_url('edit')
r = self.client.get(url)
eq_(pq(r.content)('#l10n-menu').attr('data-default'), 'en-us',
'l10n menu not visible for %s' % url)
def test_l10n_not_us(self):
self.webapp.update(default_locale='fr')
url = self.webapp.get_dev_url('edit')
r = self.client.get(url)
eq_(pq(r.content)('#l10n-menu').attr('data-default'), 'fr',
'l10n menu not visible for %s' % url)
def test_edit_l10n(self):
data = {
'slug': self.webapp.app_slug,
'manifest_url': self.webapp.manifest_url,
'categories': [self.cat],
'description_en-us': u'Nêw english description',
'description_fr': u'Nëw french description',
'releasenotes_en-us': u'Nëw english release notes',
'releasenotes_fr': u'Nêw french release notes'
}
res = self.client.post(self.edit_url, data)
eq_(res.status_code, 200)
self.webapp = self.get_webapp()
version = self.webapp.current_version.reload()
desc_id = self.webapp.description_id
notes_id = version.releasenotes_id
eq_(self.webapp.description, data['description_en-us'])
eq_(version.releasenotes, data['releasenotes_en-us'])
eq_(unicode(Translation.objects.get(id=desc_id, locale='fr')),
data['description_fr'])
eq_(unicode(Translation.objects.get(id=desc_id, locale='en-us')),
data['description_en-us'])
eq_(unicode(Translation.objects.get(id=notes_id, locale='fr')),
data['releasenotes_fr'])
eq_(unicode(Translation.objects.get(id=notes_id, locale='en-us')),
data['releasenotes_en-us'])
@mock.patch('mkt.developers.views._update_manifest')
def test_refresh(self, fetch):
self.login('[email protected]')
url = reverse('mkt.developers.apps.refresh_manifest',
args=[self.webapp.app_slug])
r = self.client.post(url)
eq_(r.status_code, 204)
fetch.assert_called_once_with(self.webapp.pk, True, {})
@mock.patch('mkt.developers.views._update_manifest')
def test_refresh_dev_only(self, fetch):
self.login('[email protected]')
url = reverse('mkt.developers.apps.refresh_manifest',
args=[self.webapp.app_slug])
r = self.client.post(url)
eq_(r.status_code, 403)
eq_(fetch.called, 0)
def test_view_developer_name(self):
r = self.client.get(self.url)
developer_name = self.webapp.current_version.developer_name
content = smart_unicode(r.content)
eq_(pq(content)('#developer-name td').html().strip(), developer_name)
def test_view_developer_name_xss(self):
version = self.webapp.current_version
version._developer_name = '<script>alert("xss-devname")</script>'
version.save()
r = self.client.get(self.url)
assert '<script>alert' not in r.content
        assert '&lt;script&gt;alert' in r.content
def test_edit_packaged(self):
self.get_webapp().update(is_packaged=True)
data = self.get_dict()
data.pop('manifest_url')
r = self.client.post(self.edit_url, data)
eq_(r.status_code, 200)
eq_(r.context['editable'], False)
eq_(self.get_webapp().description, self.get_dict()['description'])
def test_edit_basic_not_public(self):
# Disable file for latest version, and then update app.current_version.
app = self.get_webapp()
app.versions.latest().all_files[0].update(status=mkt.STATUS_DISABLED)
app.update_version()
# Now try to display edit page.
r = self.client.get(self.url)
eq_(r.status_code, 200)
def test_view_release_notes(self):
version = self.webapp.current_version
version.releasenotes = u'Chëese !'
version.save()
res = self.client.get(self.url)
eq_(res.status_code, 200)
content = smart_unicode(res.content)
eq_(pq(content)('#releasenotes td span[lang]').html().strip(),
version.releasenotes)
self.webapp.update(is_packaged=True)
res = self.client.get(self.url)
eq_(res.status_code, 200)
content = smart_unicode(res.content)
eq_(pq(content)('#releasenotes').length, 0)
def test_edit_release_notes(self):
self.webapp.previews.create()
self.webapp.support_email = '[email protected]'
self.webapp.save()
data = self.get_dict(releasenotes=u'I can hâz release notes')
res = self.client.post(self.edit_url, data)
releasenotes = self.webapp.reload().latest_version.releasenotes
eq_(res.status_code, 200)
eq_(releasenotes, data['releasenotes'])
# Make sure publish_type wasn't reset by accident.
eq_(self.webapp.reload().publish_type, mkt.PUBLISH_IMMEDIATE)
def test_edit_release_notes_pending(self):
# Like test_edit_release_notes, but with a pending app.
file_ = self.webapp.current_version.all_files[0]
file_.update(status=mkt.STATUS_PENDING)
self.webapp.update(status=mkt.STATUS_PENDING)
self.test_edit_release_notes()
eq_(self.webapp.reload().status, mkt.STATUS_PENDING)
def test_edit_release_notes_packaged(self):
# You are not supposed to edit release notes from the basic edit
        # page if your app is packaged. Instead this is done from the version
# edit page.
self.webapp.update(is_packaged=True)
data = self.get_dict(releasenotes=u'I can not hâz release notes')
res = self.client.post(self.edit_url, data)
releasenotes = self.webapp.current_version.reload().releasenotes
eq_(res.status_code, 200)
eq_(releasenotes, None)
def test_view_releasenotes_xss(self):
version = self.webapp.current_version
version.releasenotes = '<script>alert("xss-devname")</script>'
version.save()
r = self.client.get(self.url)
        # Raw script tags must never appear; the output should be escaped.
        assert '<script>alert' not in r.content
        assert '&lt;script&gt;alert' in r.content
class TestEditCountryLanguage(TestEdit):
def get_webapp(self):
return Webapp.objects.get(id=337141)
def test_data_visible(self):
clean_countries = []
self.get_webapp().current_version.update(supported_locales='de,es')
res = self.client.get(self.url)
eq_(res.status_code, 200)
countries = (pq(pq(res.content)('#edit-app-language tr').eq(0))
.find('td').remove('small').text())
langs = (pq(pq(res.content)('#edit-app-language tr').eq(1)).find('td')
.remove('small').text())
for c in countries.split(', '):
clean_countries.append(strip_whitespace(c))
# eq_(langs, u'English (US) (default), Deutsch, Espa\xf1ol')
# XXX The above line is correct. But if Jenkins is wrong, I
# don't wanna be right.
eq_(langs, u'English (US) (default), Deutsch, Espa\xc3\xb1ol')
self.assertSetEqual(
sorted(clean_countries),
sorted([r.name.decode() for r in regions.ALL_REGIONS]))
class TestEditMedia(TestEdit):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestEditMedia, self).setUp()
self.url = self.get_url('media')
self.edit_url = self.get_url('media', True)
self.icon_upload = self.webapp.get_dev_url('upload_icon')
self.preview_upload = self.webapp.get_dev_url('upload_preview')
patches = {
'ADDON_ICONS_PATH': tempfile.mkdtemp(),
'PREVIEW_THUMBNAIL_PATH': tempfile.mkstemp()[1] + '%s/%d.png',
}
for k, v in patches.iteritems():
patcher = mock.patch.object(settings, k, v)
patcher.start()
self.addCleanup(patcher.stop)
def formset_new_form(self, *args, **kw):
ctx = self.client.get(self.edit_url).context
blank = initial(ctx['preview_form'].forms[-1])
blank.update(**kw)
return blank
def formset_media(self, prev_blank=None, *args, **kw):
prev_blank = prev_blank or {}
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
# Preview formset.
fs = formset(*list(args) + [self.formset_new_form(**prev_blank)], **kw)
return dict((k, '' if v is None else v) for k, v in fs.items())
def new_preview_hash(self):
# At least one screenshot is required.
src_image = open(get_image_path('preview.jpg'), 'rb')
r = self.client.post(self.preview_upload,
dict(upload_image=src_image))
return {'upload_hash': json.loads(r.content)['upload_hash']}
def test_form_url(self):
self.check_form_url('media')
def test_edit_defaulticon(self):
data = dict(icon_type='')
data_formset = self.formset_media(prev_blank=self.new_preview_hash(),
**data)
r = self.client.post(self.edit_url, data_formset)
self.assertNoFormErrors(r)
webapp = self.get_webapp()
assert webapp.get_icon_url(128).endswith('default-128.png')
assert webapp.get_icon_url(64).endswith('default-64.png')
for k in data:
eq_(unicode(getattr(webapp, k)), data[k])
def test_edit_preuploadedicon(self):
data = dict(icon_type='icon/appearance')
data_formset = self.formset_media(prev_blank=self.new_preview_hash(),
**data)
r = self.client.post(self.edit_url, data_formset)
self.assertNoFormErrors(r)
webapp = self.get_webapp()
assert webapp.get_icon_url(64).endswith('appearance-64.png')
assert webapp.get_icon_url(128).endswith('appearance-128.png')
for k in data:
eq_(unicode(getattr(webapp, k)), data[k])
def test_edit_uploadedicon(self):
img = get_image_path('mozilla-sq.png')
src_image = open(img, 'rb')
response = self.client.post(self.icon_upload,
dict(upload_image=src_image))
response_json = json.loads(response.content)
webapp = self.get_webapp()
# Now, save the form so it gets moved properly.
data = dict(icon_type='image/png',
icon_upload_hash=response_json['upload_hash'])
data_formset = self.formset_media(prev_blank=self.new_preview_hash(),
**data)
r = self.client.post(self.edit_url, data_formset)
self.assertNoFormErrors(r)
webapp = self.get_webapp()
# Unfortunate hardcoding of URL.
url = webapp.get_icon_url(64)
assert ('addon_icons/%s/%s' % (webapp.id / 1000, webapp.id)) in url, (
'Unexpected path: %r' % url)
eq_(data['icon_type'], 'image/png')
# Check that it was actually uploaded.
dirname = os.path.join(settings.ADDON_ICONS_PATH,
'%s' % (webapp.id / 1000))
dest = os.path.join(dirname, '%s-32.png' % webapp.id)
eq_(storage.exists(dest), True)
eq_(Image.open(storage.open(dest)).size, (32, 32))
def test_edit_icon_log(self):
self.test_edit_uploadedicon()
log = ActivityLog.objects.all()
eq_(log.count(), 1)
eq_(log[0].action, mkt.LOG.CHANGE_ICON.id)
def test_edit_uploadedicon_noresize(self):
img = '%s/img/mkt/logos/128.png' % settings.MEDIA_ROOT
src_image = open(img, 'rb')
data = dict(upload_image=src_image)
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
webapp = self.get_webapp()
# Now, save the form so it gets moved properly.
data = dict(icon_type='image/png',
icon_upload_hash=response_json['upload_hash'])
data_formset = self.formset_media(prev_blank=self.new_preview_hash(),
**data)
r = self.client.post(self.edit_url, data_formset)
self.assertNoFormErrors(r)
webapp = self.get_webapp()
# Unfortunate hardcoding of URL.
addon_url = webapp.get_icon_url(64).split('?')[0]
end = 'addon_icons/%s/%s-64.png' % (webapp.id / 1000, webapp.id)
assert addon_url.endswith(end), 'Unexpected path: %r' % addon_url
eq_(data['icon_type'], 'image/png')
# Check that it was actually uploaded.
dirname = os.path.join(settings.ADDON_ICONS_PATH,
'%s' % (webapp.id / 1000))
dest = os.path.join(dirname, '%s-64.png' % webapp.id)
assert storage.exists(dest), dest
eq_(Image.open(storage.open(dest)).size, (64, 64))
def test_media_types(self):
res = self.client.get(self.get_url('media', edit=True))
doc = pq(res.content)
eq_(doc('#id_icon_upload').attr('data-allowed-types'),
'image/jpeg|image/png')
eq_(doc('.screenshot_upload').attr('data-allowed-types'),
'image/jpeg|image/png|video/webm')
def check_image_type(self, url, msg):
img = '%s/js/devreg/devhub.js' % settings.MEDIA_ROOT
self.check_image_type_path(img, url, msg)
def check_image_type_path(self, img, url, msg):
src_image = open(img, 'rb')
res = self.client.post(url, {'upload_image': src_image})
response_json = json.loads(res.content)
assert any(e == msg for e in response_json['errors']), (
response_json['errors'])
# The check_image_type method uploads js, so let's try sending that
# to ffmpeg to see what it thinks.
@mock.patch.object(mkt, 'VIDEO_TYPES', ['application/javascript'])
def test_edit_video_wrong_type(self):
raise SkipTest
self.check_image_type(self.preview_upload, 'Videos must be in WebM.')
def test_edit_icon_wrong_type(self):
self.check_image_type(self.icon_upload,
'Icons must be either PNG or JPG.')
def test_edit_screenshot_wrong_type(self):
self.check_image_type(self.preview_upload,
'Images must be either PNG or JPG.')
def setup_image_status(self):
self.icon_dest = os.path.join(self.webapp.get_icon_dir(),
'%s-64.png' % self.webapp.id)
os.makedirs(os.path.dirname(self.icon_dest))
open(self.icon_dest, 'w')
self.preview = self.webapp.previews.create()
self.preview.save()
os.makedirs(os.path.dirname(self.preview.thumbnail_path))
open(self.preview.thumbnail_path, 'w')
self.url = self.webapp.get_dev_url('ajax.image.status')
def test_icon_square(self):
img = get_image_path('mozilla.png')
self.check_image_type_path(img, self.icon_upload,
'Icons must be square.')
def test_icon_status_no_choice(self):
self.webapp.update(icon_type='')
url = self.webapp.get_dev_url('ajax.image.status')
result = json.loads(self.client.get(url).content)
assert result['icons']
def test_icon_status_works(self):
self.setup_image_status()
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_icon_status_fails(self):
self.setup_image_status()
os.remove(self.icon_dest)
result = json.loads(self.client.get(self.url).content)
assert not result['icons']
def test_preview_status_works(self):
self.setup_image_status()
result = json.loads(self.client.get(self.url).content)
assert result['previews']
# No previews means that all the images are done.
self.webapp.previews.all().delete()
result = json.loads(self.client.get(self.url).content)
assert result['previews']
def test_preview_status_fails(self):
self.setup_image_status()
os.remove(self.preview.thumbnail_path)
result = json.loads(self.client.get(self.url).content)
assert not result['previews']
def test_image_status_default(self):
self.setup_image_status()
os.remove(self.icon_dest)
self.webapp.update(icon_type='icon/photos')
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_icon_size_req(self):
filehandle = open(get_image_path('mkt_icon_72.png'), 'rb')
res = self.client.post(self.icon_upload, {'upload_image': filehandle})
response_json = json.loads(res.content)
assert any(e == 'Icons must be at least 128px by 128px.' for e in
response_json['errors'])
def check_image_animated(self, url, msg):
filehandle = open(get_image_path('animated.png'), 'rb')
res = self.client.post(url, {'upload_image': filehandle})
response_json = json.loads(res.content)
assert any(e == msg for e in response_json['errors'])
def test_icon_animated(self):
self.check_image_animated(self.icon_upload,
'Icons cannot be animated.')
def test_screenshot_animated(self):
self.check_image_animated(self.preview_upload,
'Images cannot be animated.')
@mock.patch('lib.video.ffmpeg.Video')
@mock.patch('mkt.developers.utils.video_library')
def add(self, handle, Video, video_library, num=1):
data_formset = self.formset_media(upload_image=handle)
r = self.client.post(self.preview_upload, data_formset)
self.assertNoFormErrors(r)
upload_hash = json.loads(r.content)['upload_hash']
# Create and post with the formset.
fields = []
for i in xrange(num):
fields.append(self.formset_new_form(upload_hash=upload_hash,
position=i))
data_formset = self.formset_media(*fields)
r = self.client.post(self.edit_url, data_formset)
self.assertNoFormErrors(r)
def preview_add(self, num=1):
self.add(open(get_image_path('preview.jpg'), 'rb'), num=num)
@mock.patch('mimetypes.guess_type', lambda *a: ('video/webm', 'webm'))
def preview_video_add(self, num=1):
self.add(open(video_files['good'], 'rb'), num=num)
@mock.patch('lib.video.ffmpeg.Video')
@mock.patch('mkt.developers.utils.video_library')
def add_json(self, handle, Video, video_library):
data_formset = self.formset_media(upload_image=handle)
result = self.client.post(self.preview_upload, data_formset)
return json.loads(result.content)
@mock.patch('mimetypes.guess_type', lambda *a: ('video/webm', 'webm'))
def test_edit_preview_video_add_hash(self):
res = self.add_json(open(video_files['good'], 'rb'))
assert not res['errors'], res['errors']
assert res['upload_hash'].endswith('.video-webm'), res['upload_hash']
def test_edit_preview_add_hash(self):
res = self.add_json(open(get_image_path('preview.jpg'), 'rb'))
assert res['upload_hash'].endswith('.image-jpeg'), res['upload_hash']
def test_edit_preview_add_hash_size(self):
res = self.add_json(open(get_image_path('mozilla.png'), 'rb'))
assert any(e.startswith('App previews ') for e in res['errors']), (
'Small screenshot not flagged for size.')
@mock.patch.object(settings, 'MAX_VIDEO_UPLOAD_SIZE', 1)
@mock.patch('mimetypes.guess_type', lambda *a: ('video/webm', 'webm'))
def test_edit_preview_video_size(self):
res = self.add_json(open(video_files['good'], 'rb'))
assert any(e.startswith('Please use files smaller than')
for e in res['errors']), (res['errors'])
@mock.patch('lib.video.tasks.resize_video')
@mock.patch('mimetypes.guess_type', lambda *a: ('video/webm', 'webm'))
def test_edit_preview_video_add(self, resize_video):
eq_(self.get_webapp().previews.count(), 0)
self.preview_video_add()
eq_(self.get_webapp().previews.count(), 1)
def test_edit_preview_add(self):
eq_(self.get_webapp().previews.count(), 0)
self.preview_add()
eq_(self.get_webapp().previews.count(), 1)
def test_edit_preview_edit(self):
self.preview_add()
preview = self.get_webapp().previews.all()[0]
edited = {'upload_hash': 'xxx',
'id': preview.id,
'position': preview.position,
'file_upload': None}
data_formset = self.formset_media(edited, initial_count=1)
self.client.post(self.edit_url, data_formset)
eq_(self.get_webapp().previews.count(), 1)
def test_edit_preview_reorder(self):
self.preview_add(3)
previews = list(self.get_webapp().previews.all())
base = dict(upload_hash='xxx', file_upload=None)
# Three preview forms were generated; mix them up here.
a = dict(position=1, id=previews[2].id)
b = dict(position=2, id=previews[0].id)
c = dict(position=3, id=previews[1].id)
a.update(base)
b.update(base)
c.update(base)
# Add them in backwards ("third", "second", "first")
data_formset = self.formset_media({}, *(c, b, a), initial_count=3)
eq_(data_formset['files-0-id'], previews[1].id)
eq_(data_formset['files-1-id'], previews[0].id)
eq_(data_formset['files-2-id'], previews[2].id)
self.client.post(self.edit_url, data_formset)
# They should come out "first", "second", "third".
eq_(self.get_webapp().previews.all()[0].id, previews[2].id)
eq_(self.get_webapp().previews.all()[1].id, previews[0].id)
eq_(self.get_webapp().previews.all()[2].id, previews[1].id)
def test_edit_preview_delete(self):
self.preview_add()
self.preview_add()
orig_previews = self.get_webapp().previews.all()
# Delete second preview. Keep the first.
edited = {'DELETE': 'checked',
'upload_hash': 'xxx',
'id': orig_previews[1].id,
'position': 0,
'file_upload': None}
ctx = self.client.get(self.edit_url).context
first = initial(ctx['preview_form'].forms[0])
first['upload_hash'] = 'xxx'
data_formset = self.formset_media(edited, *(first,), initial_count=2)
r = self.client.post(self.edit_url, data_formset)
self.assertNoFormErrors(r)
# First one should still be there.
eq_(list(self.get_webapp().previews.all()), [orig_previews[0]])
def test_edit_preview_add_another(self):
self.preview_add()
self.preview_add()
eq_(self.get_webapp().previews.count(), 2)
def test_edit_preview_add_two(self):
self.preview_add(2)
eq_(self.get_webapp().previews.count(), 2)
def test_screenshot_video_required(self):
r = self.client.post(self.edit_url, self.formset_media())
eq_(r.context['preview_form'].non_form_errors(),
['You must upload at least one screenshot or video.'])
def test_screenshot_with_icon(self):
self.preview_add()
preview = self.get_webapp().previews.all()[0]
edited = {'upload_hash': '', 'id': preview.id}
data_formset = self.formset_media(edited, initial_count=1)
data_formset.update(icon_type='image/png', icon_upload_hash='')
r = self.client.post(self.edit_url, data_formset)
self.assertNoFormErrors(r)
class TestEditDetails(TestEdit):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestEditDetails, self).setUp()
self.url = self.get_url('details')
self.edit_url = self.get_url('details', edit=True)
def get_dict(self, **kw):
data = dict(default_locale='en-US',
homepage='http://twitter.com/fligtarsmom',
privacy_policy="fligtar's mom does <em>not</em> share "
"your data with third parties.")
data.update(kw)
return data
def test_form_url(self):
self.check_form_url('details')
def test_edit(self):
data = self.get_dict()
r = self.client.post(self.edit_url, data)
self.assertNoFormErrors(r)
self.compare(data)
def test_privacy_policy_xss(self):
self.webapp.privacy_policy = ("We\n<b>own</b>your"
"<script>alert('soul')</script>")
self.webapp.save()
r = self.client.get(self.url)
eq_(pq(r.content)('#addon-privacy-policy span[lang]').html(),
"We<br/><b>own</b>your<script>"
"alert('soul')</script>")
def test_edit_exclude_optional_fields(self):
data = self.get_dict()
data.update(default_locale='en-US', homepage='',
privacy_policy='we sell your data to everyone')
r = self.client.post(self.edit_url, data)
self.assertNoFormErrors(r)
self.compare(data)
def test_edit_default_locale_required_trans(self):
# name and description are required in the new locale.
def missing(f):
return error % ', '.join(map(repr, f))
data = self.get_dict()
data.update(description='bullocks',
homepage='http://omg.org/yes',
privacy_policy='your data is delicious')
fields = ['name', 'description']
error = ('Before changing your default locale you must have a name '
'and description in that locale. You are missing %s.')
data.update(default_locale='pt-BR')
r = self.client.post(self.edit_url, data)
self.assertFormError(r, 'form', None, missing(fields))
# Now we have a name.
self.webapp.name = {'pt-BR': 'pt-BR name'}
self.webapp.save()
fields.remove('name')
r = self.client.post(self.edit_url, data)
self.assertFormError(r, 'form', None, missing(fields))
def test_edit_default_locale_frontend_error(self):
data = self.get_dict()
data.update(description='xx', homepage='http://google.com',
default_locale='pt-BR', privacy_policy='pp')
rp = self.client.post(self.edit_url, data)
self.assertContains(rp,
'Before changing your default locale you must')
def test_edit_locale(self):
self.webapp.update(default_locale='en-US')
r = self.client.get(self.url)
eq_(pq(r.content)('.addon_edit_locale').eq(0).text(),
'English (US)')
def test_homepage_url_optional(self):
r = self.client.post(self.edit_url, self.get_dict(homepage=''))
self.assertNoFormErrors(r)
def test_homepage_url_invalid(self):
r = self.client.post(self.edit_url,
self.get_dict(homepage='xxx'))
self.assertFormError(r, 'form', 'homepage', 'Enter a valid URL.')
def test_games_already_excluded_in_brazil(self):
AER.objects.create(addon=self.webapp, region=mkt.regions.BRA.id)
games = 'games'
r = self.client.post(
self.edit_url, self.get_dict(categories=[games]))
self.assertNoFormErrors(r)
eq_(list(AER.objects.filter(addon=self.webapp)
.values_list('region', flat=True)),
[mkt.regions.BRA.id])
class TestEditSupport(TestEdit):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestEditSupport, self).setUp()
self.url = self.get_url('support')
self.edit_url = self.get_url('support', edit=True)
def test_form_url(self):
self.check_form_url('support')
def test_edit_support(self):
data = dict(support_email='[email protected]',
support_url='http://apple.com/')
res = self.client.post(self.edit_url, data)
self.assertNoFormErrors(res)
self.compare(data)
def test_edit_support_required(self):
res = self.client.post(self.edit_url, {})
self.assertFormError(
res, 'form', 'support',
'You must provide either a website, an email, or both.')
def test_edit_support_only_one_is_required(self):
data = dict(support_email='[email protected]', support_url='')
res = self.client.post(self.edit_url, data)
self.assertNoFormErrors(res)
self.compare(data)
data = dict(support_email='', support_url='http://my.support.us')
res = self.client.post(self.edit_url, data)
self.assertNoFormErrors(res)
self.compare(data)
def test_edit_support_errors(self):
data = dict(support_email='', support_url='http://my')
res = self.client.post(self.edit_url, data)
self.assertFormError(res, 'form', 'support_url',
'Enter a valid URL.')
ok_(not pq(res.content)('#trans-support_email+.errorlist'))
ok_(pq(res.content)('#trans-support_url+.errorlist'))
data = dict(support_email='test', support_url='')
res = self.client.post(self.edit_url, data)
self.assertFormError(res, 'form', 'support_email',
'Enter a valid email address.')
ok_(pq(res.content)('#trans-support_email+.errorlist'))
ok_(not pq(res.content)('#trans-support_url+.errorlist'))
class TestEditTechnical(TestEdit):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestEditTechnical, self).setUp()
self.url = self.get_url('technical')
self.edit_url = self.get_url('technical', edit=True)
self.latest_file = self.get_webapp().latest_version.all_files[0]
def test_form_url(self):
self.check_form_url('technical')
def test_toggle_flash(self):
# Turn flash on.
r = self.client.post(self.edit_url, formset(**{'flash': 'on'}))
self.assertNoFormErrors(r)
self.latest_file.reload()
self.compare({'uses_flash': True}, instance=self.latest_file)
# And off.
r = self.client.post(self.edit_url, formset(**{'flash': ''}))
self.latest_file.reload()
self.compare({'uses_flash': False}, instance=self.latest_file)
def test_toggle_flash_rejected(self):
# Reject the app.
app = self.get_webapp()
app.update(status=mkt.STATUS_REJECTED)
app.versions.latest().all_files[0].update(status=mkt.STATUS_DISABLED)
app.update_version()
self.test_toggle_flash()
def test_public_stats(self):
o = ActivityLog.objects
eq_(o.count(), 0)
eq_(self.webapp.public_stats, False)
assert not self.webapp.public_stats, (
'Unexpectedly found public stats for app. Says Basta.')
r = self.client.post(self.edit_url, formset(public_stats=True))
self.assertNoFormErrors(r)
self.compare({'public_stats': True})
eq_(o.filter(action=mkt.LOG.EDIT_PROPERTIES.id).count(), 1)
def test_features_hosted(self):
data_on = {'has_contacts': True}
data_off = {'has_contacts': False}
assert not RereviewQueue.objects.filter(addon=self.webapp).exists()
# Turn contacts on.
r = self.client.post(self.edit_url, formset(**data_on))
self.assertNoFormErrors(r)
self.compare_features(data_on)
# And turn it back off.
r = self.client.post(self.edit_url, formset(**data_off))
self.assertNoFormErrors(r)
self.compare_features(data_off)
# Changing features must trigger re-review.
assert RereviewQueue.objects.filter(addon=self.webapp).exists()
def test_features_hosted_app_rejected(self):
# Reject the app.
app = self.get_webapp()
app.update(status=mkt.STATUS_REJECTED)
app.versions.latest().all_files[0].update(status=mkt.STATUS_DISABLED)
app.update_version()
assert not RereviewQueue.objects.filter(addon=self.webapp).exists()
data_on = {'has_contacts': True}
data_off = {'has_contacts': False}
# Display edit technical page
r = self.client.get(self.edit_url)
eq_(r.status_code, 200)
# Turn contacts on.
r = self.client.post(self.edit_url, formset(**data_on))
app = self.get_webapp()
self.assertNoFormErrors(r)
self.compare_features(data_on, version=app.latest_version)
        # Display the edit technical page again: is the feature still on?
r = self.client.get(self.edit_url)
eq_(r.status_code, 200)
ok_(pq(r.content)('#id_has_contacts:checked'))
# And turn it back off.
r = self.client.post(self.edit_url, formset(**data_off))
app = self.get_webapp()
self.assertNoFormErrors(r)
self.compare_features(data_off, version=app.latest_version)
# Changing features on a rejected app must NOT trigger re-review.
assert not RereviewQueue.objects.filter(addon=self.webapp).exists()
class TestAdmin(TestEdit):
fixtures = TestEdit.fixtures
def setUp(self):
super(TestAdmin, self).setUp()
self.url = self.get_url('admin')
self.edit_url = self.get_url('admin', edit=True)
self.webapp = self.get_webapp()
self.login('[email protected]')
def log_in_user(self):
self.login(self.user.email)
def log_in_with(self, rules):
user = UserProfile.objects.get(email='[email protected]')
group = Group.objects.create(name='Whatever', rules=rules)
GroupUser.objects.create(group=group, user=user)
self.login(user.email)
class TestAdminSettings(TestAdmin):
fixtures = TestEdit.fixtures
def test_form_url(self):
self.check_form_url('admin')
def test_overview_visible_as_admin(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('form').length, 1)
assert not r.context.get('form'), (
'Admin Settings form should not be in context')
def test_overview_forbidden_for_nonadmin(self):
self.log_in_user()
eq_(self.client.head(self.url).status_code, 403)
def test_edit_get_as_admin(self):
r = self.client.get(self.edit_url)
eq_(r.status_code, 200)
eq_(pq(r.content)('form').length, 1)
assert r.context.get('form'), 'Admin Settings form expected in context'
def test_edit_post_as_admin(self):
# There are errors, but I don't care. I just want to see if I can POST.
eq_(self.client.post(self.edit_url).status_code, 200)
def test_edit_no_get_as_nonadmin(self):
self.log_in_user()
eq_(self.client.get(self.edit_url).status_code, 403)
def test_edit_no_post_as_nonadmin(self):
self.log_in_user()
eq_(self.client.post(self.edit_url).status_code, 403)
def post_contact(self, **kw):
data = {'position': '1',
'upload_hash': 'abcdef',
'mozilla_contact': '[email protected]'}
data.update(kw)
return self.client.post(self.edit_url, data)
def test_mozilla_contact(self):
self.post_contact()
webapp = self.get_webapp()
eq_(webapp.mozilla_contact, '[email protected]')
def test_mozilla_contact_cleared(self):
self.post_contact(mozilla_contact='')
webapp = self.get_webapp()
eq_(webapp.mozilla_contact, '')
def test_mozilla_contact_invalid(self):
r = self.post_contact(
mozilla_contact='<script>alert("xss")</script>@mozilla.com')
webapp = self.get_webapp()
self.assertFormError(r, 'form', 'mozilla_contact',
'Enter a valid email address.')
eq_(webapp.mozilla_contact, '')
def test_vip_app_toggle(self):
# Turn on.
data = {
'position': 1, # Required, useless in this test.
'vip_app': 'on'
}
r = self.client.post(self.edit_url, data)
self.assertNoFormErrors(r)
self.compare({'vip_app': True})
# And off.
data.update({'vip_app': ''})
r = self.client.post(self.edit_url, data)
self.compare({'vip_app': False})
def test_priority_review_toggle(self):
# Turn on.
data = {
'position': 1, # Required, useless in this test.
'priority_review': 'on'
}
r = self.client.post(self.edit_url, data)
self.assertNoFormErrors(r)
self.compare({'priority_review': True})
# And off.
data = {'position': 1}
r = self.client.post(self.edit_url, data)
self.compare({'priority_review': False})
def test_staff(self):
# Staff and Support Staff should have Apps:Configure.
self.log_in_with('Apps:Configure')
# Test GET.
r = self.client.get(self.edit_url)
eq_(r.status_code, 200)
eq_(pq(r.content)('form').length, 1)
assert r.context.get('form'), 'Admin Settings form expected in context'
# Test POST. Ignore errors.
eq_(self.client.post(self.edit_url).status_code, 200)
def test_developer(self):
# Developers have read-only on admin section.
self.log_in_with('Apps:ViewConfiguration')
# Test GET.
r = self.client.get(self.edit_url)
eq_(r.status_code, 200)
eq_(pq(r.content)('form').length, 1)
assert r.context.get('form'), 'Admin Settings form expected in context'
# Test POST. Ignore errors.
eq_(self.client.post(self.edit_url).status_code, 403)
def test_banner_region_view(self):
self.log_in_with('Apps:ViewConfiguration')
geodata = self.get_webapp().geodata
geodata.banner_message = u'Exclusive message ! Only for AR/BR !'
geodata.banner_regions = [mkt.regions.BRA.id, mkt.regions.ARG.id]
geodata.save()
res = self.client.get(self.url)
eq_(pq(res.content)('#id_banner_message').text(),
unicode(geodata.banner_message))
eq_(pq(res.content)('#id_banner_regions').text(), u'Argentina, Brazil')
def test_banner_region_edit(self):
self.log_in_with('Apps:ViewConfiguration')
geodata = self.webapp.geodata
geodata.banner_message = u'Exclusive message ! Only for AR/BR !'
geodata.banner_regions = [mkt.regions.BRA.id, mkt.regions.ARG.id]
geodata.save()
AER.objects.create(addon=self.webapp, region=mkt.regions.USA.id)
res = self.client.get(self.edit_url)
eq_(res.status_code, 200)
doc = pq(res.content)
inputs = doc.find('input[type=checkbox][name=banner_regions]')
eq_(inputs.length, len(mkt.regions.REGIONS_CHOICES_ID))
checked = doc.find('#id_banner_regions input[type=checkbox]:checked')
eq_(checked.length, 2)
eq_(checked[0].name, 'banner_regions')
eq_(checked[0].value, unicode(mkt.regions.ARG.id))
eq_(pq(checked[0]).parents('li').attr('data-region'),
unicode(mkt.regions.ARG.id))
eq_(checked[1].name, 'banner_regions')
eq_(checked[1].value, unicode(mkt.regions.BRA.id))
eq_(pq(checked[1]).parents('li').attr('data-region'),
unicode(mkt.regions.BRA.id))
def test_banner_region_edit_post(self):
data = {
'position': 1, # Required, useless in this test.
'banner_regions': [unicode(mkt.regions.BRA.id),
unicode(mkt.regions.ESP.id)],
'banner_message_en-us': u'Oh Hai.',
}
res = self.client.post(self.edit_url, data)
eq_(res.status_code, 200)
geodata = self.webapp.geodata.reload()
eq_(geodata.banner_message, data['banner_message_en-us'])
eq_(geodata.banner_regions, [mkt.regions.BRA.id, mkt.regions.ESP.id])
class TestPromoUpload(TestAdmin):
fixtures = TestEdit.fixtures
def post(self, **kw):
data = {'position': '1',
'upload_hash': 'abcdef'}
data.update(kw)
self.client.post(self.edit_url, data)
def test_add(self):
self.post()
webapp = self.get_webapp()
eq_(webapp.previews.count(), 1)
eq_(list(webapp.get_previews()), [])
promo = webapp.get_promo()
eq_(promo.position, -1)
def test_delete(self):
self.post()
assert self.get_webapp().get_promo()
self.post(DELETE=True)
assert not self.get_webapp().get_promo()
class TestEditVersion(TestEdit):
fixtures = fixture('group_admin', 'user_999', 'user_admin',
'user_admin_group', 'webapp_337141')
def setUp(self):
self.webapp = self.get_webapp()
self.webapp.update(is_packaged=True)
self.version_pk = self.webapp.latest_version.pk
self.url = reverse('mkt.developers.apps.versions.edit', kwargs={
'version_id': self.version_pk,
'app_slug': self.webapp.app_slug
})
self.user = UserProfile.objects.get(email='[email protected]')
self.login(self.user)
def test_post(self, **kwargs):
data = {'releasenotes_init': '',
'releasenotes_en-us': 'Hot new version',
'approvalnotes': 'The release notes are true.',
'has_audio': False,
'has_apps': False}
data.update(kwargs)
req = self.client.post(self.url, data)
eq_(req.status_code, 302)
version = Version.objects.no_cache().get(pk=self.version_pk)
eq_(version.releasenotes, data['releasenotes_en-us'])
eq_(version.approvalnotes, data['approvalnotes'])
return version
def test_approval_notes_comm_thread(self):
# With empty note.
self.test_post(approvalnotes='')
eq_(CommunicationNote.objects.count(), 0)
self.test_post(approvalnotes='abc')
notes = CommunicationNote.objects.all()
eq_(notes.count(), 1)
eq_(notes[0].body, 'abc')
eq_(notes[0].note_type, comm.DEVELOPER_VERSION_NOTE_FOR_REVIEWER)
def test_existing_features_initial_form_data(self):
features = self.webapp.current_version.features
features.update(has_audio=True, has_apps=True)
r = self.client.get(self.url)
eq_(r.context['appfeatures_form'].initial,
dict(id=features.id, **features.to_dict()))
@mock.patch('mkt.webapps.tasks.index_webapps.delay')
def test_new_features(self, index_webapps):
assert not RereviewQueue.objects.filter(addon=self.webapp).exists()
index_webapps.reset_mock()
old_modified = self.webapp.modified
# Turn a feature on.
version = self.test_post(has_audio=True)
ok_(version.features.has_audio)
ok_(not version.features.has_apps)
# Addon modified date must have changed.
addon = self.get_webapp()
ok_(addon.modified > old_modified)
old_modified = self.webapp.modified
index_webapps.reset_mock()
# Then turn the feature off.
version = self.test_post(has_audio=False)
ok_(not version.features.has_audio)
ok_(not version.features.has_apps)
# Changing features must trigger re-review.
assert RereviewQueue.objects.filter(addon=self.webapp).exists()
# Addon modified date must have changed.
addon = self.get_webapp()
ok_(addon.modified > old_modified)
# Changing features must trigger a reindex.
eq_(index_webapps.call_count, 1)
def test_features_uncheck_all(self):
version = self.test_post(has_audio=True)
ok_(version.features.has_audio)
req = self.client.post(self.url, {}) # Empty POST should uncheck all.
eq_(req.status_code, 302)
version.features.reload()
ok_(not version.features.has_audio)
def test_correct_version_features(self):
new_version = self.webapp.latest_version.update(id=self.version_pk + 1)
self.webapp.update(_latest_version=new_version)
self.test_new_features()
def test_publish_checkbox_presence(self):
res = self.client.get(self.url)
ok_(not pq(res.content)('#id_publish_immediately'))
self.webapp.latest_version.files.update(status=mkt.STATUS_PENDING)
res = self.client.get(self.url)
ok_(pq(res.content)('#id_publish_immediately'))
| {
"content_hash": "af69795c38d1857ee81a302ad611bf97",
"timestamp": "",
"source": "github",
"line_count": 1577,
"max_line_length": 79,
"avg_line_length": 37.81610653138871,
"alnum_prop": 0.600241464886981,
"repo_name": "clouserw/zamboni",
"id": "912823268152687be96b798734c9ec505bafb8fb",
"size": "59669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/developers/tests/test_views_edit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357623"
},
{
"name": "HTML",
"bytes": "2134650"
},
{
"name": "JavaScript",
"bytes": "532610"
},
{
"name": "Makefile",
"bytes": "4172"
},
{
"name": "Python",
"bytes": "3908875"
},
{
"name": "SQLPL",
"bytes": "98"
},
{
"name": "Shell",
"bytes": "10972"
},
{
"name": "Smarty",
"bytes": "1369"
}
],
"symlink_target": ""
} |
"""
.. describe:: ./manage.py mtbalance
Print the current account balance.
"""
from django.conf import settings
from django.core.management.base import BaseCommand
from mturk.utils import get_mturk_connection
class Command(BaseCommand):
args = '' # '<dir1> <dir2> ...'
help = 'Prints the current balance'
def handle(self, *args, **options):
print >>self.stdout, 'MTurk info:'
for key in dir(settings):
if key.startswith('MTURK') or 'DEBUG' in key:
print ' %s: %s' % (key, getattr(settings, key))
print '\nFetching account balance...'
print 'Account balance:', get_mturk_connection().get_account_balance()
| {
"content_hash": "e79628db1140c3bd23fc00d38824ab65",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 27.64,
"alnum_prop": 0.6309696092619392,
"repo_name": "seanbell/opensurfaces",
"id": "1c6d41c1dfe7676fbb133cbbe1c96654e770ee8e",
"size": "691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/mturk/management/commands/mtbalance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2134399"
},
{
"name": "C++",
"bytes": "944309"
},
{
"name": "CMake",
"bytes": "1314"
},
{
"name": "CSS",
"bytes": "332038"
},
{
"name": "CoffeeScript",
"bytes": "245856"
},
{
"name": "HTML",
"bytes": "286807"
},
{
"name": "JavaScript",
"bytes": "395211"
},
{
"name": "Lua",
"bytes": "4605"
},
{
"name": "M",
"bytes": "43"
},
{
"name": "Makefile",
"bytes": "9862"
},
{
"name": "Matlab",
"bytes": "69652"
},
{
"name": "Objective-C",
"bytes": "547"
},
{
"name": "Python",
"bytes": "2161982"
},
{
"name": "Shell",
"bytes": "54309"
},
{
"name": "TeX",
"bytes": "35639"
}
],
"symlink_target": ""
} |
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._container_apps_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_resource_group_request, build_list_by_subscription_request, build_list_custom_host_name_analysis_request, build_list_secrets_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ContainerAppsOperations:
"""ContainerAppsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~container_apps_api_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> AsyncIterable["_models.ContainerAppCollection"]:
"""Get the Container Apps in a given subscription.
Get the Container Apps in a given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerAppCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~container_apps_api_client.models.ContainerAppCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ContainerAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.App/containerApps'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ContainerAppCollection"]:
"""Get the Container Apps in a given resource group.
Get the Container Apps in a given resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerAppCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~container_apps_api_client.models.ContainerAppCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ContainerAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.ContainerApp":
"""Get the properties of a Container App.
Get the properties of a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param name: Name of the Container App.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerApp, or the result of cls(response)
:rtype: ~container_apps_api_client.models.ContainerApp
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerApp"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
name=name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ContainerApp', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{name}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
name: str,
container_app_envelope: "_models.ContainerApp",
**kwargs: Any
) -> "_models.ContainerApp":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerApp"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(container_app_envelope, 'ContainerApp')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
name=name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ContainerApp', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ContainerApp', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{name}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
name: str,
container_app_envelope: "_models.ContainerApp",
**kwargs: Any
) -> AsyncLROPoller["_models.ContainerApp"]:
"""Create or update a Container App.
Description for Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param name: Name of the Container App.
:type name: str
:param container_app_envelope: Properties used to create a container app.
:type container_app_envelope: ~container_apps_api_client.models.ContainerApp
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~container_apps_api_client.models.ContainerApp]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerApp"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
name=name,
container_app_envelope=container_app_envelope,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ContainerApp', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{name}'} # type: ignore
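    # Sketch of driving the long-running operation above, assuming the same
    # client wiring as the class-level example; "client" and the envelope
    # contents are illustrative, not taken from this file:
    #
    #   poller = await client.container_apps.begin_create_or_update(
    #       resource_group_name="my-rg", name="my-app",
    #       container_app_envelope=container_app)  # a models.ContainerApp
    #   app = await poller.result()  # waits until provisioning completes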
async def _delete_initial(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
name=name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{name}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a Container App.
Description for Delete a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param name: Name of the Container App.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{name}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
name: str,
container_app_envelope: "_models.ContainerAppPatch",
**kwargs: Any
) -> "_models.ContainerApp":
"""Update properties of a Container App.
Patches a Container App. Currently only patching of tags is supported.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param name: Name of the Container App.
:type name: str
:param container_app_envelope: Properties of a container app that need to be updated.
:type container_app_envelope: ~container_apps_api_client.models.ContainerAppPatch
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerApp, or the result of cls(response)
:rtype: ~container_apps_api_client.models.ContainerApp
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ContainerApp"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(container_app_envelope, 'ContainerAppPatch')
request = build_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
name=name,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ContainerApp', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{name}'} # type: ignore
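    # Sketch for the PATCH operation above: per its docstring only tags can
    # currently be patched, so a minimal envelope might look like this (the
    # "tags" field name is assumed from the ContainerAppPatch model):
    #
    #   patched = await client.container_apps.update(
    #       resource_group_name="my-rg", name="my-app",
    #       container_app_envelope=models.ContainerAppPatch(tags={"env": "dev"}))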
@distributed_trace_async
async def list_custom_host_name_analysis(
self,
resource_group_name: str,
container_app_name: str,
custom_hostname: Optional[str] = None,
**kwargs: Any
) -> "_models.CustomHostnameAnalysisResult":
"""Analyzes a custom hostname for a Container App.
Analyzes a custom hostname for a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param container_app_name: Name of the Container App.
:type container_app_name: str
:param custom_hostname: Custom hostname.
:type custom_hostname: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomHostnameAnalysisResult, or the result of cls(response)
:rtype: ~container_apps_api_client.models.CustomHostnameAnalysisResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomHostnameAnalysisResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_custom_host_name_analysis_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
container_app_name=container_app_name,
custom_hostname=custom_hostname,
template_url=self.list_custom_host_name_analysis.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomHostnameAnalysisResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_custom_host_name_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listCustomHostNameAnalysis'} # type: ignore
@distributed_trace_async
async def list_secrets(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.SecretsCollection":
"""List secrets for a container app.
List secrets for a container app.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param name: Name of the Container App.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretsCollection, or the result of cls(response)
:rtype: ~container_apps_api_client.models.SecretsCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretsCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_secrets_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
name=name,
template_url=self.list_secrets.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecretsCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_secrets.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{name}/listSecrets'} # type: ignore
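    # Illustrative usage sketch only -- the async client class name, operations-group
    # attribute, and result attribute below are assumptions inferred from this generated
    # module, and all resource names are placeholders:
    #
    #     from azure.identity.aio import DefaultAzureCredential
    #     from azure.mgmt.app.aio import ContainerAppsAPIClient
    #
    #     async def dump_secret_names():
    #         async with ContainerAppsAPIClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #             secrets = await client.container_apps.list_secrets("<resource-group>", "<app-name>")
    #             for secret in secrets.value or []:
    #                 print(secret.name)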
| {
"content_hash": "6abbbfdd268fddd6c192dcaa442fe1d8",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 310,
"avg_line_length": 44.79080824088748,
"alnum_prop": 0.6516647206595195,
"repo_name": "Azure/azure-sdk-for-python",
"id": "abea0c93db47d1ab8b23c311d6871ef02c9ab292",
"size": "28730",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/app/azure-mgmt-app/azure/mgmt/app/aio/operations/_container_apps_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
import json
import unittest
import responses
from linebot import (
LineBotApi
)
from linebot.models import (
VideoSendMessage
)
class TestLineBotApi(unittest.TestCase):
def setUp(self):
self.tested = LineBotApi('channel_secret')
self.video_message = VideoSendMessage(
original_content_url='https://example.com/original.mp4',
preview_image_url='https://example.com/preview.jpg'
)
self.message = [{
"type": "video",
"originalContentUrl": "https://example.com/original.mp4",
"previewImageUrl": "https://example.com/preview.jpg",
}]
@responses.activate
def test_push_video_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push',
json={}, status=200
)
self.tested.push_message('to', self.video_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push')
self.assertEqual(
json.loads(request.body),
{
"to": "to",
"messages": self.message
}
)
@responses.activate
def test_reply_video_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply',
json={}, status=200
)
self.tested.reply_message('replyToken', self.video_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply')
self.assertEqual(
json.loads(request.body),
{
"replyToken": "replyToken",
"messages": self.message
}
)
@responses.activate
def test_multicast_video_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/multicast',
json={}, status=200
)
self.tested.multicast(['to1', 'to2'], self.video_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/multicast')
self.assertEqual(
json.loads(request.body),
{
"to": ['to1', 'to2'],
"messages": self.message
}
)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "a37ce0d1dfb6dc68d498b4a0af765fbe",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 74,
"avg_line_length": 27.80392156862745,
"alnum_prop": 0.5571227080394923,
"repo_name": "monhustla/line-bot-sdk-python",
"id": "217b5838fa41bd11d30aaab757adfa6b868032d7",
"size": "3417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/api/test_send_video_message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "191401"
}
],
"symlink_target": ""
} |
import sys
import warnings
import pytest
import numpy as np
from .test_table import comparerecords
from ..hdu.base import _ValidHDU
from ....io import fits
from . import FitsTestCase
class TestChecksumFunctions(FitsTestCase):
# All checksums have been verified against CFITSIO
def setup(self):
super().setup()
self._oldfilters = warnings.filters[:]
warnings.filterwarnings(
'error',
message='Checksum verification failed')
warnings.filterwarnings(
'error',
message='Datasum verification failed')
# Monkey-patch the _get_timestamp method so that the checksum
        # timestamps (and hence the checksums themselves) are always the same
self._old_get_timestamp = _ValidHDU._get_timestamp
_ValidHDU._get_timestamp = lambda self: '2013-12-20T13:36:10'
def teardown(self):
super().teardown()
warnings.filters = self._oldfilters
_ValidHDU._get_timestamp = self._old_get_timestamp
def test_sample_file(self):
hdul = fits.open(self.data('checksum.fits'), checksum=True)
hdul.close()
def test_image_create(self):
n = np.arange(100, dtype=np.int64)
hdu = fits.PrimaryHDU(n)
hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert (hdu.data == hdul[0].data).all()
assert 'CHECKSUM' in hdul[0].header
assert 'DATASUM' in hdul[0].header
if not sys.platform.startswith('win32'):
# The checksum ends up being different on Windows, possibly due
# to slight floating point differences
assert hdul[0].header['CHECKSUM'] == 'ZHMkeGKjZGKjbGKj'
assert hdul[0].header['DATASUM'] == '4950'
def test_scaled_data(self):
with fits.open(self.data('scale.fits')) as hdul:
orig_data = hdul[0].data.copy()
hdul[0].scale('int16', 'old')
hdul.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul1:
assert (hdul1[0].data == orig_data).all()
assert 'CHECKSUM' in hdul1[0].header
assert hdul1[0].header['CHECKSUM'] == 'cUmaeUjZcUjacUjW'
assert 'DATASUM' in hdul1[0].header
assert hdul1[0].header['DATASUM'] == '1891563534'
def test_scaled_data_auto_rescale(self):
"""
Regression test for
https://github.com/astropy/astropy/issues/3883#issuecomment-115122647
Ensure that when scaled data is automatically rescaled on
opening/writing a file that the checksum and datasum are computed for
the rescaled array.
"""
with fits.open(self.data('scale.fits')) as hdul:
# Write out a copy of the data with the rescaling applied
hdul.writeto(self.temp('rescaled.fits'))
# Reopen the new file and save it back again with a checksum
with fits.open(self.temp('rescaled.fits')) as hdul:
hdul.writeto(self.temp('rescaled2.fits'), overwrite=True,
checksum=True)
        # Now do the same as the first writeto, but pass checksum immediately
with fits.open(self.data('scale.fits')) as hdul:
hdul.writeto(self.temp('rescaled3.fits'), checksum=True)
# Also don't rescale the data but add a checksum
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
hdul.writeto(self.temp('scaled.fits'), checksum=True)
        # Must use nested with statements to support older Python versions
# (but contextlib.nested is not available in newer Pythons :(
with fits.open(self.temp('rescaled2.fits')) as hdul1:
with fits.open(self.temp('rescaled3.fits')) as hdul2:
with fits.open(self.temp('scaled.fits')) as hdul3:
hdr1 = hdul1[0].header
hdr2 = hdul2[0].header
hdr3 = hdul3[0].header
assert hdr1['DATASUM'] == hdr2['DATASUM']
assert hdr1['CHECKSUM'] == hdr2['CHECKSUM']
assert hdr1['DATASUM'] != hdr3['DATASUM']
assert hdr1['CHECKSUM'] != hdr3['CHECKSUM']
def test_uint16_data(self):
checksums = [
('aDcXaCcXaCcXaCcX', '0'), ('oYiGqXi9oXiEoXi9', '1746888714'),
('VhqQWZoQVfoQVZoQ', '0'), ('4cPp5aOn4aOn4aOn', '0'),
('8aCN8X9N8aAN8W9N', '1756785133'), ('UhqdUZnbUfnbUZnb', '0'),
('4cQJ5aN94aNG4aN9', '0')]
with fits.open(self.data('o4sp040b0_raw.fits'), uint=True) as hdul:
hdul.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), uint=True,
checksum=True) as hdul1:
for idx, (hdu_a, hdu_b) in enumerate(zip(hdul, hdul1)):
if hdu_a.data is None or hdu_b.data is None:
assert hdu_a.data is hdu_b.data
else:
assert (hdu_a.data == hdu_b.data).all()
assert 'CHECKSUM' in hdul[idx].header
assert hdul[idx].header['CHECKSUM'] == checksums[idx][0]
assert 'DATASUM' in hdul[idx].header
assert hdul[idx].header['DATASUM'] == checksums[idx][1]
def test_groups_hdu_data(self):
imdata = np.arange(100.0)
imdata.shape = (10, 1, 1, 2, 5)
pdata1 = np.arange(10) + 0.1
pdata2 = 42
x = fits.hdu.groups.GroupData(imdata, parnames=[str('abc'), str('xyz')],
pardata=[pdata1, pdata2], bitpix=-32)
hdu = fits.GroupsHDU(x)
hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert comparerecords(hdul[0].data, hdu.data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == '3eDQAZDO4dDOAZDO'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '2797758084'
def test_binary_table_data(self):
a1 = np.array(['NGC1001', 'NGC1002', 'NGC1003'])
a2 = np.array([11.1, 12.3, 15.2])
col1 = fits.Column(name='target', format='20A', array=a1)
col2 = fits.Column(name='V_mag', format='E', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert comparerecords(tbhdu.data, hdul[1].data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == 'aD1Oa90MaC0Ma90M'
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == '1062205743'
def test_variable_length_table_data(self):
c1 = fits.Column(name='var', format='PJ()',
array=np.array([[45.0, 56], np.array([11, 12, 13])],
'O'))
c2 = fits.Column(name='xyz', format='2I', array=[[11, 3], [12, 4]])
tbhdu = fits.BinTableHDU.from_columns([c1, c2])
tbhdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert comparerecords(tbhdu.data, hdul[1].data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == 'YIGoaIEmZIEmaIEm'
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == '1507485'
def test_ascii_table_data(self):
a1 = np.array(['abc', 'def'])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name='abc', format='A3', array=a1)
# This column used to be E format, but the single-precision float lost
# too much precision when scaling so it was changed to a D
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
x = fits.ColDefs([c1, c2, c3])
hdu = fits.TableHDU.from_columns(x)
hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
if not sys.platform.startswith('win32'):
# The checksum ends up being different on Windows, possibly due
# to slight floating point differences
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == '3rKFAoI94oICAoI9'
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == '1914653725'
def test_compressed_image_data(self):
with fits.open(self.data('comp.fits')) as h1:
h1.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as h2:
assert np.all(h1[1].data == h2[1].data)
assert 'CHECKSUM' in h2[0].header
assert h2[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in h2[0].header
assert h2[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in h2[1].header
assert h2[1].header['CHECKSUM'] == 'ZeAbdb8aZbAabb7a'
assert 'DATASUM' in h2[1].header
assert h2[1].header['DATASUM'] == '113055149'
def test_compressed_image_data_int16(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdu.writeto(self.temp('uncomp.fits'), checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert np.all(hdul[1].data == comp_hdu.data)
assert np.all(hdul[1].data == hdu.data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1]._header['CHECKSUM'] == 'J5cCJ5c9J5cAJ5c9'
assert 'DATASUM' in hdul[1].header
assert hdul[1]._header['DATASUM'] == '2453673070'
assert 'CHECKSUM' in hdul[1].header
with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
header_comp = hdul[1]._header
header_uncomp = hdul2[1].header
assert 'ZHECKSUM' in header_comp
assert 'CHECKSUM' in header_uncomp
assert header_uncomp['CHECKSUM'] == 'ZE94eE91ZE91bE91'
assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
assert 'ZDATASUM' in header_comp
assert 'DATASUM' in header_uncomp
assert header_uncomp['DATASUM'] == '160565700'
assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
def test_compressed_image_data_float32(self):
n = np.arange(100, dtype='float32')
hdu = fits.ImageHDU(n)
comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdu.writeto(self.temp('uncomp.fits'), checksum=True)
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
assert np.all(hdul[1].data == comp_hdu.data)
assert np.all(hdul[1].data == hdu.data)
assert 'CHECKSUM' in hdul[0].header
assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert 'DATASUM' in hdul[1].header
if not sys.platform.startswith('win32'):
# The checksum ends up being different on Windows, possibly due
# to slight floating point differences
assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH'
assert hdul[1]._header['DATASUM'] == '1277667818'
with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
header_comp = hdul[1]._header
header_uncomp = hdul2[1].header
assert 'ZHECKSUM' in header_comp
assert 'CHECKSUM' in header_uncomp
assert header_uncomp['CHECKSUM'] == 'Cgr5FZo2Cdo2CZo2'
assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
assert 'ZDATASUM' in header_comp
assert 'DATASUM' in header_uncomp
assert header_uncomp['DATASUM'] == '2393636889'
assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
def test_open_with_no_keywords(self):
hdul = fits.open(self.data('arange.fits'), checksum=True)
hdul.close()
def test_append(self):
hdul = fits.open(self.data('tb.fits'))
hdul.writeto(self.temp('tmp.fits'), overwrite=True)
n = np.arange(100)
fits.append(self.temp('tmp.fits'), n, checksum=True)
hdul.close()
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
assert hdul[0]._checksum is None
hdul.close()
def test_writeto_convenience(self):
n = np.arange(100)
fits.writeto(self.temp('tmp.fits'), n, overwrite=True, checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
self._check_checksums(hdul[0])
hdul.close()
def test_hdu_writeto(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
hdu.writeto(self.temp('tmp.fits'), checksum=True)
hdul = fits.open(self.temp('tmp.fits'), checksum=True)
self._check_checksums(hdul[0])
hdul.close()
def test_hdu_writeto_existing(self):
"""
Tests that when using writeto with checksum=True, a checksum and
datasum are added to HDUs that did not previously have one.
Regression test for https://github.com/spacetelescope/PyFITS/issues/8
"""
with fits.open(self.data('tb.fits')) as hdul:
hdul.writeto(self.temp('test.fits'), checksum=True)
with fits.open(self.temp('test.fits')) as hdul:
assert 'CHECKSUM' in hdul[0].header
# These checksums were verified against CFITSIO
assert hdul[0].header['CHECKSUM'] == '7UgqATfo7TfoATfo'
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == '99daD8bX98baA8bU'
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == '1829680925'
def test_datasum_only(self):
n = np.arange(100, dtype='int16')
hdu = fits.ImageHDU(n)
hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum='datasum')
with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
if not (hasattr(hdul[0], '_datasum') and hdul[0]._datasum):
pytest.fail(msg='Missing DATASUM keyword')
if not (hasattr(hdul[0], '_checksum') and not hdul[0]._checksum):
pytest.fail(msg='Non-empty CHECKSUM keyword')
def test_open_update_mode_preserve_checksum(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148 where
checksums are being removed from headers when a file is opened in
update mode, even though no changes were made to the file.
"""
self.copy_file('checksum.fits')
with fits.open(self.temp('checksum.fits')) as hdul:
data = hdul[1].data.copy()
hdul = fits.open(self.temp('checksum.fits'), mode='update')
hdul.close()
with fits.open(self.temp('checksum.fits')) as hdul:
assert 'CHECKSUM' in hdul[1].header
assert 'DATASUM' in hdul[1].header
assert comparerecords(data, hdul[1].data)
def test_open_update_mode_update_checksum(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148, part
2. This ensures that if a file contains a checksum, the checksum is
updated when changes are saved to the file, even if the file was opened
with the default of checksum=False.
An existing checksum and/or datasum are only stripped if the file is
opened with checksum='remove'.
"""
self.copy_file('checksum.fits')
with fits.open(self.temp('checksum.fits')) as hdul:
header = hdul[1].header.copy()
data = hdul[1].data.copy()
with fits.open(self.temp('checksum.fits'), mode='update') as hdul:
hdul[1].header['FOO'] = 'BAR'
hdul[1].data[0]['TIME'] = 42
with fits.open(self.temp('checksum.fits')) as hdul:
header2 = hdul[1].header
data2 = hdul[1].data
assert header2[:-3] == header[:-2]
assert 'CHECKSUM' in header2
assert 'DATASUM' in header2
assert header2['FOO'] == 'BAR'
assert (data2['TIME'][1:] == data['TIME'][1:]).all()
assert data2['TIME'][0] == 42
with fits.open(self.temp('checksum.fits'), mode='update',
checksum='remove') as hdul:
pass
with fits.open(self.temp('checksum.fits')) as hdul:
header2 = hdul[1].header
data2 = hdul[1].data
assert header2[:-1] == header[:-2]
assert 'CHECKSUM' not in header2
assert 'DATASUM' not in header2
assert header2['FOO'] == 'BAR'
assert (data2['TIME'][1:] == data['TIME'][1:]).all()
assert data2['TIME'][0] == 42
def test_overwrite_invalid(self):
"""
        Tests that an invalid checksum or datasum is overwritten when the file is
saved.
"""
reffile = self.temp('ref.fits')
with fits.open(self.data('tb.fits')) as hdul:
hdul.writeto(reffile, checksum=True)
testfile = self.temp('test.fits')
with fits.open(self.data('tb.fits')) as hdul:
hdul[0].header['DATASUM'] = '1 '
hdul[0].header['CHECKSUM'] = '8UgqATfo7TfoATfo'
hdul[1].header['DATASUM'] = '2349680925'
hdul[1].header['CHECKSUM'] = '11daD8bX98baA8bU'
hdul.writeto(testfile)
with fits.open(testfile) as hdul:
hdul.writeto(self.temp('test2.fits'), checksum=True)
with fits.open(self.temp('test2.fits')) as hdul:
with fits.open(reffile) as ref:
assert 'CHECKSUM' in hdul[0].header
# These checksums were verified against CFITSIO
assert hdul[0].header['CHECKSUM'] == ref[0].header['CHECKSUM']
assert 'DATASUM' in hdul[0].header
assert hdul[0].header['DATASUM'] == '0'
assert 'CHECKSUM' in hdul[1].header
assert hdul[1].header['CHECKSUM'] == ref[1].header['CHECKSUM']
assert 'DATASUM' in hdul[1].header
assert hdul[1].header['DATASUM'] == ref[1].header['DATASUM']
def _check_checksums(self, hdu):
if not (hasattr(hdu, '_datasum') and hdu._datasum):
pytest.fail(msg='Missing DATASUM keyword')
if not (hasattr(hdu, '_checksum') and hdu._checksum):
pytest.fail(msg='Missing CHECKSUM keyword')
| {
"content_hash": "24b6158ec03e1ef614f01d6ea7e31929",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 83,
"avg_line_length": 45.47692307692308,
"alnum_prop": 0.5745698820800309,
"repo_name": "funbaker/astropy",
"id": "14959407a2c2baaef42f5b6394311b6feb9e0e24",
"size": "20756",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/io/fits/tests/test_checksum.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "367279"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "8331581"
},
{
"name": "TeX",
"bytes": "805"
}
],
"symlink_target": ""
} |
import datetime
import unittest
from unittest import mock
from airflow.models import TaskInstance
from airflow.models.dag import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from airflow.utils import dot_renderer
from airflow.utils.state import State
START_DATE = datetime.datetime.now()
class TestDotRenderer(unittest.TestCase):
def test_should_render_dag(self):
dag = DAG(dag_id="DAG_ID")
task_1 = BashOperator(dag=dag, start_date=START_DATE, task_id="first", bash_command="echo 1")
task_2 = BashOperator(dag=dag, start_date=START_DATE, task_id="second", bash_command="echo 1")
task_3 = PythonOperator(
dag=dag, start_date=START_DATE, task_id="third", python_callable=mock.MagicMock()
)
task_1 >> task_2
task_1 >> task_3
dot = dot_renderer.render_dag(dag)
source = dot.source
# Should render DAG title
self.assertIn("label=DAG_ID", source)
self.assertIn("first", source)
self.assertIn("second", source)
self.assertIn("third", source)
self.assertIn("first -> second", source)
self.assertIn("first -> third", source)
self.assertIn('fillcolor="#f0ede4"', source)
self.assertIn('fillcolor="#f0ede4"', source)
def test_should_render_dag_with_task_instances(self):
dag = DAG(dag_id="DAG_ID")
task_1 = BashOperator(dag=dag, start_date=START_DATE, task_id="first", bash_command="echo 1")
task_2 = BashOperator(dag=dag, start_date=START_DATE, task_id="second", bash_command="echo 1")
task_3 = PythonOperator(
dag=dag, start_date=START_DATE, task_id="third", python_callable=mock.MagicMock()
)
task_1 >> task_2
task_1 >> task_3
tis = [
TaskInstance(task_1, execution_date=START_DATE, state=State.SCHEDULED),
TaskInstance(task_2, execution_date=START_DATE, state=State.SUCCESS),
TaskInstance(task_3, execution_date=START_DATE, state=State.RUNNING),
]
dot = dot_renderer.render_dag(dag, tis=tis)
source = dot.source
# Should render DAG title
self.assertIn("label=DAG_ID", source)
self.assertIn('first [color=black fillcolor=tan shape=rectangle style="filled,rounded"]', source)
self.assertIn('second [color=white fillcolor=green shape=rectangle style="filled,rounded"]', source)
self.assertIn('third [color=black fillcolor=lime shape=rectangle style="filled,rounded"]', source)
def test_should_render_dag_orientation(self):
orientation = "TB"
dag = DAG(dag_id="DAG_ID", orientation=orientation)
task_1 = BashOperator(dag=dag, start_date=START_DATE, task_id="first", bash_command="echo 1")
task_2 = BashOperator(dag=dag, start_date=START_DATE, task_id="second", bash_command="echo 1")
task_3 = PythonOperator(
dag=dag, start_date=START_DATE, task_id="third", python_callable=mock.MagicMock()
)
task_1 >> task_2
task_1 >> task_3
tis = [
TaskInstance(task_1, execution_date=START_DATE, state=State.SCHEDULED),
TaskInstance(task_2, execution_date=START_DATE, state=State.SUCCESS),
TaskInstance(task_3, execution_date=START_DATE, state=State.RUNNING),
]
dot = dot_renderer.render_dag(dag, tis=tis)
source = dot.source
# Should render DAG title with orientation
self.assertIn("label=DAG_ID", source)
self.assertIn(f'label=DAG_ID labelloc=t rankdir={orientation}', source)
# Change orientation
orientation = "LR"
dag = DAG(dag_id="DAG_ID", orientation=orientation)
dot = dot_renderer.render_dag(dag, tis=tis)
source = dot.source
# Should render DAG title with orientation
self.assertIn("label=DAG_ID", source)
self.assertIn(f'label=DAG_ID labelloc=t rankdir={orientation}', source)
| {
"content_hash": "f381bbf36edc52e3a13b7159d3295f63",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 108,
"avg_line_length": 45.04494382022472,
"alnum_prop": 0.6465452731354453,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "3016b0c32ff4c326f33a397403a6c3269caf0e68",
"size": "4818",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/utils/test_dot_renderer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
} |
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0032_datamigrate_update_for_no_none_values_in_assignment_firstdeadline'),
]
operations = [
migrations.AlterField(
model_name='assignment',
name='first_deadline',
field=models.DateTimeField(default=datetime.datetime(2100, 1, 1, 0, 0)),
preserve_default=False,
),
migrations.AlterField(
model_name='assignment',
name='points_to_grade_mapper',
field=models.CharField(blank=True, choices=[('passed-failed', 'Passed or failed'), ('raw-points', 'Points'), ('custom-table', 'Lookup in a table defined by you (A-F, and other grading systems)')], default='passed-failed', max_length=25, null=True),
),
]
| {
"content_hash": "39a019f5eab106748bb12ffb2a232054",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 260,
"avg_line_length": 37.47826086956522,
"alnum_prop": 0.622969837587007,
"repo_name": "devilry/devilry-django",
"id": "008b6eefb67033c19113c63b27ffe5fc4152b0cf",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/apps/core/migrations/0033_auto_20170220_1330.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
} |
import xmlrpclib
import functools
HOST = 'localhost'
PORT = 8069
DB = 'i18n_plantilla'
USER = 'admin'
PASS = 'admin'
ROOT = 'http://%s:%d/xmlrpc/' % (HOST,PORT)
# 1. Login
uid = xmlrpclib.ServerProxy(ROOT + 'common').login(DB,USER,PASS)
print "Logged in as %s (uid:%d)" % (USER,uid)
call = functools.partial(
xmlrpclib.ServerProxy(ROOT + 'object').execute,
DB, uid, PASS)
# 2. Read the sessions
model = 'openacademy.session'
domain = []
method_name = 'search_read'
sessions = call(model, method_name, domain, ['name', 'seats', 'taken_seats'])
print "sessions",sessions
for session in sessions:
print "Session %s (%s seats), taken seats %d" % (session['name'], session['seats'], session['taken_seats'])
method_name = 'search'
domain = [('name', '=', 'Curso Odoo 1')]
course_ids = call('openacademy.course', method_name, domain)
course_id = course_ids[0]
print "course_ids",course_ids
#method_name = 'create'
#course_id = call('openacademy.course', method_name, {'name': 'Curso Odoo 1'})
method_name = 'create'
responsible_id = call('res.partner', 'search', [('name', '=', 'Vauxoo')])[0]
print "responsible_id", responsible_id
new_sesion_id = call(model, method_name, {
'name': 'Sesion from ws',
'instructor_id': responsible_id,
'course_id': course_id,
    #'attendee_ids': [(4, responsible_id)],
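    # (4, id) is the Odoo relational-field command "link existing record";
    # ids 7 and 3 below are partner ids assumed to exist in this demo database.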
'attendee_ids': [(4, 7), (4, 3)],
})
print "new_sesion_id",new_sesion_id
| {
"content_hash": "cea812d9ca53b84733ff0fb255046aef",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 111,
"avg_line_length": 28.36,
"alnum_prop": 0.6495063469675599,
"repo_name": "arogel/openacademy-project",
"id": "68a4b41332aaac1241c8fc44352a4346296355c3",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_services/ws_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7751"
}
],
"symlink_target": ""
} |
import json
import os
from golem.core import utils
from golem.test_runner import test_runner
from golem.test_runner.conf import ResultsEnum
from golem.report.execution_report import create_execution_directory
from golem.report import test_report
from golem.report.test_report import get_test_file_report_json
from golem.report.test_report import get_test_debug_log
from golem.report.test_report import create_test_file_report_dir
from golem.report.test_report import generate_report
class TestGetTestFileReportJson:
def test_get_test_file_report_json(self, project_class, test_utils):
_, project = project_class.activate()
exc = test_utils.execute_random_suite(project)
test_file = exc['exec_data']['tests'][0]['test_file']
test_set = exc['exec_data']['tests'][0]['set_name']
test_data = get_test_file_report_json(project, exc['suite_name'], exc['timestamp'],
test_file, test_set)
assert len(test_data) == 1
assert isinstance(test_data[0], dict)
assert test_data[0]['test_file'] == test_file
assert test_data[0]['test'] == 'test'
assert test_data[0]['set_name'] == ''
assert len(test_data[0]) == 12
def test_report_does_not_exist(self, project_class, test_utils):
_, project = project_class.activate()
test_data = get_test_file_report_json(project, 'execution', 'timestamp', 'test_name')
assert test_data is None
# class TestGetTestCaseData:
#
# def test_get_test_case_data(self, project_class, test_utils):
# _, project = project_class.activate()
# exc = test_utils.execute_random_suite(project)
# test_name = exc['exec_data']['tests'][0]['name']
# test_set = exc['exec_data']['tests'][0]['test_set']
#
# test_data = get_test_case_data(project, test_name, exc['suite_name'],
# exc['timestamp'], test_set)
#
# assert test_data['name'] == exc['tests'][0]
# assert isinstance(test_data['debug_log'], list) and len(test_data['debug_log'])
# assert isinstance(test_data['info_log'], list) and len(test_data['info_log'])
# assert test_data['has_finished'] is True
class TestTestFileReportDir:
def test_create_test_file_report_dir_without_setname(self, project_session):
testdir, project = project_session.activate()
suite = 'suite1'
timestamp = '1.2.3.4'
test = 'test1'
path = test_report.test_file_report_dir(test, project, suite, timestamp)
expected = os.path.join(testdir, 'projects', project, 'reports', suite, timestamp, test)
assert path == expected
def test_create_test_file_report_dir_with_setname(self, project_session):
testdir, project = project_session.activate()
suite = 'suite1'
timestamp = '1.2.3.4'
test = 'test1'
test_set = 'test_set1'
path = test_report.test_file_report_dir(test, project, suite, timestamp, test_set)
expected = os.path.join(testdir, 'projects', project, 'reports', suite, timestamp,
f'{test}.{test_set}')
assert path == expected
class TestTestFunctionReportDir:
def test_create_test_file_report_dir_without_setname(self, project_session):
testdir, project = project_session.activate()
suite = 'suite1'
timestamp = '1.2.3.4'
test_file = 'test1'
test_function = 'function1'
path = test_report.test_function_report_dir(project, suite, timestamp, test_file, test_function)
test_file_path = test_report.test_file_report_dir(test_file, project, suite, timestamp)
expected = os.path.join(test_file_path, test_function)
assert path == expected
class TestGetTestLog:
def test_get_test_x_log(self, project_class, test_utils):
_, project = project_class.activate()
exc = test_utils.execute_random_suite(project)
suite_name = exc['suite_name']
test_file = exc['exec_data']['tests'][0]['test_file']
set_name = exc['exec_data']['tests'][0]['set_name']
log = get_test_debug_log(project, suite_name, exc['timestamp'], test_file, set_name)
assert f'root INFO Test execution started: {test_file}' in log[0]
# inexistent test set
log = get_test_debug_log(project, suite_name, exc['timestamp'], test_file,
'inexistent_test_set')
assert log is None
# inexistent test
log = get_test_debug_log(project, suite_name, exc['timestamp'],
'inexistent_test_name', set_name)
assert log is None
class TestCreateReportDirectory:
def test_create_report_directory_test_without_set(self, project_session):
testdir, project = project_session.activate()
timestamp = utils.get_timestamp()
test_name = 'testing_report_001'
exec_dir = create_execution_directory(project, test_name, timestamp)
directory = create_test_file_report_dir(exec_dir, test_name, '')
assert os.path.isdir(directory)
class TestInitializeTestFileReport:
def test_initialize_test_file_report(self, project_session, test_utils):
_, project = project_session.activate()
# create a test
test_file = test_utils.random_string()
content = 'def test_one(data):\n' \
' pass\n' \
'def test_two(data):\n' \
' pass'
test_utils.create_test(project, test_file, content)
# create test file reportdir
execution = test_file
timestamp = utils.get_timestamp()
exec_dir = create_execution_directory(project, test_file, timestamp)
test_file_reportdir = create_test_file_report_dir(exec_dir, test_file, '')
# initialize report for test file
test_report.initialize_test_file_report(
test_file, ['test_one', 'test_two'], '', test_file_reportdir, '', '')
test_file_report = test_report.get_test_file_report_json(
project, execution, timestamp, test_file)
assert len(test_file_report) == 2
assert any(t['test'] == 'test_one' and t['result'] == ResultsEnum.PENDING for t in test_file_report)
assert any(t['test'] == 'test_two' and t['result'] == ResultsEnum.PENDING for t in test_file_report)
class TestGenerateReport:
def test_generate_report_with_env(self, project_session):
_, project = project_session.activate()
timestamp = utils.get_timestamp()
test_name = 'testing_report_003'
suite_name = 'suite_foo_003'
exec_dir = create_execution_directory(project, suite_name, timestamp)
report_dir = create_test_file_report_dir(exec_dir, test_name, '')
test_data = {
'env': {
'name': 'env01',
'url': '1.1.1.1'
},
'var2': 'value2'
}
test_data = test_runner.Data(test_data)
result = {
'name': 'test_function',
'set_name': 'set_001',
'start_time': '',
'end_time': '',
'report_directory': '',
'result': 'success',
'errors': [],
'description': 'description of the test',
'steps': [
{'message': 'step1', 'screenshot': None, 'error': None},
{'message': 'step2', 'screenshot': None, 'error': None}
],
'test_elapsed_time': 22.22,
'test_timestamp': '2018.02.04.02.16.42.729',
'browser': 'chrome',
'browser_capabilities': '',
}
generate_report(test_name, result, test_data, report_dir)
path = os.path.join(report_dir, 'report.json')
with open(path) as report_file:
actual = json.load(report_file)
actual = actual[0]
assert len(actual.items()) == 12
assert actual['test_file'] == test_name
assert actual['test'] == 'test_function'
assert actual['result'] == 'success'
assert actual['steps'][0]['message'] == 'step1'
assert actual['steps'][1]['message'] == 'step2'
assert actual['description'] == 'description of the test'
assert actual['errors'] == []
assert actual['elapsed_time'] == 22.22
assert actual['timestamp'] == '2018.02.04.02.16.42.729'
assert actual['browser'] == 'chrome'
assert actual['environment'] == 'env01'
assert actual['set_name'] == 'set_001'
test_data_a = "{'url': '1.1.1.1', 'name': 'env01'}"
test_data_b = "{'name': 'env01', 'url': '1.1.1.1'}"
assert actual['test_data']['env'] in [test_data_a, test_data_b]
assert actual['test_data']['var2'] == "'value2'"
| {
"content_hash": "0729348cd747376bd72ac340452298bf",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 108,
"avg_line_length": 42,
"alnum_prop": 0.5887241689128482,
"repo_name": "lucianopuccio/golem",
"id": "95310f3a1974ecacb9b4d9f102d8341e940c7c7f",
"size": "8904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/report/test_report_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19253"
},
{
"name": "HTML",
"bytes": "72727"
},
{
"name": "JavaScript",
"bytes": "119483"
},
{
"name": "Python",
"bytes": "520538"
}
],
"symlink_target": ""
} |
from JumpScale import j
class GitClient:
def __init__(self, baseDir, check_path=True): # NOQA
self._repo = None
if not j.sal.fs.exists(path=baseDir):
raise j.exceptions.Input("git repo on %s not found." % baseDir)
# split path to find parts
baseDir = j.sal.fs.pathClean(baseDir)
baseDir = baseDir.replace("\\", "/") # NOQA
baseDir = baseDir.rstrip("/")
while ".git" not in j.sal.fs.listDirsInDir(
baseDir, recursive=False, dirNameOnly=True, findDirectorySymlinks=True):
baseDir = j.sal.fs.getParent(baseDir)
if baseDir == "/":
break
baseDir = baseDir.rstrip("/")
if baseDir.strip() == "":
raise j.exceptions.RuntimeError("could not find basepath for .git in %s" % baseDir)
if check_path:
if baseDir.find("/code/") == -1:
raise j.exceptions.Input("jumpscale code management always requires path in form of $somewhere/code/$type/$account/$reponame")
base = baseDir.split("/code/", 1)[1]
if not base.startswith('cockpit'):
if base.count("/") != 2:
raise j.exceptions.Input("jumpscale code management always requires path in form of $somewhere/code/$type/$account/$reponame")
self.type, self.account, self.name = base.split("/", 2)
else:
self.type, self.account, self.name = 'github', 'cockpit', 'cockpit'
else:
self.type, self.account, self.name = '', '', j.sal.fs.getBaseName(baseDir)
self.baseDir = baseDir
# if len(self.repo.remotes) != 1:
# raise j.exceptions.Input("git repo on %s is corrupt could not find remote url" % baseDir)
def __repr__(self):
return str(self.__dict__)
def __str__(self):
return self.__repr__()
@property
def remoteUrl(self):
if len(self.repo.remotes) <= 0:
raise j.exceptions.Input("There is not remote configured for this repository")
return self.repo.remotes[0].url
@property
def branchName(self):
return self.repo.git.rev_parse('HEAD', abbrev_ref=True)
@property
def repo(self):
        # Load git only when we absolutely need it because it does not work in
        # gevent mode
# mode
import git
if not self._repo:
if not j.sal.fs.exists(self.baseDir):
j.tools.cuisine.local.core.run(
"git config --global http.sslVerify false")
self._clone()
else:
self._repo = git.Repo(self.baseDir)
return self._repo
def init(self):
self.repo
def getBranchOrTag(self):
try:
return 'tag', self.repo.git.describe('--tags')
except:
return 'branch', self.branchName
def switchBranch(self, branchName, create=True): # NOQA
if create:
import git
try:
self.repo.git.branch(branchName)
except git.GitCommandError:
# probably branch exists.
pass
self.repo.git.checkout(branchName)
def checkFilesWaitingForCommit(self):
res = self.getModifiedFiles()
if res["D"] != []:
return True
if res["M"] != []:
return True
if res["N"] != []:
return True
if res["R"] != []:
return True
def hasModifiedFiles(self):
cmd = "cd %s;git status --porcelain" % self.baseDir
rc, out, err = j.tools.cuisine.local.core.run(cmd, die=False)
for item in out.split("\n"):
item = item.strip()
if item == '':
continue
return True
return False
def getModifiedFiles(self, collapse=False, ignore=[]):
result = {}
result["D"] = []
result["N"] = []
result["M"] = []
result["R"] = []
def checkignore(ignore, path):
for item in ignore:
if path.find(item) != -1:
return True
return False
cmd = "cd %s;git status --porcelain" % self.baseDir
rc, out, err = j.tools.cuisine.local.core.run(cmd)
for item in out.split("\n"):
item = item.strip()
if item == '':
continue
state, _, _file = item.partition(" ")
if state == '??':
if checkignore(ignore, _file):
continue
result["N"].append(_file)
if state in ["D", "N", "R", "M"]:
if checkignore(ignore, _file):
continue
if _file not in result[state]:
result[state].append(_file)
for diff in self.repo.index.diff(None):
# TODO: does not work, did not show my changes !!! *1
path = diff.a_blob.path
if checkignore(ignore, path):
continue
if diff.deleted_file:
if path not in result["D"]:
result["D"].append(path)
elif diff.new_file:
if path not in result["N"]:
result["N"].append(path)
elif diff.renamed:
if path not in result["R"]:
result["R"].append(path)
else:
if path not in result["M"]:
result["M"].append(path)
if collapse:
result = result["N"] + result["M"] + result["R"] + result["D"]
return result
def getUntrackedFiles(self):
return self.repo.untracked_files
def checkout(self, path):
cmd = 'cd %s;git checkout %s' % (self.baseDir, path)
j.tools.cuisine.local.core.run(cmd)
def addRemoveFiles(self):
cmd = 'cd %s;git add -A :/' % self.baseDir
j.tools.cuisine.local.core.run(cmd)
def addFiles(self, files=[]):
if files != []:
self.repo.index.add(files)
def removeFiles(self, files=[]):
if files != []:
self.repo.index.remove(files)
def pull(self):
self.repo.git.pull()
def fetch(self):
self.repo.git.fetch()
def commit(self, message='', addremove=True):
if addremove:
self.addRemoveFiles()
if self.hasModifiedFiles() is False:
print("no need to commit, no changed files")
return
return self.repo.index.commit(message)
def push(self, force=False):
if force:
self.repo.git.push('-f')
else:
self.repo.git.push('--all')
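    # Illustrative usage sketch (the repo path is an assumption; __init__ requires a
    # .../code/$type/$account/$reponame layout unless check_path=False):
    #
    #     client = GitClient('/opt/code/github/myaccount/myrepo')
    #     if client.hasModifiedFiles():
    #         client.commit('sync local changes')  # addremove=True stages everything first
    #         client.push()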
def getChangedFiles(self, fromref='', toref='', fromepoch=None, toepoch=None, author=None, paths=[]):
"""
        list all changed files between refs and/or epochs (both filters can be combined)
        @param fromref = commit ref to start from
        @param toref = commit ref to end at
        @param author = only include commits by this author
        @param paths = only list changed files under these paths
        @param fromepoch = starting epoch
        @param toepoch = ending epoch
        @return list of unique file paths
"""
commits = self.getCommitRefs(fromref=fromref, toref=toref, fromepoch=fromepoch,
toepoch=toepoch, author=author, paths=paths, files=True)
files = [f for commit in commits for f in commit[3]]
return list(set(files))
def getCommitRefs(self, fromref='', toref='', fromepoch=None, toepoch=None, author=None, paths=None, files=False):
"""
@return [[$epoch, $ref, $author]] if no files (default)
@return [[$epoch, $ref, $author, $files]] if files
        @param files = True to also include the files touched by each commit
"""
kwargs = {'branches': [self.branchName]}
if fromepoch:
kwargs["max-age"] = fromepoch
if toepoch:
kwargs['min-age'] = toepoch
if fromref or toref:
if fromref and not toref:
kwargs['rev'] = '%s' % fromref
elif fromref and toref:
kwargs['rev'] = '%s..%s' % (fromref, toref)
if author:
kwargs['author'] = author
commits = list()
for commit in list(self.repo.iter_commits(paths=paths, **kwargs)):
if files:
commits.append((commit.authored_date, commit.hexsha,
commit.author.name, list(commit.stats.files.keys())))
else:
commits.append(
(commit.authored_date, commit.hexsha, commit.author.name))
return commits
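    # Illustrative usage sketch (ref names and author are assumptions):
    #
    #     changed_paths = client.getChangedFiles(fromref='v1.0', toref='master', author='alice')
    #     commits = client.getCommitRefs(fromref='v1.0', toref='master', files=True)
    #     # each entry: (epoch, sha, author, [files]) when files=True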
def getFileChanges(self, path):
"""
@return lines which got changed
format:
        {'line': [{'author': 'author name', 'commit': 'commit sha'}]}
"""
# TODO *3 limit to max number?
diffs = dict()
blame = self.repo.blame(self.branchName, path)
for commit, lines in blame:
for line in lines:
diffs[line] = list() if line not in diffs else diffs[line]
diffs[line].append(
{'author': commit.author.name, 'commit': commit.hexsha})
return diffs
def patchGitignore(self):
gitignore = '''# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
develop-eggs/
eggs/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Rope
.ropeproject
# Django stuff:
*.log
*.pot
# Sphinx documentation
docs/_build/
'''
ignorefilepath = j.sal.fs.joinPaths(self.baseDir, '.gitignore')
if not j.sal.fs.exists(ignorefilepath):
j.sal.fs.writeFile(ignorefilepath, gitignore)
        else:
            default_lines = gitignore.splitlines()
            inn = j.sal.fs.fileGetContents(ignorefilepath)
            lines = inn.splitlines()
            linesout = []
            for line in lines:
                if line.strip():
                    linesout.append(line)
            # append default entries that the existing .gitignore is missing
            for line in default_lines:
                if line not in lines and line.strip():
                    linesout.append(line)
            out = '\n'.join(linesout)
            if out.strip() != inn.strip():
                j.sal.fs.writeFile(ignorefilepath, out)
def describe(self):
"""
        this method gets the latest tag or branch
"""
try:
cmd = 'cd {path}; git describe --tags'.format(path=self.baseDir)
return 'tag', j.tools.cuisine.local.core.run(cmd)[1]
except:
return 'branch', self.repo.head.ref.name
| {
"content_hash": "7f8638d305fec2c61c26534dcb2595b2",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 146,
"avg_line_length": 31.489795918367346,
"alnum_prop": 0.5334691232293306,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "98591ee943d0ae66c585bdf51fd5a6711266b503",
"size": "10801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/clients/git/GitClient.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
} |
"""Support for representing current time of the day as binary sensors."""
from datetime import datetime, timedelta
import logging
import pytz
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_AFTER,
CONF_BEFORE,
CONF_NAME,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.sun import get_astral_event_date, get_astral_event_next
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_AFTER = "after"
ATTR_BEFORE = "before"
ATTR_NEXT_UPDATE = "next_update"
CONF_AFTER_OFFSET = "after_offset"
CONF_BEFORE_OFFSET = "before_offset"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_AFTER): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_BEFORE): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_AFTER_OFFSET, default=timedelta(0)): cv.time_period,
vol.Optional(CONF_BEFORE_OFFSET, default=timedelta(0)): cv.time_period,
}
)
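# Example YAML for this platform (illustrative sketch; the entity name and offset
# values are arbitrary):
#
#   binary_sensor:
#     - platform: tod
#       name: Early Evening
#       after: sunset
#       after_offset: "-01:00"
#       before: "23:00"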
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ToD sensors."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant configuration")
return
after = config[CONF_AFTER]
after_offset = config[CONF_AFTER_OFFSET]
before = config[CONF_BEFORE]
before_offset = config[CONF_BEFORE_OFFSET]
name = config[CONF_NAME]
sensor = TodSensor(name, after, after_offset, before, before_offset)
async_add_entities([sensor])
def is_sun_event(event):
"""Return true if event is sun event not time."""
return event in (SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET)
class TodSensor(BinarySensorEntity):
"""Time of the Day Sensor."""
def __init__(self, name, after, after_offset, before, before_offset):
"""Init the ToD Sensor..."""
self._name = name
self._time_before = self._time_after = self._next_update = None
self._after_offset = after_offset
self._before_offset = before_offset
self._before = before
self._after = after
@property
def should_poll(self):
"""Sensor does not need to be polled."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def after(self):
"""Return the timestamp for the beginning of the period."""
return self._time_after
@property
def before(self):
"""Return the timestamp for the end of the period."""
return self._time_before
@property
def is_on(self):
"""Return True is sensor is on."""
if self.after < self.before:
return self.after <= self.current_datetime < self.before
return False
@property
def current_datetime(self):
"""Return local current datetime according to hass configuration."""
return dt_util.utcnow()
@property
def next_update(self):
"""Return the next update point in the UTC time."""
return self._next_update
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_AFTER: self.after.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_BEFORE: self.before.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_NEXT_UPDATE: self.next_update.astimezone(
self.hass.config.time_zone
).isoformat(),
}
def _naive_time_to_utc_datetime(self, naive_time):
"""Convert naive time from config to utc_datetime with current day."""
# get the current local date from utc time
current_local_date = self.current_datetime.astimezone(
self.hass.config.time_zone
).date()
        # calculate utc datetime corresponding to local time
utc_datetime = self.hass.config.time_zone.localize(
datetime.combine(current_local_date, naive_time)
).astimezone(tz=pytz.UTC)
return utc_datetime
def _calculate_initial_boudary_time(self):
"""Calculate internal absolute time boundaries."""
nowutc = self.current_datetime
# If after value is a sun event instead of absolute time
if is_sun_event(self._after):
            # Calculate today's event utc time or,
            # if not available, take the next one
after_event_date = get_astral_event_date(
self.hass, self._after, nowutc
) or get_astral_event_next(self.hass, self._after, nowutc)
else:
# Convert local time provided to UTC today
# datetime.combine(date, time, tzinfo) is not supported
# in python 3.5. The self._after is provided
# with hass configured TZ not system wide
after_event_date = self._naive_time_to_utc_datetime(self._after)
self._time_after = after_event_date
# If before value is a sun event instead of absolute time
if is_sun_event(self._before):
            # Calculate today's event utc time or, if not available,
            # take the next one
before_event_date = get_astral_event_date(
self.hass, self._before, nowutc
) or get_astral_event_next(self.hass, self._before, nowutc)
# Before is earlier than after
if before_event_date < after_event_date:
# Take next day for before
before_event_date = get_astral_event_next(
self.hass, self._before, after_event_date
)
else:
# Convert local time provided to UTC today, see above
before_event_date = self._naive_time_to_utc_datetime(self._before)
# It is safe to add timedelta days=1 to UTC as there is no DST
if before_event_date < after_event_date + self._after_offset:
before_event_date += timedelta(days=1)
self._time_before = before_event_date
# We are calculating the _time_after value assuming that it will happen today
# But that is not always true, e.g. after 23:00, before 12:00 and now is 10:00
# If _time_before and _time_after are ahead of current_datetime:
# _time_before is set to 12:00 next day
# _time_after is set to 23:00 today
# current_datetime is set to 10:00 today
if (
self._time_after > self.current_datetime
and self._time_before > self.current_datetime + timedelta(days=1)
):
# remove one day from _time_before and _time_after
self._time_after -= timedelta(days=1)
self._time_before -= timedelta(days=1)
# Add offset to utc boundaries according to the configuration
self._time_after += self._after_offset
self._time_before += self._before_offset
def _turn_to_next_day(self):
"""Turn to to the next day."""
if is_sun_event(self._after):
self._time_after = get_astral_event_next(
self.hass, self._after, self._time_after - self._after_offset
)
self._time_after += self._after_offset
else:
# Offset is already there
self._time_after += timedelta(days=1)
if is_sun_event(self._before):
self._time_before = get_astral_event_next(
self.hass, self._before, self._time_before - self._before_offset
)
self._time_before += self._before_offset
else:
# Offset is already there
self._time_before += timedelta(days=1)
async def async_added_to_hass(self):
"""Call when entity about to be added to Home Assistant."""
self._calculate_initial_boudary_time()
self._calculate_next_update()
self._point_in_time_listener(dt_util.now())
def _calculate_next_update(self):
"""Datetime when the next update to the state."""
now = self.current_datetime
if now < self.after:
self._next_update = self.after
return
if now < self.before:
self._next_update = self.before
return
self._turn_to_next_day()
self._next_update = self.after
@callback
def _point_in_time_listener(self, now):
"""Run when the state of the sensor should be updated."""
self._calculate_next_update()
self.async_write_ha_state()
async_track_point_in_utc_time(
self.hass, self._point_in_time_listener, self.next_update
)
| {
"content_hash": "b0ffd2aba32ce709c78769ac81283da9",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 88,
"avg_line_length": 37.02074688796681,
"alnum_prop": 0.6195920197265187,
"repo_name": "robbiet480/home-assistant",
"id": "8a5bbf16c6cfa0991d209b2acb770bac20bf902e",
"size": "8922",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tod/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
"""
Udacity CS253 - Lesson 1 - Homework 1
"""
import webapp2
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Hello, Udacity!')
application = webapp2.WSGIApplication([('/', MainPage),], debug=True) | {
"content_hash": "f6fa5b29863c32677e53147b2196c93a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 26.454545454545453,
"alnum_prop": 0.697594501718213,
"repo_name": "vcelis/cs253",
"id": "c35b8967f65b2aff641acbd17c01334542e25085",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lesson1/homework1/hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5226"
},
{
"name": "Python",
"bytes": "29678"
}
],
"symlink_target": ""
} |
import traceback
try:
from hashlib import md5
except ImportError:
from md5 import md5
from datetime import datetime, timedelta
class RateLimitFilter(object):
_errors = {}
def filter(self, record):
from django.conf import settings
from django.core.cache import cache
# Track duplicate errors
duplicate = False
rate = getattr(settings, 'ERROR_RATE_LIMIT', 10) # seconds
if rate > 0:
if record.exc_info is not None:
tb = '\n'.join(traceback.format_exception(*record.exc_info))
else:
tb = '{} {} {}'.format(record.name, record.msg, record.args)
key = md5(tb).hexdigest()
prefix = getattr(settings, 'ERROR_RATE_CACHE_PREFIX', 'ERROR_RATE')
# Test if the cache works
cache_key = '%s_%s' % (prefix, key)
try:
cache.set(prefix, 1, 1)
use_cache = cache.get(prefix) == 1
except:
use_cache = False
# debugging purposes...
#print "Error Rate Limit: {} Error Prefix: {} Use Cache: {} Key: {}".format(rate, prefix, use_cache, key)
if use_cache:
duplicate = cache.get(cache_key) == 1
if not duplicate:
cache.set(cache_key, 1, rate)
else:
min_date = datetime.now() - timedelta(seconds=rate)
max_keys = getattr(settings, 'ERROR_RATE_KEY_LIMIT', 100)
duplicate = (key in self._errors and self._errors[key] >= min_date)
self._errors = dict(filter(lambda x: x[1] >= min_date,
sorted(self._errors.items(),
key=lambda x: x[1]))[0-max_keys:])
if not duplicate:
self._errors[key] = datetime.now()
# debugging purposes...
#print "Duplicate: {}".format(duplicate)
return not duplicate
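# --- Illustrative sketch (editorial addition, not part of the original module) ---
# The filter is meant to be attached to a logging handler via Django's LOGGING
# setting, so identical tracebacks are only reported once per ERROR_RATE_LIMIT
# seconds. The dotted path below is an assumed location for this module.
EXAMPLE_LOGGING = {
    "version": 1,
    "filters": {
        "rate_limit": {
            "()": "project.apps.utils.error_ratelimit_filter.RateLimitFilter",
        },
    },
    "handlers": {
        "mail_admins": {
            "level": "ERROR",
            "class": "django.utils.log.AdminEmailHandler",
            "filters": ["rate_limit"],
        },
    },
    "loggers": {
        "django.request": {
            "handlers": ["mail_admins"],
            "level": "ERROR",
        },
    },
}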
| {
"content_hash": "3f420ea7deb4d8381c661cc87ec3f7a9",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 117,
"avg_line_length": 36.89473684210526,
"alnum_prop": 0.4940561103185925,
"repo_name": "MadeInHaus/django-social",
"id": "6e04f36b964c0aceff5a8a689aeda03d59d41142",
"size": "2168",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example/SocialExample/project/apps/utils/error_ratelimit_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16921"
},
{
"name": "JavaScript",
"bytes": "430797"
},
{
"name": "Pascal",
"bytes": "49"
},
{
"name": "Perl",
"bytes": "41847"
},
{
"name": "Puppet",
"bytes": "25935"
},
{
"name": "Python",
"bytes": "376144"
},
{
"name": "Ruby",
"bytes": "192409"
}
],
"symlink_target": ""
} |
import os
import sys
import unittest
import PRESUBMIT
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..',
'..', '..'))
from PRESUBMIT_test_mocks import (MockInputApi, MockOutputApi, MockAffectedFile)
class AccessibilityEventsTestIncludesAndroidTest(unittest.TestCase):
# Test that no warning is raised when the Android file is also modified.
def testAndroidChangeIncluded(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('content/test/data/accessibility/event/foo.html',
[''], action='A'),
MockAffectedFile(
'accessibility/WebContentsAccessibilityEventsTest.java',
[''], action='M')
]
msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(msgs),
'Expected %d messages, found %d: %s'
% (0, len(msgs), msgs))
# Test that a warning is raised when the Android file is not modified.
def testAndroidChangeMissing(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('content/test/data/accessibility/event/foo.html',
[''], action='A'),
]
msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(msgs),
'Expected %d messages, found %d: %s'
% (1, len(msgs), msgs))
# Test that Android change is not required when no html file is added/removed.
def testIgnoreNonHtmlFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('content/test/data/accessibility/event/foo.txt',
[''], action='A'),
MockAffectedFile('content/test/data/accessibility/event/foo.cc',
[''], action='A'),
MockAffectedFile('content/test/data/accessibility/event/foo.h',
[''], action='A'),
MockAffectedFile('content/test/data/accessibility/event/foo.py',
[''], action='A')
]
msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(msgs),
'Expected %d messages, found %d: %s'
% (0, len(msgs), msgs))
# Test that Android change is not required for unrelated html files.
def testIgnoreNonRelatedHtmlFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('content/test/data/accessibility/aria/foo.html',
[''], action='A'),
MockAffectedFile('content/test/data/accessibility/html/foo.html',
[''], action='A'),
MockAffectedFile('chrome/tests/data/accessibility/foo.html',
[''], action='A')
]
msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(msgs),
'Expected %d messages, found %d: %s'
% (0, len(msgs), msgs))
# Test that only modifying an html file will not trigger the warning.
def testIgnoreModifiedFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('content/test/data/accessibility/event/foo.html',
[''], action='M')
]
msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(msgs),
'Expected %d messages, found %d: %s'
% (0, len(msgs), msgs))
# Test that deleting an html file will trigger the warning.
def testAndroidChangeMissingOnDeletedFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('content/test/data/accessibility/event/foo.html',
[], action='D')
]
msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(msgs),
'Expected %d messages, found %d: %s'
% (1, len(msgs), msgs))
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f0de2f16afe4bd647d4952eb90943d80",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 80,
"avg_line_length": 35.85470085470085,
"alnum_prop": 0.6255065554231227,
"repo_name": "ric2b/Vivaldi-browser",
"id": "6dc43c2b6298c6dc4ced6a690601e861b0151092",
"size": "4380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/content/test/data/accessibility/PRESUBMIT_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import functools
import itertools
import math
from typing import Callable, cast, Dict, Iterable, List, Optional, Sequence, Tuple, Type
import cirq
import stim
def cirq_circuit_to_stim_circuit(
circuit: cirq.AbstractCircuit, *, qubit_to_index_dict: Optional[Dict[cirq.Qid, int]] = None
) -> stim.Circuit:
"""Converts a cirq circuit into an equivalent stim circuit.
Not all circuits can be converted. In order for a circuit to be convertible, all of its operations must be
convertible.
An operation is convertible if:
- It is a stabilizer gate or probabilistic Pauli gate from cirq
- cirq.H
- cirq.S
- cirq.X
- cirq.X**0.5
- cirq.CNOT
- cirq.ResetChannel()
- cirq.X.with_probability(p)
- cirq.DepolarizingChannel(p, n_qubits=1 or 2)
- etc
- Or it has a _decompose_ method that yields convertible operations.
- Or it has a correctly implemented _stim_conversion_ method.
Args:
circuit: The circuit to convert.
qubit_to_index_dict: Optional. Which integer each qubit should get mapped to. If not specified, defaults to
indexing qubits in the circuit in sorted order.
Returns:
The converted circuit.
Examples:
>>> import cirq, stimcirq
>>> a = cirq.NamedQubit("zero")
>>> b = cirq.NamedQubit("two")
>>> stimcirq.cirq_circuit_to_stim_circuit(cirq.Circuit(
... cirq.Moment(cirq.H(a)),
... cirq.Moment(cirq.CNOT(a, b)),
... cirq.Moment(
... cirq.X(a).with_probability(0.25),
... cirq.Z(b).with_probability(0.25),
... ),
... cirq.Moment(),
... cirq.Moment(),
... cirq.Moment(cirq.DepolarizingChannel(0.125, n_qubits=2).on(b, a)),
... cirq.Moment(cirq.measure(a, b)),
... ), qubit_to_index_dict={a: 0, b: 2})
stim.Circuit('''
H 0
TICK
CX 0 2
TICK
X_ERROR(0.25) 0
Z_ERROR(0.25) 2
TICK
TICK
TICK
DEPOLARIZE2(0.125) 2 0
TICK
M 0 2
TICK
''')
Here is an example of a _stim_conversion_ method:
def _stim_conversion_(
self,
# The stim circuit being built. Add onto it.
edit_circuit: stim.Circuit,
# Metadata about measurement groupings needed by stimcirq.StimSampler.
# If your gate contains a measurement, it has to append how many qubits
# that measurement measures (and its key) into this list.
edit_measurement_key_lengths: List[Tuple[str, int]],
# The indices of qubits the gate is operating on.
targets: List[int],
# Forward compatibility with future arguments.
**kwargs):
edit_circuit.append_operation("H", targets)
"""
return cirq_circuit_to_stim_data(circuit, q2i=qubit_to_index_dict, flatten=False)[0]
def cirq_circuit_to_stim_data(
circuit: cirq.AbstractCircuit, *, q2i: Optional[Dict[cirq.Qid, int]] = None, flatten: bool = False,
) -> Tuple[stim.Circuit, List[Tuple[str, int]]]:
"""Converts a Cirq circuit into a Stim circuit and also metadata about where measurements go."""
if q2i is None:
q2i = {q: i for i, q in enumerate(sorted(circuit.all_qubits()))}
helper = CirqToStimHelper()
helper.q2i = q2i
helper.flatten = flatten
for q in sorted(circuit.all_qubits()):
if isinstance(q, cirq.LineQubit):
i = q2i[q]
if i != q.x:
helper.out.append_operation("QUBIT_COORDS", [i], [q.x])
elif isinstance(q, cirq.GridQubit):
helper.out.append_operation("QUBIT_COORDS", [q2i[q]], [q.row, q.col])
helper.process_moments(circuit)
return helper.out, helper.key_out
StimTypeHandler = Callable[[stim.Circuit, cirq.Gate, List[int]], None]
@functools.lru_cache(maxsize=1)
def gate_to_stim_append_func() -> Dict[cirq.Gate, Callable[[stim.Circuit, List[int]], None]]:
"""A dictionary mapping specific gate instances to stim circuit appending functions."""
x = (cirq.X, False)
y = (cirq.Y, False)
z = (cirq.Z, False)
nx = (cirq.X, True)
ny = (cirq.Y, True)
nz = (cirq.Z, True)
def do_nothing(c, t):
pass
def use(
*gates: str, individuals: Sequence[Tuple[str, int]] = ()
) -> Callable[[stim.Circuit, List[int]], None]:
if len(gates) == 1 and not individuals:
(g,) = gates
return lambda c, t: c.append_operation(g, t)
if not individuals:
def do(c, t):
for g in gates:
c.append_operation(g, t)
else:
def do(c, t):
for g in gates:
c.append_operation(g, t)
for g, k in individuals:
c.append_operation(g, [t[k]])
return do
sqcg = cirq.SingleQubitCliffordGate.from_xz_map
paulis = cast(List[cirq.Pauli], [cirq.X, cirq.Y, cirq.Z])
return {
cirq.ResetChannel(): use("R"),
# Identities.
cirq.I: use("I"),
cirq.H ** 0: do_nothing,
cirq.X ** 0: do_nothing,
cirq.Y ** 0: do_nothing,
cirq.Z ** 0: do_nothing,
cirq.ISWAP ** 0: do_nothing,
cirq.SWAP ** 0: do_nothing,
# Common named gates.
cirq.H: use("H"),
cirq.X: use("X"),
cirq.Y: use("Y"),
cirq.Z: use("Z"),
cirq.X ** 0.5: use("SQRT_X"),
cirq.X ** -0.5: use("SQRT_X_DAG"),
cirq.Y ** 0.5: use("SQRT_Y"),
cirq.Y ** -0.5: use("SQRT_Y_DAG"),
cirq.Z ** 0.5: use("SQRT_Z"),
cirq.Z ** -0.5: use("SQRT_Z_DAG"),
cirq.CNOT: use("CNOT"),
cirq.CZ: use("CZ"),
cirq.ISWAP: use("ISWAP"),
cirq.ISWAP ** -1: use("ISWAP_DAG"),
cirq.ISWAP ** 2: use("Z"),
cirq.SWAP: use("SWAP"),
cirq.X.controlled(1): use("CX"),
cirq.Y.controlled(1): use("CY"),
cirq.Z.controlled(1): use("CZ"),
cirq.XX ** 0.5: use("SQRT_XX"),
cirq.YY ** 0.5: use("SQRT_YY"),
cirq.ZZ ** 0.5: use("SQRT_ZZ"),
cirq.XX ** -0.5: use("SQRT_XX_DAG"),
cirq.YY ** -0.5: use("SQRT_YY_DAG"),
cirq.ZZ ** -0.5: use("SQRT_ZZ_DAG"),
# All 24 cirq.SingleQubitCliffordGate instances.
sqcg(x, y): use("SQRT_X_DAG"),
sqcg(x, ny): use("SQRT_X"),
sqcg(nx, y): use("H_YZ"),
sqcg(nx, ny): use("H_YZ", "X"),
sqcg(x, z): do_nothing,
sqcg(x, nz): use("X"),
sqcg(nx, z): use("Z"),
sqcg(nx, nz): use("Y"),
sqcg(y, x): use("C_XYZ"),
sqcg(y, nx): use("S", "SQRT_Y_DAG"),
sqcg(ny, x): use("S_DAG", "SQRT_Y"),
sqcg(ny, nx): use("S_DAG", "SQRT_Y_DAG"),
sqcg(y, z): use("S"),
sqcg(y, nz): use("H_XY"),
sqcg(ny, z): use("S_DAG"),
sqcg(ny, nz): use("H_XY", "Z"),
sqcg(z, x): use("H"),
sqcg(z, nx): use("SQRT_Y_DAG"),
sqcg(nz, x): use("SQRT_Y"),
sqcg(nz, nx): use("H", "Y"),
sqcg(z, y): use("C_ZYX"),
sqcg(z, ny): use("SQRT_Y_DAG", "S"),
sqcg(nz, y): use("SQRT_Y", "S"),
sqcg(nz, ny): use("SQRT_Y", "S_DAG"),
# All 36 cirq.PauliInteractionGate instances.
**{
cirq.PauliInteractionGate(p0, s0, p1, s1): use(
f"{p0}C{p1}", individuals=[(str(p1), 1)] * s0 + [(str(p0), 0)] * s1
)
for p0, s0, p1, s1 in itertools.product(paulis, [False, True], repeat=2)
},
}
@functools.lru_cache()
def gate_type_to_stim_append_func() -> Dict[Type[cirq.Gate], StimTypeHandler]:
"""A dictionary mapping specific gate types to stim circuit appending functions."""
return {
cirq.ControlledGate: cast(StimTypeHandler, _stim_append_controlled_gate),
cirq.DensePauliString: cast(StimTypeHandler, _stim_append_dense_pauli_string_gate),
cirq.MutableDensePauliString: cast(StimTypeHandler, _stim_append_dense_pauli_string_gate),
cirq.AsymmetricDepolarizingChannel: cast(
StimTypeHandler, _stim_append_asymmetric_depolarizing_channel
),
cirq.BitFlipChannel: lambda c, g, t: c.append_operation(
"X_ERROR", t, cast(cirq.BitFlipChannel, g).p
),
cirq.PhaseFlipChannel: lambda c, g, t: c.append_operation(
"Z_ERROR", t, cast(cirq.PhaseFlipChannel, g).p
),
cirq.PhaseDampingChannel: lambda c, g, t: c.append_operation(
"Z_ERROR", t, 0.5 - math.sqrt(1 - cast(cirq.PhaseDampingChannel, g).gamma) / 2
),
cirq.RandomGateChannel: cast(StimTypeHandler, _stim_append_random_gate_channel),
cirq.DepolarizingChannel: cast(StimTypeHandler, _stim_append_depolarizing_channel),
}
def _stim_append_measurement_gate(
circuit: stim.Circuit, gate: cirq.MeasurementGate, targets: List[int]
):
for i, b in enumerate(gate.invert_mask):
if b:
targets[i] = stim.target_inv(targets[i])
circuit.append_operation("M", targets)
def _stim_append_pauli_measurement_gate(
circuit: stim.Circuit, gate: cirq.PauliMeasurementGate, targets: List[int]
):
obs: cirq.DensePauliString = gate.observable()
# Convert to stim Pauli product targets.
if len(targets) == 0:
raise NotImplementedError(f"len(targets)={len(targets)} == 0")
new_targets = []
for t, p in zip(targets, obs.pauli_mask):
if p == 1:
t = stim.target_x(t)
elif p == 2:
t = stim.target_y(t)
elif p == 3:
t = stim.target_z(t)
else:
raise NotImplementedError(f"obs={obs!r}")
new_targets.append(t)
new_targets.append(stim.target_combiner())
new_targets.pop()
# Inverted result?
if obs.coefficient == -1:
        new_targets[0] |= stim.target_inv(new_targets[0])
elif obs.coefficient != 1:
raise NotImplementedError(f"obs.coefficient={obs.coefficient!r} not in [1, -1]")
circuit.append_operation("MPP", new_targets)
def _stim_append_dense_pauli_string_gate(
c: stim.Circuit, g: cirq.BaseDensePauliString, t: List[int]
):
gates = [None, "X", "Y", "Z"]
for p, k in zip(g.pauli_mask, t):
if p:
c.append_operation(gates[p], [k])
def _stim_append_asymmetric_depolarizing_channel(
c: stim.Circuit, g: cirq.AsymmetricDepolarizingChannel, t: List[int]
):
c.append_operation("PAULI_CHANNEL_1", t, [g.p_x, g.p_y, g.p_z])
def _stim_append_depolarizing_channel(c: stim.Circuit, g: cirq.DepolarizingChannel, t: List[int]):
if g.num_qubits() == 1:
c.append_operation("DEPOLARIZE1", t, g.p)
elif g.num_qubits() == 2:
c.append_operation("DEPOLARIZE2", t, g.p)
else:
raise TypeError(f"Don't know how to turn {g!r} into Stim operations.")
def _stim_append_controlled_gate(c: stim.Circuit, g: cirq.ControlledGate, t: List[int]):
if isinstance(g.sub_gate, cirq.BaseDensePauliString) and g.num_controls() == 1:
gates = [None, "CX", "CY", "CZ"]
for p, k in zip(g.sub_gate.pauli_mask, t[1:]):
if p:
c.append_operation(gates[p], [t[0], k])
if g.sub_gate.coefficient == 1j:
c.append_operation("S", t[:1])
elif g.sub_gate.coefficient == -1:
c.append_operation("Z", t[:1])
elif g.sub_gate.coefficient == -1j:
c.append_operation("S_DAG", t[:1])
elif g.sub_gate.coefficient == 1:
pass
else:
raise TypeError(f"Phase kickback from {g!r} isn't a stabilizer operation.")
return
raise TypeError(f"Don't know how to turn controlled gate {g!r} into Stim operations.")
def _stim_append_random_gate_channel(c: stim.Circuit, g: cirq.RandomGateChannel, t: List[int]):
if g.sub_gate in [cirq.X, cirq.Y, cirq.Z]:
c.append_operation(f"{g.sub_gate}_ERROR", t, g.probability)
elif isinstance(g.sub_gate, cirq.DensePauliString):
target_p = [None, stim.target_x, stim.target_y, stim.target_z]
pauli_targets = [target_p[p](t) for t, p in zip(t, g.sub_gate.pauli_mask) if p]
c.append_operation(f"CORRELATED_ERROR", pauli_targets, g.probability)
else:
raise NotImplementedError(
f"Don't know how to turn probabilistic {g!r} into Stim operations."
)
class CirqToStimHelper:
def __init__(self):
self.key_out: List[Tuple[str, int]] = []
self.out = stim.Circuit()
self.q2i = {}
self.have_seen_loop = False
self.flatten = False
def process_circuit_operation_into_repeat_block(self, op: cirq.CircuitOperation) -> None:
if self.flatten or op.repetitions == 1:
self.process_operations(cirq.decompose_once(op))
return
child = CirqToStimHelper()
child.key_out = self.key_out
child.q2i = self.q2i
child.have_seen_loop = True
self.have_seen_loop = True
child.process_moments(op.transform_qubits(lambda q: op.qubit_map.get(q, q)).circuit)
self.out += child.out * op.repetitions
def process_operations(self, operations: Iterable[cirq.Operation]) -> None:
g2f = gate_to_stim_append_func()
t2f = gate_type_to_stim_append_func()
for op in operations:
assert isinstance(op, cirq.Operation)
gate = op.gate
targets = [self.q2i[q] for q in op.qubits]
custom_method = getattr(
op, '_stim_conversion_', getattr(gate, '_stim_conversion_', None)
)
if custom_method is not None:
custom_method(
dont_forget_your_star_star_kwargs=True,
edit_circuit=self.out,
edit_measurement_key_lengths=self.key_out,
targets=targets,
have_seen_loop=self.have_seen_loop,
)
continue
if isinstance(op, cirq.CircuitOperation):
self.process_circuit_operation_into_repeat_block(op)
continue
# Special case measurement, because of its metadata.
if isinstance(gate, cirq.PauliMeasurementGate):
self.key_out.append((gate.key, len(targets)))
_stim_append_pauli_measurement_gate(self.out, gate, targets)
continue
if isinstance(gate, cirq.MeasurementGate):
self.key_out.append((gate.key, len(targets)))
_stim_append_measurement_gate(self.out, gate, targets)
continue
# Look for recognized gate values like cirq.H.
val_append_func = g2f.get(gate)
if val_append_func is not None:
val_append_func(self.out, targets)
continue
# Look for recognized gate types like cirq.DepolarizingChannel.
type_append_func = t2f.get(type(gate))
if type_append_func is not None:
type_append_func(self.out, gate, targets)
continue
# Ask unrecognized operations to decompose themselves into simpler operations.
try:
self.process_operations(cirq.decompose_once(op))
except TypeError as ex:
raise TypeError(
f"Don't know how to translate {op!r} into stim gates.\n"
f"- It doesn't have a _decompose_ method that returns stim-compatible operations.\n"
f"- It doesn't have a _stim_conversion_ method.\n"
) from ex
def process_moment(self, moment: cirq.Moment):
length_before = len(self.out)
self.process_operations(moment)
# Append a TICK, unless it was already handled by an internal REPEAT block.
if length_before == len(self.out) or not isinstance(self.out[-1], stim.CircuitRepeatBlock):
self.out.append_operation("TICK", [])
def process_moments(self, moments: Iterable[cirq.Moment]):
for moment in moments:
self.process_moment(moment)
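# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# cirq_circuit_to_stim_data also reports how measurements are grouped, which is
# what StimSampler uses to split stim's flat measurement record back into cirq
# measurement keys. The qubits and the key name below are arbitrary examples.
if __name__ == '__main__':  # pragma: no cover
    _a, _b = cirq.LineQubit.range(2)
    _demo = cirq.Circuit(cirq.H(_a), cirq.CNOT(_a, _b), cirq.measure(_a, _b, key='m'))
    _stim_circuit, _key_lengths = cirq_circuit_to_stim_data(_demo)
    print(_stim_circuit)   # H 0 / CX 0 1 / M 0 1, with a TICK after each moment
    print(_key_lengths)    # [('m', 2)] -> key 'm' covers the two measured qubits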
| {
"content_hash": "7cba043881f4a131c6cfd3ae1455d860",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 115,
"avg_line_length": 37.01809954751131,
"alnum_prop": 0.5628285050727295,
"repo_name": "quantumlib/Stim",
"id": "df61d8a5c3021954cc4a3be6d9e5f2a75863ab54",
"size": "16362",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "glue/cirq/stimcirq/_cirq_to_stim.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4500"
},
{
"name": "C++",
"bytes": "2703579"
},
{
"name": "CMake",
"bytes": "4590"
},
{
"name": "HTML",
"bytes": "8333"
},
{
"name": "JavaScript",
"bytes": "14013"
},
{
"name": "Python",
"bytes": "877557"
},
{
"name": "Shell",
"bytes": "4765"
},
{
"name": "Starlark",
"bytes": "3470"
}
],
"symlink_target": ""
} |
"""Blink frame presubmit script
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
USE_PYTHON3 = True
def _RunUmaHistogramChecks(input_api, output_api): # pylint: disable=C0103
import sys
original_sys_path = sys.path
try:
sys.path = sys.path + [
input_api.os_path.join(input_api.PresubmitLocalPath(), '..', '..',
'..', '..', '..', 'tools', 'metrics',
'histograms')
]
import update_histogram_enum # pylint: disable=F0401
finally:
sys.path = original_sys_path
for f in input_api.AffectedFiles():
if f.LocalPath().endswith('web_feature.mojom'):
break
else:
return []
source_path = 'third_party/blink/public/mojom/web_feature/web_feature.mojom'
start_marker = '^enum WebFeature {'
end_marker = '^kNumberOfFeatures'
presubmit_error = update_histogram_enum.CheckPresubmitErrors(
histogram_enum_name='FeatureObserver',
update_script_name='update_use_counter_feature_enum.py',
source_enum_path=source_path,
start_marker=start_marker,
end_marker=end_marker,
strip_k_prefix=True)
if presubmit_error:
return [
output_api.PresubmitPromptWarning(
presubmit_error, items=[source_path])
]
return []
def CheckChangeOnUpload(input_api, output_api): # pylint: disable=C0103
results = []
results.extend(_RunUmaHistogramChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api): # pylint: disable=C0103
results = []
results.extend(_RunUmaHistogramChecks(input_api, output_api))
return results
| {
"content_hash": "cdfc27e758b38709b777b7077c5a3090",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 31.842105263157894,
"alnum_prop": 0.6258953168044077,
"repo_name": "ric2b/Vivaldi-browser",
"id": "3e18b11a0b508f18400eaebaabeb43b766b9ec0a",
"size": "1977",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/third_party/blink/public/mojom/web_feature/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import json
import stubout
import webob
from nova import compute
from nova import context
from nova import test
from nova.tests.api.openstack import fakes
last_add_fixed_ip = (None, None)
last_remove_fixed_ip = (None, None)
def compute_api_add_fixed_ip(self, context, instance_id, network_id):
global last_add_fixed_ip
last_add_fixed_ip = (instance_id, network_id)
def compute_api_remove_fixed_ip(self, context, instance_id, address):
global last_remove_fixed_ip
last_remove_fixed_ip = (instance_id, address)
class FixedIpTest(test.TestCase):
def setUp(self):
super(FixedIpTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(compute.api.API, "add_fixed_ip",
compute_api_add_fixed_ip)
self.stubs.Set(compute.api.API, "remove_fixed_ip",
compute_api_remove_fixed_ip)
self.context = context.get_admin_context()
def test_add_fixed_ip(self):
global last_add_fixed_ip
last_add_fixed_ip = (None, None)
body = dict(addFixedIp=dict(networkId='test_net'))
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
self.assertEqual(last_add_fixed_ip, ('test_inst', 'test_net'))
def test_add_fixed_ip_no_network(self):
global last_add_fixed_ip
last_add_fixed_ip = (None, None)
body = dict(addFixedIp=dict())
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 422)
self.assertEqual(last_add_fixed_ip, (None, None))
def test_remove_fixed_ip(self):
global last_remove_fixed_ip
last_remove_fixed_ip = (None, None)
body = dict(removeFixedIp=dict(address='10.10.10.1'))
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
self.assertEqual(last_remove_fixed_ip, ('test_inst', '10.10.10.1'))
def test_remove_fixed_ip_no_address(self):
global last_remove_fixed_ip
last_remove_fixed_ip = (None, None)
body = dict(removeFixedIp=dict())
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 422)
self.assertEqual(last_remove_fixed_ip, (None, None))
| {
"content_hash": "46d201b2286f02752466551ff79af1be",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 75,
"avg_line_length": 33.71739130434783,
"alnum_prop": 0.6318504190844616,
"repo_name": "nii-cloud/dodai-compute",
"id": "cecc4af4ffec778f306821f09521e6c5eae7bb55",
"size": "3732",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/contrib/test_multinic_xs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "4253758"
},
{
"name": "Shell",
"bytes": "42407"
}
],
"symlink_target": ""
} |
"""Accesses the google.monitoring.v3 MetricService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.api import metric_pb2 as api_metric_pb2
from google.cloud.gapic.monitoring.v3 import enums
from google.cloud.proto.monitoring.v3 import common_pb2
from google.cloud.proto.monitoring.v3 import metric_pb2 as v3_metric_pb2
from google.cloud.proto.monitoring.v3 import metric_service_pb2
_PageDesc = google.gax.PageDescriptor
class MetricServiceClient(object):
"""
Manages metric descriptors, monitored resource descriptors, and
time series data.
"""
SERVICE_ADDRESS = 'monitoring.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
_PAGE_DESCRIPTORS = {
'list_monitored_resource_descriptors':
_PageDesc('page_token', 'next_page_token', 'resource_descriptors'),
'list_metric_descriptors': _PageDesc('page_token', 'next_page_token',
'metric_descriptors'),
'list_time_series': _PageDesc('page_token', 'next_page_token',
'time_series')
}
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/monitoring',
'https://www.googleapis.com/auth/monitoring.read',
'https://www.googleapis.com/auth/monitoring.write', )
_PROJECT_PATH_TEMPLATE = path_template.PathTemplate('projects/{project}')
_METRIC_DESCRIPTOR_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/metricDescriptors/{metric_descriptor=**}')
_MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}'
)
@classmethod
def project_path(cls, project):
"""Returns a fully-qualified project resource name string."""
return cls._PROJECT_PATH_TEMPLATE.render({'project': project, })
@classmethod
def metric_descriptor_path(cls, project, metric_descriptor):
"""Returns a fully-qualified metric_descriptor resource name string."""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.render({
'project': project,
'metric_descriptor': metric_descriptor,
})
@classmethod
def monitored_resource_descriptor_path(cls, project,
monitored_resource_descriptor):
"""Returns a fully-qualified monitored_resource_descriptor resource name string."""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.render({
'project': project,
'monitored_resource_descriptor': monitored_resource_descriptor,
})
@classmethod
def match_project_from_project_name(cls, project_name):
"""Parses the project from a project resource.
Args:
project_name (string): A fully-qualified path representing a project
resource.
Returns:
A string representing the project.
"""
return cls._PROJECT_PATH_TEMPLATE.match(project_name).get('project')
@classmethod
def match_project_from_metric_descriptor_name(cls, metric_descriptor_name):
"""Parses the project from a metric_descriptor resource.
Args:
metric_descriptor_name (string): A fully-qualified path representing a metric_descriptor
resource.
Returns:
A string representing the project.
"""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.match(
metric_descriptor_name).get('project')
@classmethod
def match_metric_descriptor_from_metric_descriptor_name(
cls, metric_descriptor_name):
"""Parses the metric_descriptor from a metric_descriptor resource.
Args:
metric_descriptor_name (string): A fully-qualified path representing a metric_descriptor
resource.
Returns:
A string representing the metric_descriptor.
"""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.match(
metric_descriptor_name).get('metric_descriptor')
@classmethod
def match_project_from_monitored_resource_descriptor_name(
cls, monitored_resource_descriptor_name):
"""Parses the project from a monitored_resource_descriptor resource.
Args:
monitored_resource_descriptor_name (string): A fully-qualified path representing a monitored_resource_descriptor
resource.
Returns:
A string representing the project.
"""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.match(
monitored_resource_descriptor_name).get('project')
@classmethod
def match_monitored_resource_descriptor_from_monitored_resource_descriptor_name(
cls, monitored_resource_descriptor_name):
"""Parses the monitored_resource_descriptor from a monitored_resource_descriptor resource.
Args:
monitored_resource_descriptor_name (string): A fully-qualified path representing a monitored_resource_descriptor
resource.
Returns:
A string representing the monitored_resource_descriptor.
"""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.match(
monitored_resource_descriptor_name).get(
'monitored_resource_descriptor')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A MetricServiceClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-monitoring-v3', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'metric_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.monitoring.v3.MetricService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers,
page_descriptors=self._PAGE_DESCRIPTORS, )
self.metric_service_stub = config.create_stub(
metric_service_pb2.MetricServiceStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._list_monitored_resource_descriptors = api_callable.create_api_call(
self.metric_service_stub.ListMonitoredResourceDescriptors,
settings=defaults['list_monitored_resource_descriptors'])
self._get_monitored_resource_descriptor = api_callable.create_api_call(
self.metric_service_stub.GetMonitoredResourceDescriptor,
settings=defaults['get_monitored_resource_descriptor'])
self._list_metric_descriptors = api_callable.create_api_call(
self.metric_service_stub.ListMetricDescriptors,
settings=defaults['list_metric_descriptors'])
self._get_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.GetMetricDescriptor,
settings=defaults['get_metric_descriptor'])
self._create_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.CreateMetricDescriptor,
settings=defaults['create_metric_descriptor'])
self._delete_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.DeleteMetricDescriptor,
settings=defaults['delete_metric_descriptor'])
self._list_time_series = api_callable.create_api_call(
self.metric_service_stub.ListTimeSeries,
settings=defaults['list_time_series'])
self._create_time_series = api_callable.create_api_call(
self.metric_service_stub.CreateTimeSeries,
settings=defaults['create_time_series'])
# Service calls
def list_monitored_resource_descriptors(self,
name,
filter_='',
page_size=0,
options=None):
"""
Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_monitored_resource_descriptors(name):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_monitored_resource_descriptors(name, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
filter_ (string): An optional `filter <https://cloud.google.com/monitoring/api/v3/filters>`_ describing
the descriptors to be returned. The filter can reference
the descriptor's type and labels. For example, the
following filter returns only Google Compute Engine descriptors
that have an ``id`` label:
::
resource.type = starts_with(\"gce_\") AND resource.label:id
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_monitored_resource_descriptors(request, options)
def get_monitored_resource_descriptor(self, name, options=None):
"""
Gets a single monitored resource descriptor. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.monitored_resource_descriptor_path('[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]')
>>> response = api.get_monitored_resource_descriptor(name)
Args:
name (string): The monitored resource descriptor to get. The format is
``\"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\"``.
The ``{resource_type}`` is a predefined type, such as
``cloudsql_database``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
name=name)
return self._get_monitored_resource_descriptor(request, options)
def list_metric_descriptors(self,
name,
filter_='',
page_size=0,
options=None):
"""
Lists metric descriptors that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_metric_descriptors(name):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_metric_descriptors(name, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
filter_ (string): If this field is empty, all custom and
system-defined metric descriptors are returned.
Otherwise, the `filter <https://cloud.google.com/monitoring/api/v3/filters>`_
specifies which metric descriptors are to be
returned. For example, the following filter matches all
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_:
::
metric.type = starts_with(\"custom.googleapis.com/\")
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.api.metric_pb2.MetricDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.ListMetricDescriptorsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_metric_descriptors(request, options)
def get_metric_descriptor(self, name, options=None):
"""
Gets a single metric descriptor. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]')
>>> response = api.get_metric_descriptor(name)
Args:
name (string): The metric descriptor on which to execute the request. The format is
``\"projects/{project_id_or_number}/metricDescriptors/{metric_id}\"``.
An example value of ``{metric_id}`` is
``\"compute.googleapis.com/instance/disk/read_bytes_count\"``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.api.metric_pb2.MetricDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.GetMetricDescriptorRequest(name=name)
return self._get_metric_descriptor(request, options)
def create_metric_descriptor(self, name, metric_descriptor, options=None):
"""
Creates a new metric descriptor.
User-created metric descriptors define
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.api import metric_pb2 as api_metric_pb2
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> metric_descriptor = api_metric_pb2.MetricDescriptor()
>>> response = api.create_metric_descriptor(name, metric_descriptor)
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
metric_descriptor (:class:`google.api.metric_pb2.MetricDescriptor`): The new `custom metric <https://cloud.google.com/monitoring/custom-metrics>`_
descriptor.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.api.metric_pb2.MetricDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.CreateMetricDescriptorRequest(
name=name, metric_descriptor=metric_descriptor)
return self._create_metric_descriptor(request, options)
def delete_metric_descriptor(self, name, options=None):
"""
Deletes a metric descriptor. Only user-created
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_ can be deleted.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]')
>>> api.delete_metric_descriptor(name)
Args:
name (string): The metric descriptor on which to execute the request. The format is
``\"projects/{project_id_or_number}/metricDescriptors/{metric_id}\"``.
An example of ``{metric_id}`` is:
``\"custom.googleapis.com/my_test_metric\"``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name)
self._delete_metric_descriptor(request, options)
def list_time_series(self,
name,
filter_,
interval,
view,
aggregation=None,
order_by='',
page_size=0,
options=None):
"""
Lists time series that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.cloud.gapic.monitoring.v3 import enums
>>> from google.cloud.proto.monitoring.v3 import common_pb2
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> filter_ = ''
>>> interval = common_pb2.TimeInterval()
>>> view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
>>>
>>> # Iterate over all results
>>> for element in api.list_time_series(name, filter_, interval, view):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_time_series(name, filter_, interval, view, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
\"projects/{project_id_or_number}\".
filter_ (string): A `monitoring filter <https://cloud.google.com/monitoring/api/v3/filters>`_ that specifies which time
series should be returned. The filter must specify a single metric type,
and can additionally specify metric labels and other information. For
example:
::
metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND
metric.label.instance_name = \"my-instance-name\"
interval (:class:`google.cloud.proto.monitoring.v3.common_pb2.TimeInterval`): The time interval for which results should be returned. Only time series
that contain data points in the specified interval are included
in the response.
aggregation (:class:`google.cloud.proto.monitoring.v3.common_pb2.Aggregation`): By default, the raw time series data is returned.
Use this field to combine multiple time series for different
views of the data.
order_by (string): Specifies the order in which the points of the time series should
be returned. By default, results are not ordered. Currently,
this field must be left blank.
view (enum :class:`google.cloud.gapic.monitoring.v3.enums.ListTimeSeriesRequest.TimeSeriesView`): Specifies which information is returned about the time series.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.cloud.proto.monitoring.v3.metric_pb2.TimeSeries` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
if aggregation is None:
aggregation = common_pb2.Aggregation()
# Create the request object.
request = metric_service_pb2.ListTimeSeriesRequest(
name=name,
filter=filter_,
interval=interval,
view=view,
aggregation=aggregation,
order_by=order_by,
page_size=page_size)
return self._list_time_series(request, options)
def create_time_series(self, name, time_series, options=None):
"""
Creates or adds data to one or more time series.
The response is empty if all time series in the request were written.
If any time series could not be written, a corresponding failure message is
included in the error response.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.cloud.proto.monitoring.v3 import metric_pb2 as v3_metric_pb2
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> time_series = []
>>> api.create_time_series(name, time_series)
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
time_series (list[:class:`google.cloud.proto.monitoring.v3.metric_pb2.TimeSeries`]): The new data to be added to a list of time series.
Adds at most one data point to each of several time series. The new data
point must be more recent than any other point in its time series. Each
``TimeSeries`` value must fully specify a unique time series by supplying
all label values for the metric and the monitored resource.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.CreateTimeSeriesRequest(
name=name, time_series=time_series)
self._create_time_series(request, options)
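# --- Illustrative end-to-end sketch (editorial addition, not part of the generated client) ---
# Writing a single data point to a custom metric: a TimeSeries message is built
# from the generated protos and handed to create_time_series(). The project id
# and metric type are placeholders, and application default credentials are
# assumed to be available in the environment.
if __name__ == '__main__':  # pragma: no cover
    import time

    api = MetricServiceClient()
    series = v3_metric_pb2.TimeSeries()
    series.metric.type = 'custom.googleapis.com/my_test_metric'
    series.resource.type = 'global'
    series.resource.labels['project_id'] = 'my-project'
    point = series.points.add()
    point.value.double_value = 3.14
    point.interval.end_time.seconds = int(time.time())
    api.create_time_series(api.project_path('my-project'), [series])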
| {
"content_hash": "7fddd93a27b703dd0097565be0582601",
"timestamp": "",
"source": "github",
"line_count": 635,
"max_line_length": 170,
"avg_line_length": 46.113385826771655,
"alnum_prop": 0.6253671197322588,
"repo_name": "shinfan/api-client-staging",
"id": "0103b9ade53e3fce55a0e604b10f83fb306b08cb",
"size": "30336",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "generated/python/gapic-google-cloud-monitoring-v3/google/cloud/gapic/monitoring/v3/metric_service_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "2842394"
},
{
"name": "JavaScript",
"bytes": "890945"
},
{
"name": "PHP",
"bytes": "3763710"
},
{
"name": "Protocol Buffer",
"bytes": "605865"
},
{
"name": "Python",
"bytes": "1395644"
},
{
"name": "Ruby",
"bytes": "2468895"
},
{
"name": "Shell",
"bytes": "592"
}
],
"symlink_target": ""
} |
from django.test import client
from mailserver.handlers import BaseMessageHandler
from django.test import signals
from django.utils.functional import curry
class Client(client.Client):
def __init__(self, **defaults):
client.Client.__init__(self, **defaults)
self.handler = BaseMessageHandler()
def request(self, request):
environ = { }
environ.update(self.defaults)
# curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(client.store_rendered_templates, data)
signals.template_rendered.connect(on_template_render)
## capture exceptions created by the handler.
#got_request_exception.connect(self.store_exc_info)
response = self.handler(environ, request)
if self.exc_info:
exc_info = self.exc_info
            self.exc_info = None
            raise exc_info[1], None, exc_info[2]
# save the client and request that stimulated the response.
response.client = self
response.request = request
# add any rendered template detail to the response.
# if there was only one template rendered (the most likely case),
# flatten the list to a single element.
for detail in ('template', 'context'):
if data.get(detail):
if len(data[detail]) == 1:
                    setattr(response, detail, data[detail][0])
else:
setattr(response, detail, data[detail])
else:
setattr(response, detail, None)
return response
| {
"content_hash": "e0ea1534f7a4c97570f8be5f344d3e4d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 34.5625,
"alnum_prop": 0.6142254370102471,
"repo_name": "telenieko/django-mailserver",
"id": "8ff928b070b328cd6eb50b30b4545a8a459eab1c",
"size": "1659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mailserver/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38658"
}
],
"symlink_target": ""
} |
from os.path import abspath
from StringIO import StringIO
import numpy
import unittest
# Enthought library imports.
from mayavi.core.null_engine import NullEngine
from mayavi.sources.builtin_surface import BuiltinSurface
from mayavi.modules.surface import Surface
from mayavi.modules.outline import Outline
class TestBuiltinSurfaceSource(unittest.TestCase):
def setUp(self):
"""Initial setting up of test fixture, automatically called by
TestCase before any other test method is invoked"""
e = NullEngine()
# Uncomment to see visualization for debugging etc.
#e = Engine()
e.start()
s=e.new_scene()
poly_data = BuiltinSurface()
e.add_source(poly_data)
outline = Outline()
e.add_module(outline)
surface = Surface()
e.add_module(surface)
poly_data.data_source.shaft_radius = 0.05
poly_data.data_source.shaft_resolution = 7
poly_data.data_source.tip_radius = 0.1
self.e=e
self.scene = e.current_scene
return
def tearDown(self):
"""For necessary clean up, automatically called by TestCase
after the test methods have been invoked"""
self.e.stop()
return
def test_poly_data_source(self):
"""Do the basic testing"""
s = self.scene
src = s.children[0]
#Check the properties of the default source
self.assertEqual(src.source,'arrow')
self.assertEqual(src.data_source.shaft_radius,0.05)
self.assertEqual(src.data_source.shaft_resolution,7)
self.assertEqual(src.data_source.tip_radius,0.1)
def check(self):
"""Do the actual testing."""
s = self.scene
src = s.children[0]
ot = src.children[0].children[0]
ot.render() # Flush the pipeline.
# Check the outline bounds
self.assertEqual(numpy.allclose(ot.outline_filter.output.bounds,
(-0.5, 0.5, -0.5, 0.5, -0.475, 0.475),
atol=1.01e-03), True)
self.assertEqual(numpy.allclose(src.data_source.angle, 26.565,
atol=1.01e-03),True)
self.assertEqual(numpy.allclose(src.data_source.direction,(1., 0., 0.)),True)
self.assertEqual(src.data_source.radius,0.5)
self.assertEqual(src.data_source.height,1.0)
self.assertEqual(numpy.allclose(src.data_source.center,(0., 0., 0.)),True)
self.assertEqual(src.data_source.resolution, 10)
#Modify Properties and check again
src.data_source.height = 1.5
src.data_source.angle = 30
src.data_source.modified()
self.assertEqual(numpy.allclose(src.data_source.radius,0.866,atol=1.01e-03),True)
def test_change(self):
"""Test if it works fine on changing the source"""
s = self.scene
src = s.children[0]
ot = src.children[0].children[0]
src.source = 'cone'
src.data_source.resolution = 10
# Check with the default properties of cone to verify that the
# source has actually changed
self.assertEqual(src.source,'cone')
self.check()
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
engine = self.e
scene = self.scene
src = scene.children[0]
src.source = 'cone'
src.data_source.resolution = 10
# Save visualization.
f = StringIO()
f.name = abspath('test.mv2') # We simulate a file.
engine.save_visualization(f)
f.seek(0) # So we can read this saved data.
# Remove existing scene.
engine.close_scene(scene)
# Load visualization
engine.load_visualization(f)
self.scene = engine.current_scene
self.check()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "1e69828112b71ace7bc031c4601edbc9",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 89,
"avg_line_length": 31.96747967479675,
"alnum_prop": 0.6065615462868769,
"repo_name": "liulion/mayavi",
"id": "d7edcc8fc4f4da39630609346bf150b0ab1560cc",
"size": "4131",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mayavi/tests/test_builtin_surface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2511883"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import, division, print_function
import random
from functools import total_ordering
from collections import Iterable
from enum import Enum, EnumMeta
class _PokerEnumMeta(EnumMeta):
def __init__(self, clsname, bases, classdict):
# make sure we only have tuple values, not single values
for member in self.__members__.values():
values = member._value_
if not isinstance(values, Iterable) or isinstance(values, basestring):
raise TypeError('{} = {!r}, should be iterable, not {}!'
.format(member._name_, values, type(values))
)
for alias in values:
if isinstance(alias, unicode):
alias = alias.upper()
self._value2member_map_.setdefault(alias, member)
def __call__(cls, value):
"""Return the appropriate instance with any of the values listed. If values contains
text types, those will be looked up in a case insensitive manner."""
if isinstance(value, unicode):
value = value.upper()
return super(_PokerEnumMeta, cls).__call__(value)
def make_random(cls):
return random.choice(list(cls))
@total_ordering
class _OrderableMixin(object):
    # I couldn't inline this into PokerEnum because Enum does some magic that doesn't like it.
# From Python manual:
# If a class that overrides __eq__() needs to retain
# the implementation of __hash__() from a parent class,
# the interpreter must be told this explicitly
def __hash__(self):
return super(_OrderableMixin, self).__hash__()
def __eq__(self, other):
if self.__class__ is other.__class__:
return self._value_ == other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
names = self.__class__._member_names_
return names.index(self._name_) < names.index(other._name_)
return NotImplemented
def __reduce_ex__(self, proto):
return self.__class__.__name__
class PokerEnum(_OrderableMixin, Enum):
__metaclass__ = _PokerEnumMeta
def __unicode__(self):
return unicode(self._value_[0])
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
val = self._value_[0]
apostrophe = "'" if isinstance(val, unicode) else ''
return "{0}({1}{2}{1})".format(self.__class__.__name__, apostrophe, val).encode('utf-8')
def __format__(self, format_spec):
return unicode(self._value_[0])
@property
def val(self):
"""The first value of the Enum member."""
return self._value_[0]
class _ReprMixin(object):
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return "{}('{}')".format(self.__class__.__name__, self).encode('utf-8')
def _make_float(string):
return float(string.strip().replace(',', ''))
def _make_int(string):
return int(string.strip().replace(',', ''))
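# Hedged usage sketch (not part of the original module): how a PokerEnum
# subclass is typically declared. Each member's value is a tuple of aliases;
# _PokerEnumMeta registers every alias (case-insensitively for text), so any
# alias resolves to the same member. The member names and aliases below are
# illustrative assumptions only.
if __name__ == '__main__':
    class Suit(PokerEnum):
        CLUBS = 'c', 'clubs'
        DIAMONDS = 'd', 'diamonds'
        HEARTS = 'h', 'hearts'
        SPADES = 's', 'spades'
    assert Suit('c') is Suit.CLUBS        # alias lookup
    assert Suit('clubs') is Suit.CLUBS    # case-insensitive text lookup
    assert Suit.CLUBS.val == 'c'          # first value of the member
    assert Suit.CLUBS < Suit.SPADES       # ordering follows declaration order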
| {
"content_hash": "5fbee425ba7bdb0adda0ea920e1f1fca",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 96,
"avg_line_length": 32.354166666666664,
"alnum_prop": 0.5952994204764971,
"repo_name": "Seanmcn/poker",
"id": "04793b7f9429775af298239805f6f0fa69e4ddd3",
"size": "3130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poker/_common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1463"
},
{
"name": "Python",
"bytes": "192251"
}
],
"symlink_target": ""
} |
"""Gherkin step implementations for chart axis features."""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from behave import given, then, when
from pptx import Presentation
from pptx.enum.chart import XL_AXIS_CROSSES, XL_CATEGORY_TYPE
from helpers import test_pptx
# given ===================================================
@given('a {axis_type} axis')
def given_a_axis_type_axis(context, axis_type):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = {
'category': chart.category_axis,
'value': chart.value_axis,
}[axis_type]
@given('a major gridlines')
def given_a_major_gridlines(context):
prs = Presentation(test_pptx('cht-gridlines-props'))
axis = prs.slides[0].shapes[0].chart.value_axis
context.gridlines = axis.major_gridlines
@given('a value axis having category axis crossing of {crossing}')
def given_a_value_axis_having_cat_ax_crossing_of(context, crossing):
slide_idx = {
'automatic': 0,
'maximum': 2,
'minimum': 3,
'2.75': 4,
'-1.5': 5,
}[crossing]
prs = Presentation(test_pptx('cht-axis-props'))
context.value_axis = prs.slides[slide_idx].shapes[0].chart.value_axis
@given('an axis')
def given_an_axis(context):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.value_axis
@given('an axis having {a_or_no} title')
def given_an_axis_having_a_or_no_title(context, a_or_no):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[7].shapes[0].chart
context.axis = {
'a': chart.value_axis,
'no': chart.category_axis,
}[a_or_no]
@given('an axis having {major_or_minor} gridlines')
def given_an_axis_having_major_or_minor_gridlines(context, major_or_minor):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.value_axis
@given('an axis having {major_or_minor} unit of {value}')
def given_an_axis_having_major_or_minor_unit_of_value(
context, major_or_minor, value):
slide_idx = 0 if value == 'Auto' else 1
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.axis = chart.value_axis
@given('an axis of type {cls_name}')
def given_an_axis_of_type_cls_name(context, cls_name):
slide_idx = {
'CategoryAxis': 0,
'DateAxis': 6,
}[cls_name]
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.axis = chart.category_axis
@given('an axis not having {major_or_minor} gridlines')
def given_an_axis_not_having_major_or_minor_gridlines(context, major_or_minor):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.category_axis
@given('an axis title')
def given_an_axis_title(context):
prs = Presentation(test_pptx('cht-axis-props'))
context.axis_title = prs.slides[7].shapes[0].chart.value_axis.axis_title
@given('an axis title having {a_or_no} text frame')
def given_an_axis_title_having_a_or_no_text_frame(context, a_or_no):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[7].shapes[0].chart
axis = {
'a': chart.value_axis,
'no': chart.category_axis,
}[a_or_no]
context.axis_title = axis.axis_title
@given('tick labels having an offset of {setting}')
def given_tick_labels_having_an_offset_of_setting(context, setting):
slide_idx = {
'no explicit setting': 0,
'420': 1,
}[setting]
prs = Presentation(test_pptx('cht-ticklabels-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.tick_labels = chart.category_axis.tick_labels
# when ====================================================
@when('I assign {value} to axis.has_title')
def when_I_assign_value_to_axis_has_title(context, value):
context.axis.has_title = {'True': True, 'False': False}[value]
@when('I assign {value} to axis.has_{major_or_minor}_gridlines')
def when_I_assign_value_to_axis_has_major_or_minor_gridlines(
context, value, major_or_minor):
axis = context.axis
propname = 'has_%s_gridlines' % major_or_minor
new_value = {'True': True, 'False': False}[value]
setattr(axis, propname, new_value)
@when('I assign {value} to axis.{major_or_minor}_unit')
def when_I_assign_value_to_axis_major_or_minor_unit(
context, value, major_or_minor):
axis = context.axis
propname = '%s_unit' % major_or_minor
new_value = {'8.4': 8.4, '5': 5, 'None': None}[value]
setattr(axis, propname, new_value)
@when('I assign {value} to axis_title.has_text_frame')
def when_I_assign_value_to_axis_title_has_text_frame(context, value):
context.axis_title.has_text_frame = {'True': True, 'False': False}[value]
@when('I assign {value} to tick_labels.offset')
def when_I_assign_value_to_tick_labels_offset(context, value):
new_value = int(value)
context.tick_labels.offset = new_value
@when('I assign {member} to value_axis.crosses')
def when_I_assign_member_to_value_axis_crosses(context, member):
value_axis = context.value_axis
value_axis.crosses = getattr(XL_AXIS_CROSSES, member)
@when('I assign {value} to value_axis.crosses_at')
def when_I_assign_value_to_value_axis_crosses_at(context, value):
new_value = None if value == 'None' else float(value)
context.value_axis.crosses_at = new_value
# then ====================================================
@then('axis.axis_title is an AxisTitle object')
def then_axis_axis_title_is_an_AxisTitle_object(context):
class_name = type(context.axis.axis_title).__name__
assert class_name == 'AxisTitle', 'got %s' % class_name
@then('axis.category_type is XL_CATEGORY_TYPE.{member}')
def then_axis_category_type_is_XL_CATEGORY_TYPE_member(context, member):
expected_value = getattr(XL_CATEGORY_TYPE, member)
category_type = context.axis.category_type
assert category_type is expected_value, 'got %s' % category_type
@then('axis.format is a ChartFormat object')
def then_axis_format_is_a_ChartFormat_object(context):
axis = context.axis
assert type(axis.format).__name__ == 'ChartFormat'
@then('axis.format.fill is a FillFormat object')
def then_axis_format_fill_is_a_FillFormat_object(context):
axis = context.axis
assert type(axis.format.fill).__name__ == 'FillFormat'
@then('axis.format.line is a LineFormat object')
def then_axis_format_line_is_a_LineFormat_object(context):
axis = context.axis
assert type(axis.format.line).__name__ == 'LineFormat'
@then('axis.has_title is {value}')
def then_axis_has_title_is_value(context, value):
axis = context.axis
actual_value = axis.has_title
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('axis.has_{major_or_minor}_gridlines is {value}')
def then_axis_has_major_or_minor_gridlines_is_expected_value(
context, major_or_minor, value):
axis = context.axis
actual_value = {
'major': axis.has_major_gridlines,
'minor': axis.has_minor_gridlines,
}[major_or_minor]
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('axis.major_gridlines is a MajorGridlines object')
def then_axis_major_gridlines_is_a_MajorGridlines_object(context):
axis = context.axis
assert type(axis.major_gridlines).__name__ == 'MajorGridlines'
@then('axis.{major_or_minor}_unit is {value}')
def then_axis_major_or_minor_unit_is_value(context, major_or_minor, value):
axis = context.axis
propname = '%s_unit' % major_or_minor
actual_value = getattr(axis, propname)
expected_value = {
'20.0': 20.0, '8.4': 8.4, '5.0': 5.0, '4.2': 4.2, 'None': None
}[value]
assert actual_value == expected_value, 'got %s' % actual_value
@then('axis_title.format is a ChartFormat object')
def then_axis_title_format_is_a_ChartFormat_object(context):
class_name = type(context.axis_title.format).__name__
assert class_name == 'ChartFormat', 'got %s' % class_name
@then('axis_title.format.fill is a FillFormat object')
def then_axis_title_format_fill_is_a_FillFormat_object(context):
class_name = type(context.axis_title.format.fill).__name__
assert class_name == 'FillFormat', 'got %s' % class_name
@then('axis_title.format.line is a LineFormat object')
def then_axis_title_format_line_is_a_LineFormat_object(context):
class_name = type(context.axis_title.format.line).__name__
assert class_name == 'LineFormat', 'got %s' % class_name
@then('axis_title.has_text_frame is {value}')
def then_axis_title_has_text_frame_is_value(context, value):
actual_value = context.axis_title.has_text_frame
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('axis_title.text_frame is a TextFrame object')
def then_axis_title_text_frame_is_a_TextFrame_object(context):
class_name = type(context.axis_title.text_frame).__name__
assert class_name == 'TextFrame', 'got %s' % class_name
@then('gridlines.format is a ChartFormat object')
def then_gridlines_format_is_a_ChartFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format).__name__ == 'ChartFormat'
@then('gridlines.format.fill is a FillFormat object')
def then_gridlines_format_fill_is_a_FillFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format.fill).__name__ == 'FillFormat'
@then('gridlines.format.line is a LineFormat object')
def then_gridlines_format_line_is_a_LineFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format.line).__name__ == 'LineFormat'
@then('tick_labels.offset is {value}')
def then_tick_labels_offset_is_expected_value(context, value):
expected_value = int(value)
tick_labels = context.tick_labels
assert tick_labels.offset == expected_value, (
'got %s' % tick_labels.offset
)
@then('value_axis.crosses is {member}')
def then_value_axis_crosses_is_value(context, member):
value_axis = context.value_axis
expected_value = getattr(XL_AXIS_CROSSES, member)
assert value_axis.crosses == expected_value, 'got %s' % value_axis.crosses
@then('value_axis.crosses_at is {value}')
def then_value_axis_crosses_at_is_value(context, value):
value_axis = context.value_axis
expected_value = None if value == 'None' else float(value)
assert value_axis.crosses_at == expected_value, (
'got %s' % value_axis.crosses_at
)
| {
"content_hash": "acd80359f3ada13285f822fd25e7b37f",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 79,
"avg_line_length": 34.3968253968254,
"alnum_prop": 0.6697738809413937,
"repo_name": "biggihs/python-pptx",
"id": "853e5c5544b6caeae376f7207741658a5a26e8ad",
"size": "10854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "features/steps/axis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "101192"
},
{
"name": "Makefile",
"bytes": "2091"
},
{
"name": "PLpgSQL",
"bytes": "48599"
},
{
"name": "Python",
"bytes": "1877645"
}
],
"symlink_target": ""
} |
import os.path as op
import tempita
from depsolver._package_utils \
import \
parse_package_full_name
from depsolver.compat \
import \
OrderedDict
from depsolver.package \
import \
PackageInfo
from depsolver.requirement \
import \
Requirement
from depsolver.solver.rules_generator \
import \
RulesGenerator
from depsolver.bundled.traitlets \
import \
HasTraits, Instance
from depsolver.solver.tests.scenarios.common \
import \
COMMON_IMPORTS, BaseScenario, packages_list_to_php_json, \
job_to_php_constraints, run_php_scenarios
DATA = op.join(op.dirname(__file__), "data", "rules_generator")
P = PackageInfo.from_string
R = Requirement.from_string
TEMPLATE = """\
<?php
require {{bootstrap_path}};
{{common_imports}}
$loader = new ArrayLoader();
/* Remote repository definition */
$remote_repo_json = '
{{remote_repo_json_string}}
';
$packages = JsonFile::parseJson($remote_repo_json);
$remote_repo = new WritableArrayRepository();
foreach ($packages as $packageData) {
$package = $loader->load($packageData);
$remote_repo->addPackage($package);
}
/* Installed repository definition */
$repo_json = '
{{installed_repo_json_string}}
';
$packages = JsonFile::parseJson($repo_json);
$installed_repo = new WritableArrayRepository();
foreach ($packages as $packageData) {
$package = $loader->load($packageData);
$installed_repo->addPackage($package);
}
/* Pool definition */
$pool = new Pool();
$pool->addRepository($remote_repo);
$pool->addRepository($installed_repo);
$request = new Request($pool);
{{for operation, requirement_name, constraints in request}}
$constraints = array(
{{constraints}}
);
$request_constraints = new MultiConstraint($constraints);
$request->{{operation}}("{{requirement_name}}", $request_constraints);
{{endfor}}
class DebuggingSolver extends Solver
{
public function printRules(Request $request)
{
$this->jobs = $request->getJobs();
$this->setupInstalledMap();
$this->decisions = new Decisions($this->pool);
$this->rules = $this->ruleSetGenerator->getRulesFor($this->jobs, $this->installedMap);
$this->watchGraph = new RuleWatchGraph;
foreach ($this->rules as $rule) {
printf("%s\\n", $rule);
}
}
}
$policy = new DefaultPolicy();
$solver = new DebuggingSolver($policy, $pool, $installed_repo);
$solver->printRules($request);
"""
class RulesGeneratorScenario(HasTraits):
_base_scenario = Instance(BaseScenario)
@property
def remote_repository(self):
return self._base_scenario.remote_repository
@property
def installed_repository(self):
return self._base_scenario.installed_repository
@property
def pool(self):
return self._base_scenario.pool
@property
def request(self):
return self._base_scenario.request
@classmethod
def from_yaml(cls, filename):
base_scenario = BaseScenario.from_yaml(filename)
return cls(_base_scenario=base_scenario)
@classmethod
def from_data(cls, remote_packages, installed_packages, request_jobs):
        base_scenario = BaseScenario.from_data(remote_packages, installed_packages, request_jobs)
return cls(_base_scenario=base_scenario)
def compute_rules(self):
installed_map = OrderedDict()
for package in self.installed_repository.iter_packages():
installed_map[package.id] = package
rules_generator = RulesGenerator(self.pool, self.request, installed_map)
return list(rules_generator.iter_rules())
def to_php(self, filename="test_installed_map.php", composer_location=None):
if composer_location is None:
bootstrap_path = "__DIR__.'/src/bootstrap.php'"
else:
bootstrap_path = "'%s'" % op.join(composer_location, "src", "bootstrap.php")
template = tempita.Template(TEMPLATE)
remote_packages = self.remote_repository.list_packages()
installed_packages = self.installed_repository.list_packages()
variables = {
"bootstrap_path": bootstrap_path,
"remote_repo_json_string": packages_list_to_php_json(remote_packages),
"installed_repo_json_string": packages_list_to_php_json(installed_packages),
"request": [(job.job_type, job.requirement.name, job_to_php_constraints(job)) \
for job in self.request.jobs],
"common_imports": COMMON_IMPORTS,
}
with open(filename, "wt") as fp:
fp.write(template.substitute(variables))
def post_process(output):
    """Quick-and-dirty conversion of PHP rule strings to depsolver-style ones."""
lines = []
for line in output.splitlines():
new_parts = []
parts = [p.strip() for p in line[1:-1].split("|")]
for part in parts:
if part.startswith("-"):
part = part[1:-2]
name, version = parse_package_full_name(part)
new_part = "-" + "%s-%s" % (name, str(version))
else:
part = part[:-2]
name, version = parse_package_full_name(part)
new_part = "%s-%s" % (name, str(version))
new_parts.append(new_part)
lines.append("(" + " | ".join(new_parts) + ")")
lines.append("")
return "\n".join(lines)
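# Hedged usage sketch (not part of the original module): load a scenario
# description from yaml and print every rule depsolver generates for it. The
# helper name and the idea of passing an explicit path are assumptions for
# illustration; any file in the format used by the data files above would do.
def _print_rules_for_scenario(yaml_path):
    scenario = RulesGeneratorScenario.from_yaml(yaml_path)
    for rule in scenario.compute_rules():
        print(rule)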
if __name__ == "__main__":
data_directory = op.join(op.dirname(__file__), "data", "rules_generator")
run_php_scenarios(data_directory, RulesGeneratorScenario, post_process)
| {
"content_hash": "7e02a34effbbb857a165aecdfa73f10e",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 95,
"avg_line_length": 29.638297872340427,
"alnum_prop": 0.6331658291457286,
"repo_name": "enthought/depsolver",
"id": "da03d65e57c56a3957d36f21c15ab6e404381398",
"size": "5572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "depsolver/solver/tests/scenarios/rules_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5585"
},
{
"name": "Python",
"bytes": "307961"
},
{
"name": "Shell",
"bytes": "5111"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='SpecimenInfo',
version='1.3.0',
description=('Fetch and format plant specimen informations from data file'
' and web, save outcome to xlsx file and SQLite3 db file.'),
author='Haofei Jin',
author_email='[email protected]',
url='https://github.com/zxjsdp/SpecimenInfo',
license='Apache',
keywords='specimen automated plant format xlsx',
packages=['specimen_info'],
install_requires=['requests', 'BeautifulSoup4', 'openpyxl'],
# $ pip install -e .[dev,test]
extras_require={
'dev': ['pytest', 'tox', 'sphinx'],
'test': ['pytest'],
},
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| {
"content_hash": "afdfb7243574b618096bab0d9b474656",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 36,
"alnum_prop": 0.6004273504273504,
"repo_name": "zxjsdp/SpecimenInfo",
"id": "03542ac9832c3c27dec8e4042e5256bc0accd8a4",
"size": "1404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "106217"
}
],
"symlink_target": ""
} |
import django.dispatch
invite_sent = django.dispatch.Signal(providing_args=["invitation",])
invite_accepted = django.dispatch.Signal(providing_args=["invitation"])
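# Hedged usage sketch (not part of the original module): a receiver for
# invite_sent. Django passes the keyword arguments named in providing_args,
# so the handler accepts `invitation`; the handler name and body are
# assumptions for illustration.
def _example_invite_sent_receiver(sender, invitation=None, **kwargs):
    # React to a freshly sent invitation, e.g. log it or queue a notification.
    pass
# invite_sent.connect(_example_invite_sent_receiver)  # connect where appropriate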
| {
"content_hash": "d47f5ee49178f368750be496c0518a31",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 71,
"avg_line_length": 33.2,
"alnum_prop": 0.7831325301204819,
"repo_name": "amitagrawal/kaleo",
"id": "3b8928cbaecb8811a3e6e6078975591c57c71490",
"size": "166",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kaleo/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from direct.distributed.ClockDelta import globalClockDelta
from pandac.PandaModules import Point3
from toontown.toonbase import TTLocalizer
import PartyGlobals
from DistributedPartyTeamActivity import DistributedPartyTeamActivity
from PartyCogActivity import PartyCogActivity
class DistributedPartyCogActivity(DistributedPartyTeamActivity):
notify = directNotify.newCategory('DistributedPartyCogActivity')
players = {}
localPlayer = None
view = None
def __init__(self, cr, arenaModel = 'phase_13/models/parties/cogPieArena_model', texture = None):
DistributedPartyTeamActivity.__init__(self, cr, PartyGlobals.ActivityIds.PartyCog, startDelay=PartyGlobals.CogActivityStartDelay, balanceTeams=PartyGlobals.CogActivityBalanceTeams)
self.arenaModel = arenaModel
self.texture = texture
def load(self):
DistributedPartyTeamActivity.load(self)
self.view = PartyCogActivity(self, self.arenaModel, self.texture)
self.view.load()
def announceGenerate(self):
DistributedPartyTeamActivity.announceGenerate(self)
for i in xrange(len(self.toonIds)):
for toonId in self.toonIds[i]:
toon = base.cr.doId2do.get(toonId, None)
if toon:
self.view.handleToonJoined(toon, i, lateEntry=True)
return
def unload(self):
if hasattr(self, 'view') and self.view is not None:
self.view.unload()
del self.view
DistributedPartyTeamActivity.unload(self)
return
def enable(self):
DistributedPartyTeamActivity.enable(self)
def disable(self):
DistributedPartyTeamActivity.disable(self)
def getTitle(self):
return TTLocalizer.PartyCogTitle
def getInstructions(self):
return TTLocalizer.PartyCogInstructions
def pieThrow(self, toonId, timestamp, h, x, y, z, power):
if toonId not in self.toonIds:
return
if toonId != base.localAvatar.doId:
self.view.pieThrow(toonId, timestamp, h, Point3(x, y, z), power)
def b_pieThrow(self, toon, power):
timestamp = globalClockDelta.localToNetworkTime(globalClock.getFrameTime(), bits=32)
pos = toon.getPos()
h = toon.getH()
toonId = toon.doId
self.view.pieThrow(toonId, timestamp, h, pos, power)
self.d_broadcastPieThrow(toonId, timestamp, h, pos[0], pos[1], pos[2], power)
def d_broadcastPieThrow(self, toonId, timestamp, h, x, y, z, power):
self.sendUpdate('pieThrow', [toonId,
timestamp,
h,
x,
y,
z,
power])
def pieHitsToon(self, toonId, timestamp, x, y, z):
if toonId not in self.toonIds:
return
self.view.pieHitsToon(toonId, timestamp, Point3(x, y, z))
def d_broadcastPieHitsToon(self, toonId, timestamp, pos):
self.sendUpdate('pieHitsToon', [toonId,
timestamp,
pos[0],
pos[1],
pos[2]])
def b_pieHitsToon(self, toonId, timestamp, pos):
self.view.pieHitsToon(toonId, timestamp, pos)
self.d_broadcastPieHitsToon(toonId, timestamp, pos)
def pieHitsCog(self, toonId, timestamp, hitCogNum, x, y, z, direction, part):
if toonId not in self.toonIds:
return
if toonId != base.localAvatar.doId:
self.view.pieHitsCog(timestamp, hitCogNum, Point3(x, y, z), direction, part)
def b_pieHitsCog(self, timestamp, hitCogNum, pos, direction, part):
self.view.pieHitsCog(timestamp, hitCogNum, pos, direction, part)
self.d_broadcastSendPieHitsCog(timestamp, hitCogNum, pos, direction, part)
def d_broadcastSendPieHitsCog(self, timestamp, hitCogNum, pos, direction, part):
self.sendUpdate('pieHitsCog', [base.localAvatar.doId,
timestamp,
hitCogNum,
pos[0],
pos[1],
pos[2],
direction,
part])
def setCogDistances(self, distances):
self.view.setCogDistances(distances)
def setHighScore(self, toonName, score):
self.setSignNote(TTLocalizer.PartyCogSignNote % (toonName, score))
def handleToonJoined(self, toonId):
DistributedPartyTeamActivity.handleToonJoined(self, toonId)
toon = base.cr.doId2do.get(toonId, None)
team = self.getTeam(toonId)
if toon is not None and self.view is not None:
self.view.handleToonJoined(toon, team)
return
def handleToonExited(self, toonId):
toon = base.cr.doId2do.get(toonId, None)
if toon is None:
return
if self.view is not None:
self.view.handleToonExited(toon)
DistributedPartyTeamActivity.handleToonExited(self, toonId)
return
def handleToonShifted(self, toonId):
toon = base.cr.doId2do.get(toonId, None)
if toon is None:
return
if self.view is not None:
self.view.handleToonShifted(toon)
return
def handleToonSwitchedTeams(self, toonId):
DistributedPartyTeamActivity.handleToonSwitchedTeams(self, toonId)
toon = base.cr.doId2do.get(toonId, None)
if toon is None:
return
if self.view is not None:
self.view.handleToonSwitchedTeams(toon)
return
def handleToonDisabled(self, toonId):
if self.view is not None:
self.view.handleToonDisabled(toonId)
return
def startWaitForEnough(self):
DistributedPartyTeamActivity.startWaitForEnough(self)
self.view.openArenaDoors()
self.view.hideCogs()
def startRules(self):
DistributedPartyTeamActivity.startRules(self)
self.view.closeArenaDoors()
self.view.showCogs()
def startActive(self):
DistributedPartyTeamActivity.startActive(self)
self.view.startActivity(self.getCurrentActivityTime())
self.view.closeArenaDoors()
if not self.isLocalToonPlaying:
self.view.showArenaDoorTimers(self._duration + PartyGlobals.CogActivityConclusionDuration + 1.0 - self.getCurrentActivityTime())
def finishActive(self):
DistributedPartyTeamActivity.finishActive(self)
self.view.stopActivity()
def startConclusion(self, data):
DistributedPartyTeamActivity.startConclusion(self, data)
if self.isLocalToonPlaying:
score = (int(data / 10000), data % 10000)
winner = 2
if score[PartyGlobals.TeamActivityTeams.LeftTeam] > score[PartyGlobals.TeamActivityTeams.RightTeam]:
winner = PartyGlobals.TeamActivityTeams.LeftTeam
elif score[PartyGlobals.TeamActivityTeams.LeftTeam] < score[PartyGlobals.TeamActivityTeams.RightTeam]:
winner = PartyGlobals.TeamActivityTeams.RightTeam
if winner < 2:
if self.getTeam(base.localAvatar.doId) == winner:
resultsText = TTLocalizer.PartyTeamActivityLocalAvatarTeamWins
else:
resultsText = TTLocalizer.PartyTeamActivityWins % TTLocalizer.PartyCogTeams[winner]
else:
resultsText = TTLocalizer.PartyTeamActivityGameTie
self.view.showResults(resultsText, winner, score)
def finishConclusion(self):
self.view.hideResults()
DistributedPartyTeamActivity.finishConclusion(self)
self.view.hideArenaDoorTimers()
| {
"content_hash": "ed94cb8f67dc9506512463e3697ab3fb",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 188,
"avg_line_length": 37.402010050251256,
"alnum_prop": 0.6584710466209862,
"repo_name": "Spiderlover/Toontown",
"id": "7a6b97208cfa65e9fe205841bd9871734e360d48",
"size": "7443",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "toontown/parties/DistributedPartyCogActivity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
} |
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
from . import inference
from . import train
from .utils import evaluation_utils
from .utils import misc_utils as utils
from .utils import vocab_utils
utils.check_tensorflow_version()
FLAGS = None
INFERENCE_KEYS = ["src_max_len_infer", "tgt_max_len_infer", "subword_option",
"infer_batch_size", "beam_width",
"length_penalty_weight", "sampling_temperature",
"num_translations_per_input", "infer_mode"]
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument("--num_units", type=int, default=32, help="Network size.")
parser.add_argument("--num_layers", type=int, default=2,
help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument("--encoder_type", type=str, default="uni", help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument("--residual", type="bool", nargs="?", const=True,
default=False,
help="Whether to add residual connections.")
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
default=True,
help="Whether to use time-major mode for dynamic RNN.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument("--attention", type=str, default="", help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="standard",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
Only used in standard attention_architecture. Whether use attention as
the cell output at each timestep.
.\
""")
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument("--optimizer", type=str, default="sgd", help="sgd | adam")
parser.add_argument("--learning_rate", type=float, default=1.0,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--warmup_steps", type=int, default=0,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme", type=str, default="", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
for 5 times before finishing.\
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--num_train_steps", type=int, default=12000, help="Num steps to train.")
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
                      help=("Whether to try colocating gradients with "
                            "corresponding ops"))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument("--src", type=str, default=None,
help="Source suffix, e.g., en.")
parser.add_argument("--tgt", type=str, default=None,
help="Target suffix, e.g., de.")
parser.add_argument("--train_prefix", type=str, default=None,
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument("--dev_prefix", type=str, default=None,
help="Dev prefix, expect files with src/tgt suffixes.")
parser.add_argument("--test_prefix", type=str, default=None,
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument("--out_dir", type=str, default=None,
help="Store log/model files.")
# Vocab
parser.add_argument("--vocab_prefix", type=str, default=None, help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument("--embed_prefix", type=str, default=None, help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formated txt files.\
""")
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.")
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.")
parser.add_argument("--share_vocab", type="bool", nargs="?", const=True,
default=False,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
Whether check special sos, eos, unk tokens exist in the
vocab files.\
""")
# Sequence lengths
parser.add_argument("--src_max_len", type=int, default=50,
help="Max length of src sequences during training.")
parser.add_argument("--tgt_max_len", type=int, default=50,
help="Max length of tgt sequences during training.")
parser.add_argument("--src_max_len_infer", type=int, default=None,
help="Max length of src sequences during inference.")
parser.add_argument("--tgt_max_len_infer", type=int, default=None,
help="""\
Max length of tgt sequences during inference. Also use to restrict the
maximum decoding length.\
""")
# Default settings works well (rarely need to change)
parser.add_argument("--unit_type", type=str, default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
parser.add_argument("--forget_bias", type=float, default=1.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
parser.add_argument("--steps_per_stats", type=int, default=100,
help=("How many training steps to do per stats logging."
"Save checkpoint every 10x steps_per_stats"))
parser.add_argument("--max_train", type=int, default=0,
help="Limit on the size of training data (0: no limit).")
parser.add_argument("--num_buckets", type=int, default=5,
help="Put data into similar-length buckets.")
parser.add_argument("--num_sampled_softmax", type=int, default=0,
help=("Use sampled_softmax_loss if > 0."
"Otherwise, use full softmax loss."))
# SPM
parser.add_argument("--subword_option", type=str, default="",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Experimental encoding feature.
parser.add_argument("--use_char_encode", type="bool", default=False,
help="""\
                      Whether to split each word or bpe into characters, and then
                      generate the word-level representation from the character
                      representation.
""")
# Misc
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of gpus in each worker.")
parser.add_argument("--log_device_placement", type="bool", nargs="?",
const=True, default=False, help="Debug GPU allocation.")
parser.add_argument("--metrics", type=str, default="bleu",
help=("Comma-separated list of evaluations "
"metrics (bleu,rouge,accuracy)"))
parser.add_argument("--steps_per_external_eval", type=int, default=None,
help="""\
How many training steps to do per external evaluation. Automatically set
based on data if None.\
""")
parser.add_argument("--scope", type=str, default=None,
help="scope to put variables under")
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument("--random_seed", type=int, default=None,
help="Random seed (>0, set a specific seed).")
parser.add_argument("--override_loaded_hparams", type="bool", nargs="?",
const=True, default=False,
help="Override loaded hparams with values specified")
parser.add_argument("--num_keep_ckpts", type=int, default=5,
help="Max number of checkpoints to keep.")
parser.add_argument("--avg_ckpts", type="bool", nargs="?",
const=True, default=False, help=("""\
Average the last N checkpoints for external evaluation.
N can be controlled by setting --num_keep_ckpts.\
"""))
parser.add_argument("--language_model", type="bool", nargs="?",
const=True, default=False,
help="True to train a language model, ignoring encoder")
# Inference
parser.add_argument("--ckpt", type=str, default="",
help="Checkpoint file to load a model for inference.")
parser.add_argument("--inference_input_file", type=str, default=None,
help="Set to the text to decode.")
parser.add_argument("--inference_list", type=str, default=None,
help=("A comma-separated list of sentence indices "
"(0-based) to decode."))
parser.add_argument("--infer_batch_size", type=int, default=None,
help="Batch size for inference mode.")
parser.add_argument("--inference_output_file", type=str, default=None,
help="Output file to store decoding results.")
parser.add_argument("--inference_ref_file", type=str, default=None,
help=("""\
Reference file to compute evaluation scores (if provided).\
"""))
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="greedy",
choices=["greedy", "sample", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=0,
help=("""\
beam width when using beam search decoder. If 0 (default), use standard
decoder with greedy helper.\
"""))
parser.add_argument("--length_penalty_weight", type=float, default=0.0,
help="Length penalty for beam search.")
parser.add_argument("--sampling_temperature", type=float,
default=0.0,
help=("""\
Softmax sampling temperature for inference decoding, 0.0 means greedy
decoding. This option is ignored when using beam search.\
"""))
parser.add_argument("--num_translations_per_input", type=int, default=1,
help=("""\
Number of translations generated for each sentence. This is only used for
inference.\
"""))
# Job info
parser.add_argument("--jobid", type=int, default=0,
help="Task id of the worker.")
parser.add_argument("--num_workers", type=int, default=1,
help="Number of workers (inference only).")
parser.add_argument("--num_inter_threads", type=int, default=0,
help="number of inter_op_parallelism_threads")
parser.add_argument("--num_intra_threads", type=int, default=0,
help="number of intra_op_parallelism_threads")
parser.add_argument("--iterations", type=int, default=1,
help="number of iterations")
parser.add_argument("--workloadName", type=str, default="",
help="name of workload")
parser.add_argument("--run", type=str, default='performance',
help="Determine criteria run for infernece")
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=flags.train_prefix,
dev_prefix=flags.dev_prefix,
test_prefix=flags.test_prefix,
vocab_prefix=flags.vocab_prefix,
embed_prefix=flags.embed_prefix,
out_dir=flags.out_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
unit_type=flags.unit_type,
encoder_type=flags.encoder_type,
residual=flags.residual,
time_major=flags.time_major,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Attention mechanisms
attention=flags.attention,
attention_architecture=flags.attention_architecture,
output_attention=flags.output_attention,
pass_hidden_state=flags.pass_hidden_state,
# Train
optimizer=flags.optimizer,
num_train_steps=flags.num_train_steps,
batch_size=flags.batch_size,
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_scheme=flags.decay_scheme,
colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
num_sampled_softmax=flags.num_sampled_softmax,
# Data constraints
num_buckets=flags.num_buckets,
max_train=flags.max_train,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
infer_batch_size=flags.infer_batch_size,
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
sampling_temperature=flags.sampling_temperature,
num_translations_per_input=flags.num_translations_per_input,
# Vocab
sos=flags.sos if flags.sos else vocab_utils.SOS,
eos=flags.eos if flags.eos else vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
use_char_encode=flags.use_char_encode,
# Misc
forget_bias=flags.forget_bias,
num_gpus=flags.num_gpus,
epoch_step=0, # record where we were within an epoch.
steps_per_stats=flags.steps_per_stats,
steps_per_external_eval=flags.steps_per_external_eval,
share_vocab=flags.share_vocab,
metrics=flags.metrics.split(","),
log_device_placement=flags.log_device_placement,
random_seed=flags.random_seed,
override_loaded_hparams=flags.override_loaded_hparams,
num_keep_ckpts=flags.num_keep_ckpts,
avg_ckpts=flags.avg_ckpts,
language_model=flags.language_model,
num_intra_threads=flags.num_intra_threads,
num_inter_threads=flags.num_inter_threads,
iterations=flags.iterations,
      run=flags.run,
)
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
raise ValueError("For bi, num_encoder_layers %d should be even" %
hparams.num_encoder_layers)
if (hparams.attention_architecture in ["gnmt"] and
hparams.num_encoder_layers < 2):
raise ValueError("For gnmt attention architecture, "
"num_encoder_layers %d should be >= 2" %
hparams.num_encoder_layers)
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
  if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
    raise ValueError("beam_width must be greater than 0 when using beam_search "
                     "decoder.")
  if hparams.infer_mode == "sample" and hparams.sampling_temperature <= 0.0:
    raise ValueError("sampling_temperature must be greater than 0.0 when using "
                     "sample decoder.")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers and hparams.num_decoder_layers
if hparams.num_encoder_layers != hparams.num_decoder_layers:
hparams.pass_hidden_state = False
utils.print_out("Num encoder layer %d is different from num decoder layer"
" %d, so set pass_hidden_state to False" % (
hparams.num_encoder_layers,
hparams.num_decoder_layers))
# Set residual layers
num_encoder_residual_layers = 0
num_decoder_residual_layers = 0
if hparams.residual:
if hparams.num_encoder_layers > 1:
num_encoder_residual_layers = hparams.num_encoder_layers - 1
if hparams.num_decoder_layers > 1:
num_decoder_residual_layers = hparams.num_decoder_layers - 1
if hparams.encoder_type == "gnmt":
# The first unidirectional layer (after the bi-directional layer) in
# the GNMT encoder can't have residual connection due to the input is
# the concatenation of fw_cell and bw_cell's outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
# Compatible for GNMT models
if hparams.num_encoder_layers == hparams.num_decoder_layers:
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
# Language modeling
if getattr(hparams, "language_model", None):
hparams.attention = "normed_bahdanau"
hparams.attention_architecture = "gnmt_v2"
hparams.pass_hidden_state = False
hparams.share_vocab = True
hparams.src = hparams.tgt
utils.print_out("For language modeling, we turn off attention and "
"pass_hidden_state; turn on share_vocab; set src to tgt.")
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
check_special_token = getattr(hparams, "check_special_token", True)
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.out_dir,
check_special_token=check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
# Target vocab
if hparams.share_vocab:
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
else:
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
tgt_vocab_file,
hparams.out_dir,
check_special_token=check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
num_embeddings_partitions = getattr(hparams, "num_embeddings_partitions", 0)
_add_argument(hparams, "num_enc_emb_partitions", num_embeddings_partitions)
_add_argument(hparams, "num_dec_emb_partitions", num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
if getattr(hparams, "embed_prefix", None):
src_embed_file = hparams.embed_prefix + "." + hparams.src
tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
if tf.gfile.Exists(src_embed_file):
utils.print_out(" src_embed_file %s exist" % src_embed_file)
hparams.src_embed_file = src_embed_file
utils.print_out(
"For pretrained embeddings, set num_enc_emb_partitions to 1")
hparams.num_enc_emb_partitions = 1
else:
utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)
if tf.gfile.Exists(tgt_embed_file):
utils.print_out(" tgt_embed_file %s exist" % tgt_embed_file)
hparams.tgt_embed_file = tgt_embed_file
utils.print_out(
"For pretrained embeddings, set num_dec_emb_partitions to 1")
hparams.num_dec_emb_partitions = 1
else:
utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)
# Evaluation
for metric in hparams.metrics:
best_metric_dir = os.path.join(hparams.out_dir, "best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "best_" + metric, 0, update=False)
_add_argument(hparams, "best_" + metric + "_dir", best_metric_dir)
if getattr(hparams, "avg_ckpts", None):
best_metric_dir = os.path.join(hparams.out_dir, "avg_best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "avg_best_" + metric, 0, update=False)
_add_argument(hparams, "avg_best_" + metric + "_dir", best_metric_dir)
return hparams
def ensure_compatible_hparams(hparams, default_hparams, hparams_path=""):
"""Make sure the loaded hparams is compatible with new changes."""
default_hparams = utils.maybe_parse_standard_hparams(
default_hparams, hparams_path)
# Set num encoder/decoder layers (for old checkpoints)
if hasattr(hparams, "num_layers"):
if not hasattr(hparams, "num_encoder_layers"):
hparams.add_hparam("num_encoder_layers", hparams.num_layers)
if not hasattr(hparams, "num_decoder_layers"):
hparams.add_hparam("num_decoder_layers", hparams.num_layers)
  # For compatibility reasons, if there are new fields in default_hparams,
# we add them to the current hparams
default_config = default_hparams.values()
config = hparams.values()
for key in default_config:
if key not in config:
hparams.add_hparam(key, default_config[key])
# Update all hparams' keys if override_loaded_hparams=True
if getattr(default_hparams, "override_loaded_hparams", None):
overwritten_keys = default_config.keys()
else:
# For inference
overwritten_keys = INFERENCE_KEYS
for key in overwritten_keys:
if getattr(hparams, key) != default_config[key]:
utils.print_out("# Updating hparams.%s: %s -> %s" %
(key, str(getattr(hparams, key)),
str(default_config[key])))
setattr(hparams, key, default_config[key])
return hparams
def create_or_load_hparams(
out_dir, default_hparams, hparams_path, save_hparams=True):
"""Create hparams or load hparams from out_dir."""
hparams = utils.load_hparams(out_dir)
if not hparams:
hparams = default_hparams
hparams = utils.maybe_parse_standard_hparams(
hparams, hparams_path)
else:
hparams = ensure_compatible_hparams(hparams, default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Save HParams
if save_hparams:
utils.save_hparams(out_dir, hparams)
for metric in hparams.metrics:
utils.save_hparams(getattr(hparams, "best_" + metric + "_dir"), hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def run_main(flags, default_hparams, train_fn, inference_fn, target_session=""):
"""Run main."""
# Job
jobid = flags.jobid
num_workers = flags.num_workers
utils.print_out("# Job id %d" % jobid)
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed + jobid)
np.random.seed(random_seed + jobid)
# Model output directory
out_dir = flags.out_dir
if out_dir and not tf.gfile.Exists(out_dir):
utils.print_out("# Creating output directory %s ..." % out_dir)
tf.gfile.MakeDirs(out_dir)
# Load hparams.
loaded_hparams = False
if flags.ckpt: # Try to load hparams from the same directory as ckpt
ckpt_dir = os.path.dirname(flags.ckpt)
ckpt_hparams_file = os.path.join(ckpt_dir, "hparams")
if tf.gfile.Exists(ckpt_hparams_file) or flags.hparams_path:
hparams = create_or_load_hparams(
ckpt_dir, default_hparams, flags.hparams_path,
save_hparams=False)
loaded_hparams = True
if not loaded_hparams: # Try to load from out_dir
assert out_dir
hparams = create_or_load_hparams(
out_dir, default_hparams, flags.hparams_path,
save_hparams=(jobid == 0))
# GPU device
config_proto = utils.get_config_proto(
allow_soft_placement=True,
num_intra_threads=hparams.num_intra_threads,
num_inter_threads=hparams.num_inter_threads)
utils.print_out(
"# Devices visible to TensorFlow: %s"
% repr(tf.Session(config=config_proto).list_devices()))
## Train / Decode
if flags.inference_input_file:
# Inference output directory
trans_file = flags.inference_output_file
assert trans_file
trans_dir = os.path.dirname(trans_file)
if not tf.gfile.Exists(trans_dir): tf.gfile.MakeDirs(trans_dir)
# Inference indices
hparams.inference_indices = None
if flags.inference_list:
      hparams.inference_indices = [
          int(token) for token in flags.inference_list.split(",")]
# Inference
ckpt = flags.ckpt
if not ckpt:
ckpt = tf.train.latest_checkpoint(out_dir)
inference_fn(flags.run,flags.iterations,ckpt, flags.inference_input_file,
trans_file, hparams, num_workers, jobid)
# Evaluation
if flags.run == 'accuracy':
ref_file = flags.inference_ref_file
if ref_file and tf.gfile.Exists(trans_file):
for metric in hparams.metrics:
score = evaluation_utils.evaluate(
ref_file,
trans_file,
metric,
hparams.subword_option)
utils.print_out(" %s: %.1f" % (metric, score))
else:
# Train
train_fn(hparams, target_session=target_session)
def main(unused_argv):
default_hparams = create_hparams(FLAGS)
train_fn = train.train
inference_fn = inference.inference
run_main(FLAGS, default_hparams, train_fn, inference_fn)
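def _example_programmatic_flags():
  """Hedged sketch (not part of the original script): build FLAGS without a
  command line, e.g. for notebooks or tests. The data paths and language
  suffixes below are assumptions for illustration; the returned namespace can
  be passed to create_hparams() and run_main() just like FLAGS in main()."""
  parser = argparse.ArgumentParser()
  add_arguments(parser)
  flags, _ = parser.parse_known_args([
      '--src=vi', '--tgt=en',
      '--vocab_prefix=/tmp/nmt_data/vocab',
      '--train_prefix=/tmp/nmt_data/train',
      '--dev_prefix=/tmp/nmt_data/tst2012',
      '--test_prefix=/tmp/nmt_data/tst2013',
      '--out_dir=/tmp/nmt_model',
  ])
  return flags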
if __name__ == "__main__":
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| {
"content_hash": "2a52de6c2879b313db35dcddb84de45c",
"timestamp": "",
"source": "github",
"line_count": 702,
"max_line_length": 80,
"avg_line_length": 41.585470085470085,
"alnum_prop": 0.6281642859589628,
"repo_name": "mlperf/inference_results_v0.5",
"id": "ce9e683e145e3e92c10d3b5f5a5065ec4930785a",
"size": "29871",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "closed/CentaurTechnology/code/gnmt/0/nmt/nmt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3233"
},
{
"name": "C",
"bytes": "3952061"
},
{
"name": "C++",
"bytes": "4248758"
},
{
"name": "CMake",
"bytes": "74513"
},
{
"name": "CSS",
"bytes": "28485"
},
{
"name": "Cuda",
"bytes": "234319"
},
{
"name": "Dockerfile",
"bytes": "18506"
},
{
"name": "HTML",
"bytes": "2890"
},
{
"name": "Makefile",
"bytes": "76919"
},
{
"name": "Python",
"bytes": "1573121"
},
{
"name": "Shell",
"bytes": "151430"
}
],
"symlink_target": ""
} |
"""Profiler client APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
def start_tracing(service_addr,
logdir,
duration_ms,
worker_list='',
include_dataset_ops=True,
                  num_tracing_attempts=3):
  """Sends gRPC requests to the profiler server to perform on-demand profiling.
  Note: This method blocks the caller thread until it receives the tracing
  result.
Args:
service_addr: Address of profiler service e.g. localhost:6009.
logdir: Path of TensorBoard log directory e.g. /tmp/tb_log.
duration_ms: Duration of tracing or monitoring in ms.
worker_list: The list of worker TPUs that we are about to profile in the
current session. (TPU only)
include_dataset_ops: Set to false to profile longer traces.
num_tracing_attempts: Automatically retry N times when no trace event is
collected.
Raises:
UnavailableError: If no trace event is collected.
"""
# TODO(fishx): Uses errors.raise_exception_on_not_ok_status instead.
if not pywrap_tensorflow.TFE_ProfilerClientStartTracing(
service_addr, logdir, worker_list, include_dataset_ops, duration_ms,
num_tracing_attempts):
raise errors.UnavailableError(None, None, 'No trace event is collected.')
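if __name__ == '__main__':
  # Hedged usage sketch (not part of the original module): trace a locally
  # running profiler server for two seconds and write the result under a
  # TensorBoard log directory. The address and logdir values are assumptions.
  start_tracing('localhost:6009', '/tmp/tb_log', duration_ms=2000)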
| {
"content_hash": "4e5cc5567f9889fba4c11d5400626abd",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 38.18421052631579,
"alnum_prop": 0.6995175740868367,
"repo_name": "kevin-coder/tensorflow-fork",
"id": "6f09d8b63419f4f837f74cd59fb1b3083b7d968b",
"size": "2140",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/profiler_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9117"
},
{
"name": "C",
"bytes": "340300"
},
{
"name": "C++",
"bytes": "39383425"
},
{
"name": "CMake",
"bytes": "194940"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33617202"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425910"
}
],
"symlink_target": ""
} |
import tempfile
try:
import requests
HAVE_REQUESTS = True
except ImportError:
HAVE_REQUESTS = False
from viper.common.out import bold
from viper.common.abstracts import Module
from viper.core.session import __sessions__
VIRUSTOTAL_URL = 'https://www.virustotal.com/vtapi/v2/file/report'
VIRUSTOTAL_URL_SUBMIT = 'https://www.virustotal.com/vtapi/v2/file/scan'
VIRUSTOTAL_URL_DOWNLOAD = 'https://www.virustotal.com/vtapi/v2/file/download'
VIRUSTOTAL_URL_COMMENT = 'https://www.virustotal.com/vtapi/v2/comments/put'
KEY = 'a0283a2c3d55728300d064874239b5346fb991317e8449fe43c902879d758088'
class VirusTotal(Module):
cmd = 'virustotal'
description = 'Lookup the file on VirusTotal'
authors = ['nex']
def __init__(self):
super(VirusTotal, self).__init__()
self.parser.add_argument('-s', '--submit', action='store_true', help='Submit file to VirusTotal (by default it only looks up the hash)')
self.parser.add_argument('-d','--download', action='store', dest='hash')
self.parser.add_argument('-c','--comment',nargs='+', action='store', dest='comment')
def run(self):
super(VirusTotal, self).run()
if self.args is None:
return
if self.args.hash:
try:
params = {'apikey': KEY,'hash':self.args.hash}
response = requests.get(VIRUSTOTAL_URL_DOWNLOAD, params=params)
if response.status_code == 403:
                    self.log('error', 'This command requires a VirusTotal private API key')
                    self.log('error', 'Please check that your key has the right permissions')
return
if response.status_code == 200:
response = response.content
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(response)
tmp.close()
return __sessions__.new(tmp.name)
except Exception as e:
self.log('error', "Failed Download: {0}".format(e))
if not HAVE_REQUESTS:
self.log('error', "Missing dependency, install requests (`pip install requests`)")
return
if not __sessions__.is_set():
self.log('error', "No session opened")
return
data = {'resource': __sessions__.current.file.md5, 'apikey': KEY}
try:
response = requests.post(VIRUSTOTAL_URL, data=data)
except Exception as e:
self.log('error', "Failed performing request: {0}".format(e))
return
try:
virustotal = response.json()
            # older requests versions expose response.json as a dict attribute, so calling it raises TypeError
except Exception as e:
            # fall back to accessing response.json as an attribute
if str(e) == "'dict' object is not callable":
try:
virustotal = response.json
except Exception as e:
self.log('error', "Failed parsing the response: {0}".format(e))
self.log('error', "Data:\n{}".format(response.content))
return
else:
self.log('error', "Failed parsing the response: {0}".format(e))
self.log('error', "Data:\n{}".format(response.content))
return
rows = []
if 'scans' in virustotal:
for engine, signature in virustotal['scans'].items():
if signature['detected']:
signature = signature['result']
else:
signature = ''
rows.append([engine, signature])
rows.sort()
if rows:
self.log('info', "VirusTotal Report:")
self.log('table', dict(header=['Antivirus', 'Signature'], rows=rows))
if self.args.submit:
self.log('', "")
self.log('info', "The file is already available on VirusTotal, no need to submit")
else:
self.log('info', "The file does not appear to be on VirusTotal yet")
if self.args.submit:
try:
data = {'apikey': KEY}
files = {'file': open(__sessions__.current.file.path, 'rb').read()}
response = requests.post(VIRUSTOTAL_URL_SUBMIT, data=data, files=files)
except Exception as e:
self.log('error', "Failed Submit: {0}".format(e))
return
try:
virustotal = response.json()
                # older releases of the requests library exposed Response.json as a
                # property (a dict), so calling it raises "'dict' object is not callable"
            except Exception as e:
                # fall back to the property form used by those older requests releases
if str(e) == "'dict' object is not callable":
try:
virustotal = response.json
except Exception as e:
self.log('error', "Failed parsing the response: {0}".format(e))
self.log('error', "Data:\n{}".format(response.content))
return
else:
self.log('error', "Failed parsing the response: {0}".format(e))
self.log('error', "Data:\n{}".format(response.content))
return
if 'verbose_msg' in virustotal:
self.log('info', "{}: {}".format(bold("VirusTotal message"), virustotal['verbose_msg']))
if self.args.comment:
try:
data = {'apikey' : KEY, 'resource': __sessions__.current.file.md5, 'comment' : ' '.join(self.args.comment)}
response = requests.post(VIRUSTOTAL_URL_COMMENT,data=data)
except Exception as e:
self.log('error',"Failed Submit Comment: {0}".format(e))
return
try:
virustotal = response.json()
                # older releases of the requests library exposed Response.json as a
                # property (a dict), so calling it raises "'dict' object is not callable"
            except Exception as e:
                # fall back to the property form used by those older requests releases
if str(e) == "'dict' object is not callable":
try:
virustotal = response.json
except Exception as e:
self.log('error',"Failed parsing the response: {0}".format(e))
self.log('error',"Data:\n{}".format(response.content))
return
else:
self.log('error',"Failed parsing the response: {0}".format(e))
self.log('error',"Data:\n{}".format(response.content))
return
if 'verbose_msg' in virustotal:
self.log('info',("{}: {}".format(bold("VirusTotal message"), virustotal['verbose_msg'])))
return
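# --- Illustrative sketch (not part of the original module) ---
# A standalone, hash-only lookup roughly equivalent to what run() does above,
# reusing the module-level VIRUSTOTAL_URL and KEY. It assumes the `requests`
# import above succeeded; the function name and return shape are arbitrary
# choices for this example, not part of the Viper API.
def _example_hash_lookup(file_hash, api_key=KEY):
    data = {'resource': file_hash, 'apikey': api_key}
    report = requests.post(VIRUSTOTAL_URL, data=data).json()
    # response_code 1 means VirusTotal knows the hash (v2 API convention)
    if report.get('response_code') != 1:
        return {}
    # keep only engines that flagged the sample, mirroring the table built in run()
    return {engine: result['result']
            for engine, result in report.get('scans', {}).items()
            if result.get('detected')}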
| {
"content_hash": "ed7ffceb61c6cf80a811d47fcbd3888b",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 144,
"avg_line_length": 40.94705882352941,
"alnum_prop": 0.5216204568309151,
"repo_name": "jorik041/viper",
"id": "5e2c4d76dc1d34acedcc16b2efea8c0ec3412d5a",
"size": "7078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/virustotal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1052"
},
{
"name": "JavaScript",
"bytes": "9295"
},
{
"name": "Python",
"bytes": "1321042"
},
{
"name": "Smarty",
"bytes": "30004"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import xmlrpclib
from .genicode import GENICODE
class SfaFault(xmlrpclib.Fault):
def __init__(self, faultCode, faultString, extra = None):
if extra:
faultString += ": " + str(extra)
xmlrpclib.Fault.__init__(self, faultCode, faultString)
class SfaInvalidAPIMethod(SfaFault):
def __init__(self, method, interface = None, extra = None):
faultString = "Invalid method " + method
if interface:
faultString += " for interface " + interface
SfaFault.__init__(self, GENICODE.UNSUPPORTED, faultString, extra)
class SfaInvalidArgumentCount(SfaFault):
    def __init__(self, got, min, max = None, extra = None):
        # the original default `max = min` bound the builtin min() at definition
        # time; treat a missing max as equal to the given minimum instead
        if max is None:
            max = min
        if min != max:
expected = "%d-%d" % (min, max)
else:
expected = "%d" % min
faultString = "Expected %s arguments, got %d" % \
(expected, got)
SfaFault.__init__(self, GENICODE.BADARGS, faultString, extra)
class SfaInvalidArgument(SfaFault):
def __init__(self, extra = None, name = None):
if name is not None:
faultString = "Invalid %s value" % name
else:
faultString = "Invalid argument"
SfaFault.__init__(self, GENICODE.BADARGS, faultString, extra)
class SfaAuthenticationFailure(SfaFault):
def __init__(self, extra = None):
faultString = "Failed to authenticate call"
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
class SfaDBError(SfaFault):
def __init__(self, extra = None):
faultString = "Database error"
SfaFault.__init__(self, GENICODE.DBERROR, faultString, extra)
class SfaPermissionDenied(SfaFault):
def __init__(self, extra = None):
faultString = "Permission denied"
SfaFault.__init__(self, GENICODE.FORBIDDEN, faultString, extra)
class SfaNotImplemented(SfaFault):
def __init__(self, interface=None, extra = None):
faultString = "Not implemented"
if interface:
faultString += " at interface " + interface
SfaFault.__init__(self, GENICODE.UNSUPPORTED, faultString, extra)
class SfaAPIError(SfaFault):
def __init__(self, extra = None):
faultString = "Internal SFA API error"
SfaFault.__init__(self, GENICODE.SERVERERROR, faultString, extra)
class MalformedHrnException(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Malformed HRN: %(value)s" % locals()
        SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class TreeException(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Tree Exception: %(value)s, " % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class NonExistingRecord(SfaFault):
def __init__(self, value, extra = None):
self.value = value
        faultString = "Non-existing record %(value)s, " % locals()
SfaFault.__init__(self, GENICODE.SEARCHFAILED, faultString, extra)
def __str__(self):
return repr(self.value)
class ExistingRecord(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Existing record: %(value)s, " % locals()
SfaFault.__init__(self, GENICODE.REFUSED, faultString, extra)
def __str__(self):
return repr(self.value)
class InvalidRPCParams(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Invalid RPC Params: %(value)s, " % locals()
SfaFault.__init__(self, GENICODE.RPCERROR, faultString, extra)
def __str__(self):
return repr(self.value)
# SMBAKER exceptions follow
class ConnectionKeyGIDMismatch(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Connection Key GID mismatch: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class MissingCallerGID(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Missing Caller GID: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class RecordNotFound(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Record not found: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class UnknownSfaType(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Unknown SFA Type: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class MissingAuthority(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Missing authority: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class PlanetLabRecordDoesNotExist(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "PlanetLab record does not exist : %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class PermissionError(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Permission error: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.FORBIDDEN, faultString, extra)
def __str__(self):
return repr(self.value)
class InsufficientRights(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Insufficient rights: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.FORBIDDEN, faultString, extra)
def __str__(self):
return repr(self.value)
class MissingDelegateBit(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Missing delegate bit: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.FORBIDDEN, faultString, extra)
def __str__(self):
return repr(self.value)
class ChildRightsNotSubsetOfParent(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Child rights not subset of parent: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.FORBIDDEN, faultString, extra)
def __str__(self):
return repr(self.value)
class CertMissingParent(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Cert missing parent: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class CertNotSignedByParent(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Cert not signed by parent: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class GidParentHrn(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Cert URN is not an extension of its parent: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class GidInvalidParentHrn(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "GID invalid parent hrn: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class SliverDoesNotExist(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Sliver does not exist : %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class BadRequestHash(xmlrpclib.Fault):
def __init__(self, hash = None, extra = None):
faultString = "bad request hash: " + str(hash)
xmlrpclib.Fault.__init__(self, GENICODE.ERROR, faultString)
class MissingTrustedRoots(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Trusted root directory does not exist: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.SERVERERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class MissingSfaInfo(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Missing information: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class InvalidRSpec(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Invalid RSpec: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class InvalidRSpecVersion(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Invalid RSpec version: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.BADVERSION, faultString, extra)
def __str__(self):
return repr(self.value)
class UnsupportedRSpecVersion(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Unsupported RSpec version: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.UNSUPPORTED, faultString, extra)
def __str__(self):
return repr(self.value)
class InvalidRSpecElement(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Invalid RSpec Element: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class InvalidXML(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Invalid XML Document: %(value)s" % locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class AccountNotEnabled(SfaFault):
def __init__(self, extra = None):
faultString = "Account Disabled"
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
    def __str__(self):
        # this fault carries no self.value; fall back to the fault string
        return repr(self.faultString)
class CredentialNotVerifiable(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Unable to verify credential: %(value)s, " %locals()
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
def __str__(self):
return repr(self.value)
class CertExpired(SfaFault):
def __init__(self, value, extra=None):
self.value = value
faultString = "%s cert is expired" % value
SfaFault.__init__(self, GENICODE.ERROR, faultString, extra)
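# --- Illustrative sketch (not part of the original module) ---
# Minimal example of how these fault classes are meant to be used: raise one and
# inspect the standard xmlrpclib.Fault attributes (faultCode carries the GENICODE
# value, faultString the human-readable message). The boolean flag is a
# hypothetical stand-in for a real authorization check.
def _example_fault_usage(caller_is_allowed=False):
    try:
        if not caller_is_allowed:
            raise SfaPermissionDenied("caller lacks the required credential")
        return "ok"
    except SfaFault as fault:
        # faultCode is GENICODE.FORBIDDEN; faultString reads
        # "Permission denied: caller lacks the required credential"
        return (fault.faultCode, fault.faultString)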
| {
"content_hash": "6d9172621df8746febd118057da477b5",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 88,
"avg_line_length": 37.37012987012987,
"alnum_prop": 0.6214596003475239,
"repo_name": "tcmitchell/geni-tools",
"id": "6e7a1d47f1077de09817c044f8b2b405a26f967b",
"size": "12765",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "src/gcf/sfa/util/faults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "87278"
},
{
"name": "M4",
"bytes": "217"
},
{
"name": "Makefile",
"bytes": "5631"
},
{
"name": "Python",
"bytes": "2611491"
},
{
"name": "Shell",
"bytes": "12644"
},
{
"name": "Visual Basic",
"bytes": "668"
}
],
"symlink_target": ""
} |
setupdict= {
'name': 'txAMQP',
'version': '0.6.2',
'author': 'Esteve Fernandez',
'author_email': '[email protected]',
'url': 'https://launchpad.net/txamqp',
'description': 'Python library for communicating with AMQP peers and brokers using Twisted',
'long_description': '''This project contains all the necessary code to connect, send and receive messages to/from an AMQP-compliant peer or broker (Qpid, OpenAMQ, RabbitMQ) using Twisted.
It also includes support for using Thrift RPC over AMQP in Twisted applications.
txAMQP is sponsored by the friendly folks at Fluidinfo (http://www.fluidinfo.com).'''
}
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
setupdict['packages'] = ['txamqp', 'txamqp.contrib', 'txamqp.contrib.thrift']
setupdict['package_dir'] = {
'txamqp': 'src/txamqp',
'txamqp.contrib': 'src/txamqp/contrib',
'txamqp.contrib.thrift': 'src/txamqp/contrib/thrift',
}
else:
setupdict['packages'] = find_packages('src')
setupdict['package_dir'] = { '': 'src' }
setupdict['install_requires'] = ['Twisted']
setup(**setupdict)
| {
"content_hash": "a964c7f9332b5d266889f84f996128e7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 191,
"avg_line_length": 39.8,
"alnum_prop": 0.6758793969849246,
"repo_name": "beevek/txamqp",
"id": "026b154abc14334a9d659d7d70f2559c9b5d61ac",
"size": "1194",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "151979"
}
],
"symlink_target": ""
} |
import os
import sys
import builtins
import contextlib
import difflib
import inspect
import pydoc
import keyword
import _pickle
import pkgutil
import re
import string
import test.support
import time
import types
import unittest
import xml.etree
import textwrap
from io import StringIO
from collections import namedtuple
from test.script_helper import assert_python_ok
from test.support import (
TESTFN, rmtree,
reap_children, reap_threads, captured_output, captured_stdout,
captured_stderr, unlink, requires_docstrings
)
from test import pydoc_mod
try:
import threading
except ImportError:
threading = None
if test.support.HAVE_DOCSTRINGS:
expected_data_docstrings = (
'dictionary for instance variables (if defined)',
'list of weak references to the object (if defined)',
) * 2
else:
expected_data_docstrings = ('', '', '', '')
expected_text_pattern = """
NAME
test.pydoc_mod - This is a test module for test_pydoc
%s
CLASSES
builtins.object
A
B
\x20\x20\x20\x20
class A(builtins.object)
| Hello and goodbye
|\x20\x20
| Methods defined here:
|\x20\x20
| __init__()
| Wow, I have no function!
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors defined here:
|\x20\x20
| __dict__%s
|\x20\x20
| __weakref__%s
\x20\x20\x20\x20
class B(builtins.object)
| Data descriptors defined here:
|\x20\x20
| __dict__%s
|\x20\x20
| __weakref__%s
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|\x20\x20
| NO_MEANING = 'eggs'
FUNCTIONS
doc_func()
This function solves all of the world's problems:
hunger
lack of Python
war
\x20\x20\x20\x20
nodoc_func()
DATA
__xyz__ = 'X, Y and Z'
VERSION
1.2.3.4
AUTHOR
Benjamin Peterson
CREDITS
Nobody
FILE
%s
""".strip()
expected_text_data_docstrings = tuple('\n | ' + s if s else ''
for s in expected_data_docstrings)
expected_html_pattern = """
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="#7799ee">
<td valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"> <br><big><big><strong><a href="test.html"><font color="#ffffff">test</font></a>.pydoc_mod</strong></big></big> (version 1.2.3.4)</font></td
><td align=right valign=bottom
><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="file:%s">%s</a>%s</font></td></tr></table>
<p><tt>This is a test module for test_pydoc</tt></p>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ee77aa">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ee77aa"><tt> </tt></td><td> </td>
<td width="100%%"><dl>
<dt><font face="helvetica, arial"><a href="builtins.html#object">builtins.object</a>
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#A">A</a>
</font></dt><dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#B">B</a>
</font></dt></dl>
</dd>
</dl>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="A">class <strong>A</strong></a>(<a href="builtins.html#object">builtins.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr bgcolor="#ffc8d8"><td rowspan=2><tt> </tt></td>
<td colspan=2><tt>Hello and goodbye<br> </tt></td></tr>
<tr><td> </td>
<td width="100%%">Methods defined here:<br>
<dl><dt><a name="A-__init__"><strong>__init__</strong></a>()</dt><dd><tt>Wow, I have no function!</tt></dd></dl>
<hr>
Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
</td></tr></table> <p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="B">class <strong>B</strong></a>(<a href="builtins.html#object">builtins.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ffc8d8"><tt> </tt></td><td> </td>
<td width="100%%">Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
<hr>
Data and other attributes defined here:<br>
<dl><dt><strong>NO_MEANING</strong> = 'eggs'</dl>
</td></tr></table></td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#eeaa77">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#eeaa77"><tt> </tt></td><td> </td>
<td width="100%%"><dl><dt><a name="-doc_func"><strong>doc_func</strong></a>()</dt><dd><tt>This function solves all of the world's problems:<br>
hunger<br>
lack of Python<br>
war</tt></dd></dl>
<dl><dt><a name="-nodoc_func"><strong>nodoc_func</strong></a>()</dt></dl>
</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#55aa55">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#55aa55"><tt> </tt></td><td> </td>
<td width="100%%"><strong>__xyz__</strong> = 'X, Y and Z'</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Author</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Benjamin Peterson</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Credits</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Nobody</td></tr></table>
""".strip() # ' <- emacs turd
expected_html_data_docstrings = tuple(s.replace(' ', ' ')
for s in expected_data_docstrings)
# output pattern for missing module
missing_pattern = "no Python documentation found for '%s'"
# output pattern for module with bad imports
badimport_pattern = "problem in %s - ImportError: No module named %r"
expected_dynamicattribute_pattern = """
Help on class DA in module %s:
class DA(builtins.object)
| Data descriptors defined here:
|\x20\x20
| __dict__%s
|\x20\x20
| __weakref__%s
|\x20\x20
| ham
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes inherited from Meta:
|\x20\x20
| ham = 'spam'
""".strip()
expected_virtualattribute_pattern1 = """
Help on class Class in module %s:
class Class(builtins.object)
| Data and other attributes inherited from Meta:
|\x20\x20
| LIFE = 42
""".strip()
expected_virtualattribute_pattern2 = """
Help on class Class1 in module %s:
class Class1(builtins.object)
| Data and other attributes inherited from Meta1:
|\x20\x20
| one = 1
""".strip()
expected_virtualattribute_pattern3 = """
Help on class Class2 in module %s:
class Class2(Class1)
| Method resolution order:
| Class2
| Class1
| builtins.object
|\x20\x20
| Data and other attributes inherited from Meta1:
|\x20\x20
| one = 1
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes inherited from Meta3:
|\x20\x20
| three = 3
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes inherited from Meta2:
|\x20\x20
| two = 2
""".strip()
expected_missingattribute_pattern = """
Help on class C in module %s:
class C(builtins.object)
| Data and other attributes defined here:
|\x20\x20
| here = 'present!'
""".strip()
def run_pydoc(module_name, *args, **env):
"""
Runs pydoc on the specified module. Returns the stripped
output of pydoc.
"""
args = args + (module_name,)
# do not write bytecode files to avoid caching errors
rc, out, err = assert_python_ok('-B', pydoc.__file__, *args, **env)
return out.strip()
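# --- Illustrative sketch (not part of the original test suite) ---
# The same idea as run_pydoc() above without the regrtest helper: invoke pydoc
# as a script in a child interpreter (-B keeps it from writing bytecode files,
# matching the comment above) and capture its output. The module name is an
# arbitrary example value.
def _example_run_pydoc_directly(module_name='json'):
    import subprocess
    out = subprocess.check_output(
        [sys.executable, '-B', pydoc.__file__, module_name])
    return out.strip()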
def get_pydoc_html(module):
"Returns pydoc generated output as html"
doc = pydoc.HTMLDoc()
output = doc.docmodule(module)
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "<br><a href=\"" + loc + "\">Module Docs</a>"
return output.strip(), loc
def get_pydoc_text(module):
"Returns pydoc generated output as text"
doc = pydoc.TextDoc()
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "\nMODULE DOCS\n " + loc + "\n"
output = doc.docmodule(module)
# clean up the extra text formatting that pydoc performs
patt = re.compile('\b.')
output = patt.sub('', output)
return output.strip(), loc
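# --- Illustrative sketch (not part of the original test suite) ---
# Why get_pydoc_text() strips '\b': TextDoc marks bold text by overstriking,
# emitting each character as "X\bX"; removing the backspace pairs recovers the
# plain text, which is also what pydoc.plain() does.
def _example_overstrike():
  raw = pydoc.TextDoc().bold('NAME')  # 'N\x08NA\x08AM\x08ME\x08E'
  return pydoc.plain(raw)  # 'NAME'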
def print_diffs(text1, text2):
"Prints unified diffs for two texts"
# XXX now obsolete, use unittest built-in support
lines1 = text1.splitlines(keepends=True)
lines2 = text2.splitlines(keepends=True)
diffs = difflib.unified_diff(lines1, lines2, n=0, fromfile='expected',
tofile='got')
print('\n' + ''.join(diffs))
def get_html_title(text):
  # A bit of a hack, but good enough for test purposes
header, _, _ = text.partition("</head>")
_, _, title = header.partition("<title>")
title, _, _ = title.partition("</title>")
return title
class PydocBaseTest(unittest.TestCase):
def _restricted_walk_packages(self, walk_packages, path=None):
"""
A version of pkgutil.walk_packages() that will restrict itself to
a given path.
"""
default_path = path or [os.path.dirname(__file__)]
def wrapper(path=None, prefix='', onerror=None):
return walk_packages(path or default_path, prefix, onerror)
return wrapper
@contextlib.contextmanager
def restrict_walk_packages(self, path=None):
walk_packages = pkgutil.walk_packages
pkgutil.walk_packages = self._restricted_walk_packages(walk_packages,
path)
try:
yield
finally:
pkgutil.walk_packages = walk_packages
class PydocDocTest(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_html_doc(self):
result, doc_loc = get_pydoc_html(pydoc_mod)
mod_file = inspect.getabsfile(pydoc_mod)
if sys.platform == 'win32':
import nturl2path
mod_url = nturl2path.pathname2url(mod_file)
else:
mod_url = mod_file
expected_html = expected_html_pattern % (
(mod_url, mod_file, doc_loc) +
expected_html_data_docstrings)
if result != expected_html:
print_diffs(expected_html, result)
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_text_doc(self):
result, doc_loc = get_pydoc_text(pydoc_mod)
expected_text = expected_text_pattern % (
(doc_loc,) +
expected_text_data_docstrings +
(inspect.getabsfile(pydoc_mod),))
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
def test_issue8225(self):
# Test issue8225 to ensure no doc link appears for xml.etree
result, doc_loc = get_pydoc_text(xml.etree)
self.assertEqual(doc_loc, "", "MODULE DOCS incorrectly includes a link")
def test_non_str_name(self):
# issue14638
# Treat illegal (non-str) name like no name
class A:
__name__ = 42
class B:
pass
adoc = pydoc.render_doc(A())
bdoc = pydoc.render_doc(B())
self.assertEqual(adoc.replace("A", "B"), bdoc)
def test_not_here(self):
missing_module = "test.i_am_not_here"
result = str(run_pydoc(missing_module), 'ascii')
expected = missing_pattern % missing_module
self.assertEqual(expected, result,
"documentation for missing module found")
def test_input_strip(self):
missing_module = " test.i_am_not_here "
result = str(run_pydoc(missing_module), 'ascii')
expected = missing_pattern % missing_module.strip()
self.assertEqual(expected, result)
def test_stripid(self):
# test with strings, other implementations might have different repr()
stripid = pydoc.stripid
# strip the id
self.assertEqual(stripid('<function stripid at 0x88dcee4>'),
'<function stripid>')
self.assertEqual(stripid('<function stripid at 0x01F65390>'),
'<function stripid>')
# nothing to strip, return the same text
self.assertEqual(stripid('42'), '42')
self.assertEqual(stripid("<type 'exceptions.Exception'>"),
"<type 'exceptions.Exception'>")
@unittest.skipIf(sys.flags.optimize >= 2,
'Docstrings are omitted with -O2 and above')
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_help_output_redirect(self):
# issue 940286, if output is set in Helper, then all output from
# Helper.help should be redirected
old_pattern = expected_text_pattern
getpager_old = pydoc.getpager
getpager_new = lambda: (lambda x: x)
self.maxDiff = None
buf = StringIO()
helper = pydoc.Helper(output=buf)
unused, doc_loc = get_pydoc_text(pydoc_mod)
module = "test.pydoc_mod"
help_header = """
Help on module test.pydoc_mod in test:
""".lstrip()
help_header = textwrap.dedent(help_header)
expected_help_pattern = help_header + expected_text_pattern
pydoc.getpager = getpager_new
try:
with captured_output('stdout') as output, \
captured_output('stderr') as err:
helper.help(module)
result = buf.getvalue().strip()
expected_text = expected_help_pattern % (
(doc_loc,) +
expected_text_data_docstrings +
(inspect.getabsfile(pydoc_mod),))
self.assertEqual('', output.getvalue())
self.assertEqual('', err.getvalue())
self.assertEqual(expected_text, result)
finally:
pydoc.getpager = getpager_old
def test_namedtuple_public_underscore(self):
NT = namedtuple('NT', ['abc', 'def'], rename=True)
with captured_stdout() as help_io:
pydoc.help(NT)
helptext = help_io.getvalue()
self.assertIn('_1', helptext)
self.assertIn('_replace', helptext)
self.assertIn('_asdict', helptext)
def test_synopsis(self):
self.addCleanup(unlink, TESTFN)
for encoding in ('ISO-8859-1', 'UTF-8'):
with open(TESTFN, 'w', encoding=encoding) as script:
if encoding != 'UTF-8':
print('#coding: {}'.format(encoding), file=script)
print('"""line 1: h\xe9', file=script)
print('line 2: hi"""', file=script)
synopsis = pydoc.synopsis(TESTFN, {})
self.assertEqual(synopsis, 'line 1: h\xe9')
def test_synopsis_sourceless(self):
expected = os.__doc__.splitlines()[0]
filename = os.__cached__
synopsis = pydoc.synopsis(filename)
self.assertEqual(synopsis, expected)
def test_splitdoc_with_description(self):
example_string = "I Am A Doc\n\n\nHere is my description"
self.assertEqual(pydoc.splitdoc(example_string),
('I Am A Doc', '\nHere is my description'))
def test_is_object_or_method(self):
doc = pydoc.Doc()
# Bound Method
self.assertTrue(pydoc._is_some_method(doc.fail))
# Method Descriptor
self.assertTrue(pydoc._is_some_method(int.__add__))
# String
self.assertFalse(pydoc._is_some_method("I am not a method"))
def test_is_package_when_not_package(self):
with test.support.temp_cwd() as test_dir:
self.assertFalse(pydoc.ispackage(test_dir))
def test_is_package_when_is_package(self):
with test.support.temp_cwd() as test_dir:
init_path = os.path.join(test_dir, '__init__.py')
open(init_path, 'w').close()
self.assertTrue(pydoc.ispackage(test_dir))
os.remove(init_path)
def test_allmethods(self):
# issue 17476: allmethods was no longer returning unbound methods.
# This test is a bit fragile in the face of changes to object and type,
# but I can't think of a better way to do it without duplicating the
# logic of the function under test.
class TestClass(object):
def method_returning_true(self):
return True
# What we expect to get back: everything on object...
expected = dict(vars(object))
# ...plus our unbound method...
expected['method_returning_true'] = TestClass.method_returning_true
# ...but not the non-methods on object.
del expected['__doc__']
del expected['__class__']
# inspect resolves descriptors on type into methods, but vars doesn't,
# so we need to update __subclasshook__.
expected['__subclasshook__'] = TestClass.__subclasshook__
methods = pydoc.allmethods(TestClass)
self.assertDictEqual(methods, expected)
class PydocImportTest(PydocBaseTest):
def setUp(self):
self.test_dir = os.mkdir(TESTFN)
self.addCleanup(rmtree, TESTFN)
def test_badimport(self):
# This tests the fix for issue 5230, where if pydoc found the module
# but the module had an internal import error pydoc would report no doc
# found.
modname = 'testmod_xyzzy'
testpairs = (
('i_am_not_here', 'i_am_not_here'),
('test.i_am_not_here_either', 'test.i_am_not_here_either'),
('test.i_am_not_here.neither_am_i', 'test.i_am_not_here'),
('i_am_not_here.{}'.format(modname), 'i_am_not_here'),
('test.{}'.format(modname), 'test.{}'.format(modname)),
)
sourcefn = os.path.join(TESTFN, modname) + os.extsep + "py"
for importstring, expectedinmsg in testpairs:
with open(sourcefn, 'w') as f:
f.write("import {}\n".format(importstring))
result = run_pydoc(modname, PYTHONPATH=TESTFN).decode("ascii")
expected = badimport_pattern % (modname, expectedinmsg)
self.assertEqual(expected, result)
def test_apropos_with_bad_package(self):
# Issue 7425 - pydoc -k failed when bad package on path
pkgdir = os.path.join(TESTFN, "syntaxerr")
os.mkdir(pkgdir)
badsyntax = os.path.join(pkgdir, "__init__") + os.extsep + "py"
with open(badsyntax, 'w') as f:
f.write("invalid python syntax = $1\n")
with self.restrict_walk_packages(path=[TESTFN]):
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('xyzzy')
# No result, no error
self.assertEqual(out.getvalue(), '')
self.assertEqual(err.getvalue(), '')
# The package name is still matched
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('syntaxerr')
self.assertEqual(out.getvalue().strip(), 'syntaxerr')
self.assertEqual(err.getvalue(), '')
def test_apropos_with_unreadable_dir(self):
# Issue 7367 - pydoc -k failed when unreadable dir on path
self.unreadable_dir = os.path.join(TESTFN, "unreadable")
os.mkdir(self.unreadable_dir, 0)
self.addCleanup(os.rmdir, self.unreadable_dir)
# Note, on Windows the directory appears to be still
# readable so this is not really testing the issue there
with self.restrict_walk_packages(path=[TESTFN]):
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('SOMEKEY')
# No result, no error
self.assertEqual(out.getvalue(), '')
self.assertEqual(err.getvalue(), '')
  @unittest.skip('causes undesirable side-effects (#20128)')
def test_modules(self):
# See Helper.listmodules().
num_header_lines = 2
num_module_lines_min = 5 # Playing it safe.
num_footer_lines = 3
expected = num_header_lines + num_module_lines_min + num_footer_lines
output = StringIO()
helper = pydoc.Helper(output=output)
helper('modules')
result = output.getvalue().strip()
num_lines = len(result.splitlines())
self.assertGreaterEqual(num_lines, expected)
  @unittest.skip('causes undesirable side-effects (#20128)')
def test_modules_search(self):
# See Helper.listmodules().
expected = 'pydoc - '
output = StringIO()
helper = pydoc.Helper(output=output)
with captured_stdout() as help_io:
helper('modules pydoc')
result = help_io.getvalue()
self.assertIn(expected, result)
@unittest.skip('some buildbots are not cooperating (#20128)')
def test_modules_search_builtin(self):
expected = 'gc - '
output = StringIO()
helper = pydoc.Helper(output=output)
with captured_stdout() as help_io:
helper('modules garbage')
result = help_io.getvalue()
self.assertTrue(result.startswith(expected))
def test_importfile(self):
loaded_pydoc = pydoc.importfile(pydoc.__file__)
self.assertIsNot(loaded_pydoc, pydoc)
self.assertEqual(loaded_pydoc.__name__, 'pydoc')
self.assertEqual(loaded_pydoc.__file__, pydoc.__file__)
self.assertEqual(loaded_pydoc.__spec__, pydoc.__spec__)
class TestDescriptions(unittest.TestCase):
def test_module(self):
# Check that pydocfodder module can be described
from test import pydocfodder
doc = pydoc.render_doc(pydocfodder)
self.assertIn("pydocfodder", doc)
def test_class(self):
class C: "New-style class"
c = C()
self.assertEqual(pydoc.describe(C), 'class C')
self.assertEqual(pydoc.describe(c), 'C')
expected = 'C in module %s object' % __name__
self.assertIn(expected, pydoc.render_doc(c))
def test_builtin(self):
for name in ('str', 'str.translate', 'builtins.str',
'builtins.str.translate'):
# test low-level function
self.assertIsNotNone(pydoc.locate(name))
# test high-level function
try:
pydoc.render_doc(name)
except ImportError:
        self.fail('finding the doc of {!r} failed'.format(name))
for name in ('notbuiltins', 'strrr', 'strr.translate',
'str.trrrranslate', 'builtins.strrr',
'builtins.str.trrranslate'):
self.assertIsNone(pydoc.locate(name))
self.assertRaises(ImportError, pydoc.render_doc, name)
@staticmethod
def _get_summary_line(o):
text = pydoc.plain(pydoc.render_doc(o))
lines = text.split('\n')
assert len(lines) >= 2
return lines[2]
# these should include "self"
def test_unbound_python_method(self):
self.assertEqual(self._get_summary_line(textwrap.TextWrapper.wrap),
"wrap(self, text)")
@requires_docstrings
def test_unbound_builtin_method(self):
self.assertEqual(self._get_summary_line(_pickle.Pickler.dump),
"dump(self, obj, /)")
# these no longer include "self"
def test_bound_python_method(self):
t = textwrap.TextWrapper()
self.assertEqual(self._get_summary_line(t.wrap),
"wrap(text) method of textwrap.TextWrapper instance")
@requires_docstrings
def test_bound_builtin_method(self):
s = StringIO()
p = _pickle.Pickler(s)
self.assertEqual(self._get_summary_line(p.dump),
"dump(obj, /) method of _pickle.Pickler instance")
# this should *never* include self!
@requires_docstrings
def test_module_level_callable(self):
self.assertEqual(self._get_summary_line(os.stat),
"stat(path, *, dir_fd=None, follow_symlinks=True)")
@unittest.skipUnless(threading, 'Threading required for this test.')
class PydocServerTest(unittest.TestCase):
"""Tests for pydoc._start_server"""
def test_server(self):
# Minimal test that starts the server, then stops it.
def my_url_handler(url, content_type):
text = 'the URL sent was: (%s, %s)' % (url, content_type)
return text
serverthread = pydoc._start_server(my_url_handler, port=0)
starttime = time.time()
timeout = 1 #seconds
while serverthread.serving:
time.sleep(.01)
if serverthread.serving and time.time() - starttime > timeout:
serverthread.stop()
break
self.assertEqual(serverthread.error, None)
class PydocUrlHandlerTest(PydocBaseTest):
"""Tests for pydoc._url_handler"""
def test_content_type_err(self):
f = pydoc._url_handler
self.assertRaises(TypeError, f, 'A', '')
self.assertRaises(TypeError, f, 'B', 'foobar')
def test_url_requests(self):
# Test for the correct title in the html pages returned.
# This tests the different parts of the URL handler without
# getting too picky about the exact html.
requests = [
("", "Pydoc: Index of Modules"),
("get?key=", "Pydoc: Index of Modules"),
("index", "Pydoc: Index of Modules"),
("topics", "Pydoc: Topics"),
("keywords", "Pydoc: Keywords"),
("pydoc", "Pydoc: module pydoc"),
("get?key=pydoc", "Pydoc: module pydoc"),
("search?key=pydoc", "Pydoc: Search Results"),
("topic?key=def", "Pydoc: KEYWORD def"),
("topic?key=STRINGS", "Pydoc: TOPIC STRINGS"),
("foobar", "Pydoc: Error - foobar"),
("getfile?key=foobar", "Pydoc: Error - getfile?key=foobar"),
]
with self.restrict_walk_packages():
for url, title in requests:
text = pydoc._url_handler(url, "text/html")
result = get_html_title(text)
self.assertEqual(result, title, text)
path = string.__file__
title = "Pydoc: getfile " + path
url = "getfile?key=" + path
text = pydoc._url_handler(url, "text/html")
result = get_html_title(text)
self.assertEqual(result, title)
class TestHelper(unittest.TestCase):
def test_keywords(self):
self.assertEqual(sorted(pydoc.Helper.keywords),
sorted(keyword.kwlist))
class PydocWithMetaClasses(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_DynamicClassAttribute(self):
class Meta(type):
def __getattr__(self, name):
if name == 'ham':
return 'spam'
return super().__getattr__(name)
class DA(metaclass=Meta):
@types.DynamicClassAttribute
def ham(self):
return 'eggs'
expected_text_data_docstrings = tuple('\n | ' + s if s else ''
for s in expected_data_docstrings)
output = StringIO()
helper = pydoc.Helper(output=output)
helper(DA)
expected_text = expected_dynamicattribute_pattern % (
(__name__,) + expected_text_data_docstrings[:2])
result = output.getvalue().strip()
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_virtualClassAttributeWithOneMeta(self):
class Meta(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'LIFE']
def __getattr__(self, name):
if name =='LIFE':
return 42
        return super().__getattr__(name)
class Class(metaclass=Meta):
pass
output = StringIO()
helper = pydoc.Helper(output=output)
helper(Class)
expected_text = expected_virtualattribute_pattern1 % __name__
result = output.getvalue().strip()
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_virtualClassAttributeWithTwoMeta(self):
class Meta1(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'one']
def __getattr__(self, name):
if name =='one':
return 1
return super().__getattr__(name)
class Meta2(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'two']
def __getattr__(self, name):
if name =='two':
return 2
return super().__getattr__(name)
class Meta3(Meta1, Meta2):
def __dir__(cls):
return list(sorted(set(
['__class__', '__module__', '__name__', 'three'] +
Meta1.__dir__(cls) + Meta2.__dir__(cls))))
def __getattr__(self, name):
if name =='three':
return 3
return super().__getattr__(name)
class Class1(metaclass=Meta1):
pass
class Class2(Class1, metaclass=Meta3):
pass
fail1 = fail2 = False
output = StringIO()
helper = pydoc.Helper(output=output)
helper(Class1)
expected_text1 = expected_virtualattribute_pattern2 % __name__
result1 = output.getvalue().strip()
if result1 != expected_text1:
print_diffs(expected_text1, result1)
fail1 = True
output = StringIO()
helper = pydoc.Helper(output=output)
helper(Class2)
expected_text2 = expected_virtualattribute_pattern3 % __name__
result2 = output.getvalue().strip()
if result2 != expected_text2:
print_diffs(expected_text2, result2)
fail2 = True
if fail1 or fail2:
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_buggy_dir(self):
class M(type):
def __dir__(cls):
return ['__class__', '__name__', 'missing', 'here']
class C(metaclass=M):
here = 'present!'
output = StringIO()
helper = pydoc.Helper(output=output)
helper(C)
expected_text = expected_missingattribute_pattern % __name__
result = output.getvalue().strip()
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
@reap_threads
def test_main():
try:
test.support.run_unittest(PydocDocTest,
PydocImportTest,
TestDescriptions,
PydocServerTest,
PydocUrlHandlerTest,
TestHelper,
PydocWithMetaClasses,
)
finally:
reap_children()
if __name__ == "__main__":
test_main()
| {
"content_hash": "59eefdc6ad1b98997b85e7e38400cf5b",
"timestamp": "",
"source": "github",
"line_count": 939,
"max_line_length": 191,
"avg_line_length": 36.815761448349306,
"alnum_prop": 0.5813711310384727,
"repo_name": "tpsatish95/Python-Workshop",
"id": "81b58a2e802700f6340ea46e474325a3b902385a",
"size": "34570",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/test/test_pydoc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "567768"
},
{
"name": "Batchfile",
"bytes": "15693"
},
{
"name": "C",
"bytes": "16448167"
},
{
"name": "C++",
"bytes": "260088"
},
{
"name": "CSS",
"bytes": "87526"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "218010"
},
{
"name": "HTML",
"bytes": "1724296"
},
{
"name": "JavaScript",
"bytes": "107698"
},
{
"name": "M4",
"bytes": "212070"
},
{
"name": "Makefile",
"bytes": "185615"
},
{
"name": "Objective-C",
"bytes": "33060"
},
{
"name": "PHP",
"bytes": "5975"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Python",
"bytes": "25163253"
},
{
"name": "Roff",
"bytes": "44774"
},
{
"name": "Shell",
"bytes": "398907"
},
{
"name": "TeX",
"bytes": "321262"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Tests for tf_agents.utils.nest_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
from tf_agents.utils import nest_utils
# We use this to build {Dict,Tuple,List}Wrappers for testing nesting code.
from tensorflow.python.trackable import data_structures # pylint: disable=g-direct-tensorflow-import # TF internal
# pylint: disable=invalid-name
DictWrapper = data_structures.wrap_or_unwrap
TupleWrapper = data_structures.wrap_or_unwrap
# pylint: enable=invalid-name
class NestedTensorsTest(tf.test.TestCase):
"""Tests functions related to nested tensors."""
def nest_spec(self, shape=(2, 3), dtype=tf.float32, include_sparse=True):
spec = {
'tensor_spec_1':
tensor_spec.TensorSpec(shape, dtype),
'bounded_spec_1':
tensor_spec.BoundedTensorSpec(shape, dtype, -10, 10),
'dict_spec': {
'tensor_spec_2':
tensor_spec.TensorSpec(shape, dtype),
'bounded_spec_2':
tensor_spec.BoundedTensorSpec(shape, dtype, -10, 10)
},
'tuple_spec': (
tensor_spec.TensorSpec(shape, dtype),
tensor_spec.BoundedTensorSpec(shape, dtype, -10, 10),
),
'list_spec': [
tensor_spec.TensorSpec(shape, dtype),
(tensor_spec.TensorSpec(shape, dtype),
tensor_spec.BoundedTensorSpec(shape, dtype, -10, 10)),
],
'sparse_tensor_spec': tf.SparseTensorSpec(
shape=shape, dtype=dtype)
}
if not include_sparse:
del spec['sparse_tensor_spec']
return spec
def zeros_from_spec(self, spec, batch_size=None, extra_sizes=None):
"""Return tensors matching spec with desired additional dimensions.
Args:
spec: A `tf.TypeSpec`, e.g. `tf.TensorSpec` or `tf.SparseTensorSpec`.
batch_size: The desired batch size; the size of the first dimension of
all tensors.
extra_sizes: An optional list of additional dimension sizes beyond the
batch_size.
Returns:
A possibly nested tuple of Tensors matching the spec.
"""
tensors = []
extra_sizes = extra_sizes or []
for s in tf.nest.flatten(spec):
if isinstance(s, tf.SparseTensorSpec):
if batch_size:
shape = [batch_size] + extra_sizes + s.shape
rank = 1 + len(extra_sizes) + 2
else:
shape = s.shape
rank = 2
tensors.append(
tf.SparseTensor(
indices=tf.zeros([7, rank], dtype=tf.int64),
values=tf.zeros([7], dtype=s.dtype),
dense_shape=tf.constant(shape.as_list(), dtype=tf.int64)))
elif isinstance(s, tf.TensorSpec):
if batch_size:
shape = tf.TensorShape([batch_size] + extra_sizes).concatenate(
s.shape)
else:
shape = s.shape
tensors.append(tf.zeros(shape, dtype=s.dtype))
else:
raise TypeError('Unexpected spec type: {}'.format(s))
return tf.nest.pack_sequence_as(spec, tensors)
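  # --- Illustrative sketch (not part of the original test suite) ---
  # What zeros_from_spec() encodes, spelled out for a single flat spec: a spec
  # describes the unbatched shape, and any leading dimensions on the actual
  # tensors are "outer" (batch) dimensions that nest_utils can count. The names
  # and sizes below are arbitrary example values.
  def example_outer_rank(self):
    spec = {'obs': tensor_spec.TensorSpec([2, 3], tf.float32)}
    batched = {'obs': tf.zeros([4, 2, 3], tf.float32)}  # one outer dim of size 4
    return nest_utils.get_outer_rank(batched, spec)  # == 1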
def placeholders_from_spec(self, spec):
"""Return tensors matching spec with an added unknown batch dimension.
Args:
spec: A `tf.TypeSpec`, e.g. `tf.TensorSpec` or `tf.SparseTensorSpec`.
Returns:
A possibly nested tuple of Tensors matching the spec.
"""
tensors = []
for s in tf.nest.flatten(spec):
if isinstance(s, tf.SparseTensorSpec):
shape = tf.TensorShape([None]).concatenate(s.shape)
tensors.append(
tf.sparse.from_dense(
tf.compat.v1.placeholder(dtype=s.dtype, shape=shape)))
elif isinstance(s, tf.TensorSpec):
shape = tf.TensorShape([None]).concatenate(s.shape)
tensors.append(tf.compat.v1.placeholder(dtype=s.dtype, shape=shape))
else:
raise TypeError('Unexpected spec type: {}'.format(s))
return tf.nest.pack_sequence_as(spec, tensors)
def testGetOuterShapeNotBatched(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_size = nest_utils.get_outer_shape(tensor, spec)
self.assertAllEqual(self.evaluate(batch_size), [])
def testGetOuterShapeOneDim(self):
tensor = tf.zeros([5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_size = nest_utils.get_outer_shape(tensor, spec)
self.assertEqual(self.evaluate(batch_size), [5])
def testGetOuterShapeTwoDims(self):
tensor = tf.zeros([7, 5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_dim = nest_utils.get_outer_shape(tensor, spec)
self.assertAllEqual(self.evaluate(batch_dim), [7, 5])
def testGetOuterShapeDynamicShapeBatched(self):
spec = tensor_spec.TensorSpec([1], dtype=tf.float32)
tensor = tf.convert_to_tensor(value=[[0.0]] * 8)
batch_size = self.evaluate(nest_utils.get_outer_shape(tensor, spec))
self.assertAllEqual(batch_size, [8])
def testGetOuterShapeDynamicShapeNotBatched(self):
spec = tensor_spec.TensorSpec([None, 1], dtype=tf.float32)
tensor = tf.convert_to_tensor(value=[[0.0]] * 8)
batch_size = self.evaluate(nest_utils.get_outer_shape(tensor, spec))
self.assertAllEqual(batch_size, [])
def testGetOuterDimsSingleTensorUnbatched(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_dims = nest_utils.get_outer_rank(tensor, spec)
self.assertFalse(batch_dims)
def testGetOuterDimsSingleTensorBatched(self):
tensor = tf.zeros([5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_dims = nest_utils.get_outer_rank(tensor, spec)
self.assertEqual(batch_dims, 1)
def testGetOuterDimsSpecMismatchUnbatched(self):
tensor = tf.zeros([1, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.get_outer_rank(tensor, spec)
def testGetOuterDimsSpecMismatchBatched(self):
tensor = tf.zeros([5, 1, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.get_outer_rank(tensor, spec)
def testGetOuterDimsNestedTensorsUnbatched(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs)
batch_dims = nest_utils.get_outer_rank(tensors, specs)
self.assertFalse(batch_dims)
def testGetOuterDimsNestedTensorsBatched(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
batch_dims = nest_utils.get_outer_rank(tensors, specs)
self.assertEqual(batch_dims, 1)
def testGetOuterDimsNestedTensorsMixed(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
tensors['tensor_spec_1'] = tf.zeros(shape)
with self.assertRaises(ValueError):
nest_utils.get_outer_rank(tensors, specs)
def testGetOuterDimsNestedTensorsMultipleBatchDims(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[2])
batch_dims = nest_utils.get_outer_rank(tensors, specs)
self.assertEqual(batch_dims, 2)
def testGetOuterDimsNestedTensorsMultipleBatchDimsMixed(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[2])
# Tensors are ok.
self.assertEqual(nest_utils.get_outer_rank(tensors, specs), 2)
with self.assertRaises(ValueError):
tensors['tensor_spec_1'] = tf.zeros_like(tensors['tensor_spec_1'][0])
# Tensors are not ok.
nest_utils.get_outer_rank(tensors, specs)
def testIsBatchedSingleTensorFalse(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
is_batched = nest_utils.is_batched_nested_tensors(tensor, spec)
self.assertFalse(is_batched)
def testIsBatchedSingleTensorTrue(self):
tensor = tf.zeros([5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
is_batched = nest_utils.is_batched_nested_tensors(tensor, spec)
self.assertTrue(is_batched)
def testIsBatchedSingleTensorValueErrorUnBatched(self):
tensor = tf.zeros([1, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.is_batched_nested_tensors(tensor, spec)
def testIsBatchedSingleTensorValueErrorBatched(self):
tensor = tf.zeros([5, 1, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.is_batched_nested_tensors(tensor, spec)
def testIsBatchedNestedTensorsFalse(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs)
is_batched = nest_utils.is_batched_nested_tensors(tensors, specs)
self.assertFalse(is_batched)
def testIsBatchedNestedTensorsTrue(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
is_batched = nest_utils.is_batched_nested_tensors(tensors, specs)
self.assertTrue(is_batched)
def testIsBatchedNestedTensorsAllowExtraFields(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
tensors['extra_field'] = tf.constant([1, 2, 3])
is_batched = nest_utils.is_batched_nested_tensors(
tensors, specs, allow_extra_fields=True)
self.assertTrue(is_batched)
def testIsBatchedNestedTensorsMixed(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
tensors['tensor_spec_1'] = tf.zeros(shape)
with self.assertRaises(ValueError):
nest_utils.is_batched_nested_tensors(tensors, specs)
def testDifferentRankCases(self):
state_spec = {
'first':
tensor_spec.TensorSpec(shape=(1,), dtype=tf.int32, name='second'),
'second':
tensor_spec.TensorSpec(shape=(1, 1), dtype=tf.int32, name='second'),
'third':
tensor_spec.TensorSpec(
shape=(1, 1, 1), dtype=tf.float32, name='third'),
}
batch_size = 2
condition = tf.ones((batch_size,), dtype=tf.bool)
a = self.zeros_from_spec(state_spec, batch_size=batch_size)
b = self.zeros_from_spec(state_spec, batch_size=batch_size)
c = nest_utils.where(condition, a, b)
self.assertEqual(c['first'].shape, (batch_size, 1))
self.assertEqual(c['second'].shape, (batch_size, 1, 1))
self.assertEqual(c['third'].shape, (batch_size, 1, 1, 1))
def testRankTooSmallRaisesValueError(self):
state_spec = {
'big':
tensor_spec.TensorSpec(shape=(1, 1), dtype=tf.int32, name='second'),
'small':
tensor_spec.TensorSpec(shape=(1,), dtype=tf.int32, name='second'),
}
batch_size = 2
condition = tf.ones((batch_size, 1, 1), dtype=tf.bool)
a = self.zeros_from_spec(state_spec, batch_size=batch_size)
b = self.zeros_from_spec(state_spec, batch_size=batch_size)
with self.assertRaises(ValueError):
nest_utils.where(condition, a, b)
def testRankTooSmallFunctionRaisesValueError(self):
state_spec = {
'big':
tensor_spec.TensorSpec(shape=(1, 1), dtype=tf.int32, name='second'),
'small':
tensor_spec.TensorSpec(shape=(1,), dtype=tf.int32, name='second'),
}
@tf.function
def aux_where():
batch_size = 2
condition = tf.ones((batch_size, 1, 1), dtype=tf.bool)
a = self.zeros_from_spec(state_spec, batch_size=batch_size)
b = self.zeros_from_spec(state_spec, batch_size=batch_size)
return nest_utils.where(condition, a, b)
with self.assertRaises(ValueError):
aux_where()
def testIsBatchedNestedTensorsMultipleBatchDimsFalse(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs)
is_batched = nest_utils.is_batched_nested_tensors(
tensors, specs, num_outer_dims=2)
self.assertFalse(is_batched)
def testIsBatchedNestedTensorsMultipleBatchDimsTrue(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[2])
is_batched = nest_utils.is_batched_nested_tensors(
tensors, specs, num_outer_dims=2)
self.assertTrue(is_batched)
def testIsBatchedNestedTensorsMultipleBatchDimsWrongBatchDimNumber(self):
shape = [2, 3]
specs = self.nest_spec(shape)
# Tensors only have one batch dim.
tensors = self.zeros_from_spec(specs, batch_size=2)
is_batched = nest_utils.is_batched_nested_tensors(tensors,
specs,
num_outer_dims=2)
self.assertFalse(is_batched)
def testIsBatchedNestedTensorsMultipleBatchDimsRightBatchDimNumber(self):
shape = [2, 3]
specs = self.nest_spec(shape)
# Tensors only have one batch dim.
tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[1])
is_batched = nest_utils.is_batched_nested_tensors(tensors,
specs,
num_outer_dims=2)
self.assertTrue(is_batched)
def testIsBatchedNestedTensorsMultipleBatchDimsMixed(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[2])
# Tensors are ok.
nest_utils.is_batched_nested_tensors(tensors, specs, num_outer_dims=2)
with self.assertRaises(ValueError):
tensors['tensor_spec_1'] = tf.zeros_like(tensors['tensor_spec_1'][0])
# Tensors are not ok.
nest_utils.is_batched_nested_tensors(tensors, specs, num_outer_dims=2)
def testBatchSingleTensor(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batched_tensor = nest_utils.batch_nested_tensors(tensor, spec)
self.assertEqual(batched_tensor.shape.as_list(), [1, 2, 3])
def testBatchedSingleTensor(self):
tensor = tf.zeros([5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batched_tensor = nest_utils.batch_nested_tensors(tensor, spec)
self.assertEqual(batched_tensor.shape.as_list(), [5, 2, 3])
def testWrongShapeRaisesValueError(self):
tensor = tf.zeros([3, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.batch_nested_tensors(tensor, spec)
def testBatchNestedTensorsNoSpec(self):
shape = [2, 3]
batch_shape = [1] + shape
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs)
tf.nest.assert_same_structure(tensors, specs)
batched_tensors = nest_utils.batch_nested_tensors(tensors)
tf.nest.assert_same_structure(specs, batched_tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), batch_shape)
tf.nest.map_structure(assert_shapes, batched_tensors)
def testBatchNestedTensors(self):
shape = [2, 3]
batch_shape = [1] + shape
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs)
tf.nest.assert_same_structure(tensors, specs)
batched_tensors = nest_utils.batch_nested_tensors(tensors, specs)
tf.nest.assert_same_structure(specs, batched_tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), batch_shape)
tf.nest.map_structure(assert_shapes, batched_tensors)
def testBatchedNestedTensors(self):
shape = [2, 3]
batch_size = 5
batch_shape = [batch_size] + shape
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=batch_size)
tf.nest.assert_same_structure(tensors, specs)
batched_tensors = nest_utils.batch_nested_tensors(tensors, specs)
tf.nest.assert_same_structure(specs, batched_tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), batch_shape)
tf.nest.map_structure(assert_shapes, batched_tensors)
def testUnBatchSingleTensor(self):
batched_tensor = tf.zeros([1, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
tensor = nest_utils.unbatch_nested_tensors(batched_tensor, spec)
self.assertEqual(tensor.shape.as_list(), [2, 3])
def testUnBatchedSingleTensor(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
unbatched_tensor = nest_utils.unbatch_nested_tensors(tensor, spec)
self.assertEqual(unbatched_tensor.shape.as_list(), [2, 3])
def testUnBatchNestedTensorsNoSpec(self):
shape = [2, 3]
batch_size = 1
specs = self.nest_spec(shape, include_sparse=False)
batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
tf.nest.assert_same_structure(batched_tensors, specs)
tensors = nest_utils.unbatch_nested_tensors(batched_tensors)
tf.nest.assert_same_structure(specs, tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), shape, t)
tf.nest.map_structure(assert_shapes, tensors)
def testUnBatchNestedTensors(self):
shape = [2, 3]
batch_size = 1
specs = self.nest_spec(shape, include_sparse=False)
batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
tf.nest.assert_same_structure(batched_tensors, specs)
tensors = nest_utils.unbatch_nested_tensors(batched_tensors, specs)
tf.nest.assert_same_structure(specs, tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), shape, t)
tf.nest.map_structure(assert_shapes, tensors)
def testSplitNestedTensors(self):
shape = [2, 3]
batch_size = 7
specs = self.nest_spec(shape, include_sparse=True)
batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
tf.nest.assert_same_structure(batched_tensors, specs)
tensors = nest_utils.split_nested_tensors(batched_tensors, specs,
batch_size)
self.assertLen(tensors, batch_size)
for t in tensors:
tf.nest.assert_same_structure(specs, t)
def assert_shapes(t):
if not tf.executing_eagerly() and isinstance(t, tf.SparseTensor):
# Constant value propagation in SparseTensors does not allow us to infer
# the value of output t.shape from input's t.shape; only its rank.
self.assertLen(t.shape, 1 + len(shape))
else:
self.assertEqual(t.shape.as_list(), [1] + shape)
tf.nest.map_structure(assert_shapes, tensors)
def testSplitNestedTensorsSizeSplits(self):
shape = [2, 3]
batch_size = 9
size_splits = [2, 4, 3]
specs = self.nest_spec(shape, include_sparse=False)
batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
tf.nest.assert_same_structure(batched_tensors, specs)
tensors = nest_utils.split_nested_tensors(
batched_tensors, specs, size_splits)
self.assertEqual(len(tensors), len(size_splits))
for i, tensor in enumerate(tensors):
tf.nest.assert_same_structure(specs, tensor)
tf.nest.map_structure(
lambda t: self.assertEqual(t.shape.as_list()[0], size_splits[i]), # pylint: disable=cell-var-from-loop
tensor)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list()[1:], shape)
tf.nest.map_structure(assert_shapes, tensors)
def testUnstackNestedTensors(self):
shape = [5, 8]
batch_size = 7
specs = self.nest_spec(shape, include_sparse=False)
batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
tf.nest.assert_same_structure(batched_tensors, specs)
tensors = nest_utils.unstack_nested_tensors(batched_tensors, specs)
self.assertLen(tensors, batch_size)
for t in tensors:
tf.nest.assert_same_structure(specs, t)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), shape)
tf.nest.map_structure(assert_shapes, tensors)
def testStackNestedTensors(self):
shape = [5, 8]
batch_size = 3
batched_shape = [batch_size,] + shape
specs = self.nest_spec(shape, include_sparse=False)
unstacked_tensors = [self.zeros_from_spec(specs) for _ in range(batch_size)]
stacked_tensor = nest_utils.stack_nested_tensors(unstacked_tensors)
tf.nest.assert_same_structure(specs, stacked_tensor)
assert_shapes = lambda tensor: self.assertEqual(tensor.shape, batched_shape)
tf.nest.map_structure(assert_shapes, stacked_tensor)
def testStackNestedTensorsAxis1(self):
shape = [5, 8]
stack_dim = 3
stacked_shape = [5, 3, 8]
specs = self.nest_spec(shape, include_sparse=False)
unstacked_tensors = [self.zeros_from_spec(specs)] * stack_dim
stacked_tensor = nest_utils.stack_nested_tensors(unstacked_tensors, axis=1)
tf.nest.assert_same_structure(specs, stacked_tensor)
assert_shapes = lambda tensor: self.assertEqual(tensor.shape, stacked_shape)
tf.nest.map_structure(assert_shapes, stacked_tensor)
def testUnBatchedNestedTensors(self, include_sparse=False):
shape = [2, 3]
specs = self.nest_spec(shape, include_sparse=include_sparse)
unbatched_tensors = self.zeros_from_spec(specs)
tf.nest.assert_same_structure(unbatched_tensors, specs)
tensors = nest_utils.unbatch_nested_tensors(unbatched_tensors, specs)
tf.nest.assert_same_structure(specs, tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), shape, t)
tf.nest.map_structure(assert_shapes, tensors)
def testFlattenMultiBatchedSingleTensor(self):
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
tensor = self.zeros_from_spec(spec, batch_size=7, extra_sizes=[5])
(batch_flattened_tensor,
batch_dims) = nest_utils.flatten_multi_batched_nested_tensors(tensor, spec)
self.assertEqual(batch_flattened_tensor.shape.as_list(), [35, 2, 3])
self.evaluate(tf.compat.v1.global_variables_initializer())
batch_dims_ = self.evaluate(batch_dims)
self.assertAllEqual(batch_dims_, [7, 5])
def testFlattenMultiBatchedNestedTensors(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=7, extra_sizes=[5])
(batch_flattened_tensors,
batch_dims) = nest_utils.flatten_multi_batched_nested_tensors(
tensors, specs)
tf.nest.assert_same_structure(specs, batch_flattened_tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), [35, 2, 3])
tf.nest.map_structure(assert_shapes, batch_flattened_tensors)
self.evaluate(tf.compat.v1.global_variables_initializer())
batch_dims_ = self.evaluate(batch_dims)
self.assertAllEqual(batch_dims_, [7, 5])
def testFlattenMultiBatchedNestedTensorsWithPartiallyKnownShape(self):
if tf.executing_eagerly():
self.skipTest('Do not check nest processing of data in eager mode. '
'Placeholders are not compatible with eager execution.')
shape = [2, 3]
specs = self.nest_spec(shape, include_sparse=False)
tensors = self.placeholders_from_spec(specs)
(batch_flattened_tensors,
_) = nest_utils.flatten_multi_batched_nested_tensors(
tensors, specs)
tf.nest.assert_same_structure(specs, batch_flattened_tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), [None, 2, 3])
tf.nest.map_structure(assert_shapes, batch_flattened_tensors)
def testFlattenMultiBatchedNestedTensorsWithSparseTensor(self):
if tf.executing_eagerly():
self.skipTest('Do not check nest processing of data in eager mode. '
'Placeholders are not compatible with eager execution.')
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=7, extra_sizes=[5])
(batch_flattened_tensors,
_) = nest_utils.flatten_multi_batched_nested_tensors(tensors, specs)
tf.nest.assert_same_structure(specs, batch_flattened_tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), [35, 2, 3])
tf.nest.map_structure(assert_shapes, batch_flattened_tensors)
def testFlattenMultiBatchedNestedTensorsWithPartiallyKnownSparseTensor(self):
if tf.executing_eagerly():
self.skipTest('Do not check nest processing of data in eager mode. '
'Placeholders are not compatible with eager execution.')
shape = [2, None]
specs = self.nest_spec(shape)
tensors = self.placeholders_from_spec(specs)
(batch_flattened_tensors,
_) = nest_utils.flatten_multi_batched_nested_tensors(tensors, specs)
tf.nest.assert_same_structure(specs, batch_flattened_tensors)
def assert_shapes(t):
if isinstance(t, tf.SparseTensor):
self.assertEqual(t.shape.rank, 3)
else:
self.assertEqual(t.shape.as_list(), [None, 2, None])
tf.nest.map_structure(assert_shapes, batch_flattened_tensors)
class NestedArraysTest(tf.test.TestCase):
"""Tests functions related to nested arrays."""
def nest_spec(self, shape=(2, 3), dtype=np.float32):
return {
'array_spec_1':
array_spec.ArraySpec(shape, dtype),
'bounded_spec_1':
array_spec.BoundedArraySpec(shape, dtype, -10, 10),
'dict_spec': {
'tensor_spec_2':
array_spec.ArraySpec(shape, dtype),
'bounded_spec_2':
array_spec.BoundedArraySpec(shape, dtype, -10, 10)
},
'tuple_spec': (
array_spec.ArraySpec(shape, dtype),
array_spec.BoundedArraySpec(shape, dtype, -10, 10),
),
'list_spec': [
array_spec.ArraySpec(shape, dtype),
(array_spec.ArraySpec(shape, dtype),
array_spec.BoundedArraySpec(shape, dtype, -10, 10)),
],
}
def zeros_from_spec(self, specs, outer_dims=None):
"""Return arrays matching spec with desired additional dimensions.
Args:
specs: A nested array spec.
outer_dims: An optional list of outer dimensions, e.g. batch size.
Returns:
A nested tuple of arrays matching the spec.
"""
outer_dims = outer_dims or []
def _zeros(spec):
return np.zeros(type(spec.shape)(outer_dims) + spec.shape, spec.dtype)
return tf.nest.map_structure(_zeros, specs)
def testUnstackNestedArrays(self):
shape = (5, 8)
batch_size = 3
specs = self.nest_spec(shape)
batched_arrays = self.zeros_from_spec(specs, outer_dims=[batch_size])
unbatched_arrays = nest_utils.unstack_nested_arrays(batched_arrays)
self.assertLen(unbatched_arrays, batch_size)
for array in unbatched_arrays:
tf.nest.assert_same_structure(specs, array)
assert_shapes = lambda a: self.assertEqual(a.shape, shape)
tf.nest.map_structure(assert_shapes, unbatched_arrays)
def testUnstackNestedArraysIntoFlatItems(self):
shape = (5, 8)
batch_size = 3
specs = self.nest_spec(shape)
batched_arrays = self.zeros_from_spec(specs, outer_dims=[batch_size])
unbatched_flat_items = nest_utils.unstack_nested_arrays_into_flat_items(
batched_arrays)
self.assertLen(unbatched_flat_items, batch_size)
for nested_array, flat_item in zip(
nest_utils.unstack_nested_arrays(batched_arrays), unbatched_flat_items):
self.assertAllEqual(flat_item, tf.nest.flatten(nested_array))
tf.nest.assert_same_structure(specs,
tf.nest.pack_sequence_as(specs, flat_item))
assert_shapes = lambda a: self.assertEqual(a.shape, shape)
tf.nest.map_structure(assert_shapes, unbatched_flat_items)
def testUnstackNestedArray(self):
shape = (5, 8)
batch_size = 1
specs = self.nest_spec(shape)
batched_arrays = self.zeros_from_spec(specs, outer_dims=[batch_size])
unbatched_arrays = nest_utils.unstack_nested_arrays(batched_arrays)
self.assertLen(unbatched_arrays, batch_size)
for array in unbatched_arrays:
tf.nest.assert_same_structure(specs, array)
assert_shapes = lambda a: self.assertEqual(a.shape, shape)
tf.nest.map_structure(assert_shapes, unbatched_arrays)
def testStackNestedArrays(self):
shape = (5, 8)
batch_size = 3
batched_shape = (batch_size,) + shape
specs = self.nest_spec(shape)
unstacked_arrays = [self.zeros_from_spec(specs) for _ in range(batch_size)]
stacked_array = nest_utils.stack_nested_arrays(unstacked_arrays)
tf.nest.assert_same_structure(specs, stacked_array)
assert_shapes = lambda a: self.assertEqual(a.shape, batched_shape)
tf.nest.map_structure(assert_shapes, stacked_array)
def testGetOuterArrayShape(self):
spec = (
array_spec.ArraySpec([5, 8], np.float32),
(array_spec.ArraySpec([1], np.int32),
array_spec.ArraySpec([2, 2, 2], np.float32))
)
batch_size = 3
unstacked_arrays = [self.zeros_from_spec(spec) for _ in range(batch_size)]
outer_dims = nest_utils.get_outer_array_shape(unstacked_arrays[0], spec)
self.assertEqual((), outer_dims)
stacked_array = nest_utils.stack_nested_arrays(unstacked_arrays)
outer_dims = nest_utils.get_outer_array_shape(stacked_array, spec)
self.assertEqual((batch_size,), outer_dims)
time_dim = [nest_utils.batch_nested_array(arr) for arr in unstacked_arrays]
batch_time = nest_utils.stack_nested_arrays(time_dim)
outer_dims = nest_utils.get_outer_array_shape(batch_time, spec)
self.assertEqual((batch_size, 1), outer_dims)
def testWhere(self):
condition = tf.convert_to_tensor([True, False, False, True, False])
true_output = tf.nest.map_structure(tf.convert_to_tensor,
(np.array([0] * 5), np.arange(1, 6)))
false_output = tf.nest.map_structure(tf.convert_to_tensor,
(np.array([1] * 5), np.arange(6, 11)))
result = nest_utils.where(condition, true_output, false_output)
result = self.evaluate(result)
expected = (np.array([0, 1, 1, 0, 1]), np.array([1, 7, 8, 4, 10]))
self.assertAllEqual(expected, result)
def testWhereDifferentRanks(self):
condition = tf.convert_to_tensor([True, False, False, True, False])
true_output = tf.nest.map_structure(
tf.convert_to_tensor,
(np.reshape(np.array([0] * 10),
(5, 2)), np.reshape(np.arange(1, 11), (5, 2))))
false_output = tf.nest.map_structure(
tf.convert_to_tensor,
(np.reshape(np.array([1] * 10),
(5, 2)), np.reshape(np.arange(12, 22), (5, 2))))
result = nest_utils.where(condition, true_output, false_output)
result = self.evaluate(result)
expected = (np.array([[0, 0], [1, 1], [1, 1], [0, 0], [1, 1]]),
np.array([[1, 2], [14, 15], [16, 17], [7, 8], [20, 21]]))
self.assertAllEqual(expected, result)
def testWhereSameRankDifferentDimension(self):
condition = tf.convert_to_tensor([True, False, True])
true_output = (tf.convert_to_tensor([1]), tf.convert_to_tensor([2]))
false_output = (tf.convert_to_tensor([3, 4, 5]),
tf.convert_to_tensor([6, 7, 8]))
result = nest_utils.where(condition, true_output, false_output)
result = self.evaluate(result)
expected = (np.array([1, 4, 1]), np.array([2, 7, 2]))
self.assertAllEqual(expected, result)
class PruneExtraKeysTest(tf.test.TestCase):
def testPruneExtraKeys(self):
self.assertEqual(nest_utils.prune_extra_keys({}, {'a': 1}), {})
self.assertEqual(nest_utils.prune_extra_keys((), {'a': 1}), ())
self.assertEqual(nest_utils.prune_extra_keys(
{'a': 1}, {'a': 'a'}), {'a': 'a'})
self.assertEqual(
nest_utils.prune_extra_keys({'a': 1}, {'a': 'a', 'b': 2}), {'a': 'a'})
self.assertEqual(
nest_utils.prune_extra_keys([{'a': 1}], [{'a': 'a', 'b': 2}]),
[{'a': 'a'}])
self.assertEqual(
nest_utils.prune_extra_keys({'a': (), 'b': None}, {'a': 1, 'b': 2}),
{'a': (), 'b': 2})
self.assertEqual(
nest_utils.prune_extra_keys(
{'a': {'aa': 1, 'ab': 2}, 'b': {'ba': 1}},
{'a': {'aa': 'aa', 'ab': 'ab', 'ac': 'ac'},
'b': {'ba': 'ba', 'bb': 'bb'},
'c': 'c'}),
{'a': {'aa': 'aa', 'ab': 'ab'}, 'b': {'ba': 'ba'}})
self.assertEqual(
nest_utils.prune_extra_keys(
{'a': ()},
DictWrapper({'a': DictWrapper({'b': None})})),
{'a': ()})
self.assertEqual(
nest_utils.prune_extra_keys(
{'a': 1, 'c': 2},
DictWrapper({'a': DictWrapper({'b': None})})),
{'a': {'b': None}})
def testInvalidWide(self):
self.assertEqual(nest_utils.prune_extra_keys(None, {'a': 1}), {'a': 1})
self.assertEqual(nest_utils.prune_extra_keys({'a': 1}, {}), {})
self.assertEqual(nest_utils.prune_extra_keys(
{'a': 1}, {'c': 'c'}), {'c': 'c'})
self.assertEqual(nest_utils.prune_extra_keys([], ['a']), ['a'])
self.assertEqual(
nest_utils.prune_extra_keys([{}, {}], [{'a': 1}]), [{'a': 1}])
def testNamedTuple(self):
class A(collections.namedtuple('A', ('a', 'b'))):
pass
self.assertEqual(
nest_utils.prune_extra_keys(
[A(a={'aa': 1}, b=3), {'c': 4}],
[A(a={'aa': 'aa', 'ab': 'ab'}, b='b'), {'c': 'c', 'd': 'd'}]),
[A(a={'aa': 'aa'}, b='b'), {'c': 'c'}])
def testSubtypesOfListAndDict(self):
class A(collections.namedtuple('A', ('a', 'b'))):
pass
self.assertEqual(
nest_utils.prune_extra_keys(
[data_structures.ListWrapper([None, DictWrapper({'a': 3, 'b': 4})]),
None,
TupleWrapper((DictWrapper({'g': 5}),)),
TupleWrapper(A(None, DictWrapper({'h': 6}))),
],
[['x', {'a': 'a', 'b': 'b', 'c': 'c'}],
'd',
({'g': 'g', 'gg': 'gg'},),
A(None, {'h': 'h', 'hh': 'hh'}),
]),
[data_structures.ListWrapper([
'x', DictWrapper({'a': 'a', 'b': 'b'})]),
'd',
TupleWrapper((DictWrapper({'g': 'g'}),)),
TupleWrapper(A(None, DictWrapper({'h': 'h'}),)),
])
def testOrderedDict(self):
OD = collections.OrderedDict # pylint: disable=invalid-name
self.assertEqual(
nest_utils.prune_extra_keys(
OD([('a', OD([('aa', 1), ('ab', 2)])),
('b', OD([('ba', 1)]))]),
OD([('a', OD([('aa', 'aa'), ('ab', 'ab'), ('ac', 'ac')])),
('b', OD([('ba', 'ba'), ('bb', 'bb')])),
('c', 'c')])),
OD([('a', OD([('aa', 'aa'), ('ab', 'ab')])),
('b', OD([('ba', 'ba')]))])
)
class TileBatchTest(tf.test.TestCase):
def test_tile_batch(self):
t = tf.constant([[1., 2., 3.], [4., 5., 6.]])
t_tile_batched = nest_utils.tile_batch(t, 2)
expected_t_tile_batched = tf.constant(
[[1., 2., 3.], [1., 2., 3.], [4., 5., 6.], [4., 5., 6.]])
self.assertAllEqual(
self.evaluate(expected_t_tile_batched), self.evaluate(t_tile_batched))
self.assertAllEqual((4, 3), t_tile_batched.shape)
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "acead16f780b02f5376fdbba0150588a",
"timestamp": "",
"source": "github",
"line_count": 943,
"max_line_length": 116,
"avg_line_length": 37.705196182396605,
"alnum_prop": 0.6420857239284509,
"repo_name": "tensorflow/agents",
"id": "6b0f0827cf883bfcd194a855290f0f7ea298204b",
"size": "36159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_agents/utils/nest_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4930266"
},
{
"name": "Shell",
"bytes": "10950"
}
],
"symlink_target": ""
} |
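The tests above pin down the contract of nest_utils.where: it takes a rank-1 boolean condition plus two identically structured nests, and broadcasts the condition against the leading (batch) dimension of every entry, even when the entries have different ranks. A minimal standalone sketch of that usage follows; it assumes a TF-Agents installation where the helper is importable as tf_agents.utils.nest_utils (inferred from the test file's path above), so treat it as an illustration rather than part of the test suite.
import tensorflow as tf
from tf_agents.utils import nest_utils  # assumed module path, mirroring nest_utils_test.py
condition = tf.constant([True, False, True])
on_true = {'a': tf.zeros((3, 2)), 'b': tf.zeros((3,))}
on_false = {'a': tf.ones((3, 2)), 'b': tf.ones((3,))}
# The 1-D condition is broadcast per entry, so the rank-2 'a' and the rank-1 'b'
# tensors are both selected row by row with the same mask.
selected = nest_utils.where(condition, on_true, on_false)
print(selected['a'].numpy())  # rows 0 and 2 come from on_true, row 1 from on_false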
from __future__ import absolute_import
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _, ugettext
from sentry import roles
from sentry.models import OrganizationMember, OrganizationMemberTeam, Team
from sentry.web.frontend.base import OrganizationView
from sentry.web.forms.edit_organization_member import EditOrganizationMemberForm
class OrganizationMemberSettingsView(OrganizationView):
def get_form(self, request, member, allowed_roles):
return EditOrganizationMemberForm(
data=request.POST or None,
instance=member,
allowed_roles=allowed_roles,
initial={
'role': member.role,
'teams': Team.objects.filter(
id__in=OrganizationMemberTeam.objects.filter(
organizationmember=member,
).values('team'),
),
}
)
def resend_invite(self, request, organization, member):
messages.success(request, ugettext('An invitation to join %(organization)s has been sent to %(email)s') % {
'organization': organization.name,
'email': member.email,
})
member.send_invite_email()
redirect = reverse('sentry-organization-member-settings',
args=[organization.slug, member.id])
return self.redirect(redirect)
def view_member(self, request, organization, member):
context = {
'member': member,
'enabled_teams': set(member.teams.all()),
'all_teams': Team.objects.filter(
organization=organization,
),
'role_list': roles.get_all(),
}
return self.respond('sentry/organization-member-details.html', context)
def handle(self, request, organization, member_id):
try:
member = OrganizationMember.objects.get(
Q(user__is_active=True) | Q(user__isnull=True),
organization=organization,
id=member_id,
)
except OrganizationMember.DoesNotExist:
return self.redirect(reverse('sentry'))
if request.POST.get('op') == 'reinvite' and member.is_pending:
return self.resend_invite(request, organization, member)
can_admin = request.access.has_scope('member:delete')
if can_admin and not request.is_superuser():
acting_member = OrganizationMember.objects.get(
user=request.user,
organization=organization,
)
allowed_roles = [
r for r in roles.get_all()
if r.priority <= roles.get(acting_member.role).priority
]
can_admin = bool(allowed_roles)
elif request.is_superuser():
allowed_roles = roles.get_all()
if member.user == request.user or not can_admin:
return self.view_member(request, organization, member)
form = self.get_form(request, member, allowed_roles)
if form.is_valid():
member = form.save(request.user, organization, request.META['REMOTE_ADDR'])
messages.add_message(request, messages.SUCCESS,
_('Your changes were saved.'))
redirect = reverse('sentry-organization-member-settings',
args=[organization.slug, member.id])
return self.redirect(redirect)
context = {
'member': member,
'form': form,
'role_list': [
(r, r in allowed_roles)
for r in roles.get_all()
]
}
return self.respond('sentry/organization-member-settings.html', context)
| {
"content_hash": "ccdc4d6afae03c8f0a8aa54335be5de8",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 115,
"avg_line_length": 36.00934579439252,
"alnum_prop": 0.5883726966000519,
"repo_name": "mitsuhiko/sentry",
"id": "14b0d703fb7d7490ada837e05fa461c2d973a51f",
"size": "3853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/web/frontend/organization_member_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "171113"
},
{
"name": "Python",
"bytes": "877258"
}
],
"symlink_target": ""
} |
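The key authorization step in handle() above is the priority filter: a member who can administer others may still only assign roles whose priority does not exceed their own. The snippet below isolates that idea with a stand-in roles list; the Role tuple and its entries are illustrative assumptions, not Sentry's actual roles registry.
from collections import namedtuple
Role = namedtuple('Role', ['id', 'priority'])
ROLES = [Role('member', 0), Role('admin', 1), Role('manager', 2), Role('owner', 3)]  # hypothetical entries
def allowed_roles_for(acting_role_id):
    # Mirrors the list comprehension in handle(): keep only roles at or below
    # the acting member's own priority.
    acting_priority = next(r.priority for r in ROLES if r.id == acting_role_id)
    return [r.id for r in ROLES if r.priority <= acting_priority]
assert allowed_roles_for('admin') == ['member', 'admin']
assert allowed_roles_for('owner') == ['member', 'admin', 'manager', 'owner']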
import getpass
import json
import tempfile
from unittest.mock import patch
from urllib.parse import urljoin
import pytest
import responses
from pythonanywhere.api.base import get_api_endpoint
from pythonanywhere.api.files_api import Files
class TestFiles:
username = getpass.getuser()
base_url = get_api_endpoint().format(username=username, flavor="files")
home_dir_path = f"/home/{username}"
default_home_dir_files = {
".bashrc": {"type": "file", "url": f"{base_url}path{home_dir_path}/.bashrc"},
".gitconfig": {"type": "file", "url": f"{base_url}path{home_dir_path}/.gitconfig"},
".local": {"type": "directory", "url": f"{base_url}path{home_dir_path}/.local"},
".profile": {"type": "file", "url": f"{base_url}path{home_dir_path}/.profile"},
"README.txt": {"type": "file", "url": f"{base_url}path{home_dir_path}/README.txt"},
}
readme_contents = (
b"# vim: set ft=rst:\n\nSee https://help.pythonanywhere.com/ "
b'(or click the "Help" link at the top\nright) '
b"for help on how to use PythonAnywhere, including tips on copying and\n"
b"pasting from consoles, and writing your own web applications.\n"
)
@pytest.mark.files
class TestFilesPathGet(TestFiles):
def test_returns_contents_of_directory_when_path_to_dir_provided(
self, api_token, api_responses,
):
dir_url = urljoin(self.base_url, f"path{self.home_dir_path}")
api_responses.add(
responses.GET,
url=dir_url,
status=200,
body=json.dumps(self.default_home_dir_files),
headers={"Content-Type": "application/json"},
)
assert Files().path_get(self.home_dir_path) == self.default_home_dir_files
def test_returns_file_contents_when_file_path_provided(self, api_token, api_responses):
filepath = urljoin(self.home_dir_path, "README.txt")
file_url = urljoin(self.base_url, f"path{filepath}")
body = self.readme_contents
api_responses.add(
responses.GET,
url=file_url,
status=200,
body=body,
headers={"Content-Type": "application/octet-stream; charset=utf-8"},
)
assert Files().path_get(filepath) == body
def test_raises_because_wrong_path_provided(self, api_token, api_responses):
wrong_path = "/foo"
wrong_url = urljoin(self.base_url, f"path{wrong_path}")
body = bytes(f'{{"detail": "No such file or directory: {wrong_path}"}}', "utf")
api_responses.add(
responses.GET,
url=wrong_url,
status=404,
body=body,
headers={"Content-Type": "application/json"},
)
with pytest.raises(Exception) as e:
Files().path_get(wrong_path)
expected_error_msg = (
f"GET to fetch contents of {wrong_url} failed, got <Response [404]>: "
f"No such file or directory: {wrong_path}"
)
assert str(e.value) == expected_error_msg
@pytest.mark.files
class TestFilesPathPost(TestFiles):
def test_returns_200_when_file_updated(self, api_token, api_responses):
existing_file_path = f"{self.home_dir_path}/README.txt"
existing_file_url = self.default_home_dir_files["README.txt"]["url"]
api_responses.add(
responses.POST,
url=existing_file_url,
status=200,
)
content = "content".encode()
result = Files().path_post(existing_file_path, content)
assert result == 200
def test_returns_201_when_file_uploaded(self, api_token, api_responses):
new_file_path = f"{self.home_dir_path}/new.txt"
new_file_url = f"{self.base_url}path{self.home_dir_path}/new.txt"
api_responses.add(
responses.POST,
url=new_file_url,
status=201,
)
content = "content".encode()
result = Files().path_post(new_file_path, content)
assert result == 201
def test_raises_when_wrong_path(self, api_token, api_responses):
invalid_path = "foo"
url_with_invalid_path = urljoin(self.base_url, f"path{invalid_path}")
api_responses.add(
responses.POST,
url=url_with_invalid_path,
status=404,
)
content = "content".encode()
with pytest.raises(Exception) as e:
Files().path_post(invalid_path, content)
expected_error_msg = (
f"POST to upload contents to {url_with_invalid_path} failed, got <Response [404]>"
)
assert str(e.value) == expected_error_msg
def test_raises_when_no_contents_provided(self, api_token, api_responses):
valid_path = f"{self.home_dir_path}/README.txt"
valid_url = urljoin(self.base_url, f"path{valid_path}")
body = bytes('{"detail": "You must provide a file with the name \'content\'."}', "utf")
api_responses.add(
responses.POST,
url=valid_url,
status=400,
body=body,
headers={"Content-Type": "application/json"},
)
with pytest.raises(Exception) as e:
Files().path_post(valid_path, None)
expected_error_msg = (
f"POST to upload contents to {valid_url} failed, got <Response [400]>: "
"You must provide a file with the name 'content'."
)
assert str(e.value) == expected_error_msg
@pytest.mark.files
class TestFilesPathDelete(TestFiles):
def test_returns_204_on_successful_file_deletion(self, api_token, api_responses):
valid_path = f"{self.home_dir_path}/README.txt"
valid_url = urljoin(self.base_url, f"path{valid_path}")
api_responses.add(
responses.DELETE,
url=valid_url,
status=204,
)
result = Files().path_delete(valid_path)
assert result == 204
def test_raises_when_permission_denied(self, api_token, api_responses):
home_dir_url = urljoin(self.base_url, f"path{self.home_dir_path}")
body = bytes(
'{"message":"You do not have permission to delete this","code":"forbidden"}',
"utf"
)
api_responses.add(
responses.DELETE,
url=home_dir_url,
status=403,
body=body,
headers={"Content-Type": "application/json"},
)
with pytest.raises(Exception) as e:
Files().path_delete(self.home_dir_path)
expected_error_msg = (
f"DELETE on {home_dir_url} failed, got <Response [403]>: "
"You do not have permission to delete this"
)
assert str(e.value) == expected_error_msg
def test_raises_when_wrong_path_provided(self, api_token, api_responses):
invalid_path = "/home/some_other_user/"
invalid_url = urljoin(self.base_url, f"path{invalid_path}")
body = bytes('{"message":"File does not exist","code":"not_found"}', "utf")
api_responses.add(
responses.DELETE,
url=invalid_url,
status=404,
body=body,
headers={"Content-Type": "application/json"},
)
with pytest.raises(Exception) as e:
Files().path_delete(invalid_path)
expected_error_msg = (
f"DELETE on {invalid_url} failed, got <Response [404]>: "
"File does not exist"
)
assert str(e.value) == expected_error_msg
@pytest.mark.files
class TestFilesSharingPost(TestFiles):
def test_returns_url_when_path_successfully_shared_or_has_been_shared_before(
self, api_token, api_responses
):
valid_path = f"{self.home_dir_path}/README.txt"
shared_url = f"/user/{self.username}/shares/asdf1234/"
partial_response = dict(
method=responses.POST,
url=urljoin(self.base_url, "sharing/"),
body=bytes(f'{{"url": "{shared_url}"}}', "utf"),
headers={"Content-Type": "application/json"},
)
api_responses.add(**partial_response, status=201)
api_responses.add(**partial_response, status=200)
files = Files()
first_share = files.sharing_post(valid_path)
assert first_share[0] == 201
assert first_share[1] == shared_url
second_share = files.sharing_post(valid_path)
assert second_share[0] == 200
assert second_share[1] == shared_url
@pytest.mark.skip(reason="not implemented in the api yet")
def test_raises_exception_when_path_not_provided(self, api_token, api_responses):
url = urljoin(self.base_url, "sharing/")
api_responses.add(
responses.POST,
url=url,
status=400,
body=bytes('{"error": "required field (path) not found"}', "utf"),
headers={"Content-Type": "application/json"},
)
with pytest.raises(Exception) as e:
Files().sharing_post("")
expected_error_msg = (
f"POST to {url} to share '' failed, got <Response [400]>: "
"provided path is not valid" # or similar
)
assert str(e.value) == expected_error_msg
@pytest.mark.files
class TestFilesSharingGet(TestFiles):
def test_returns_sharing_url_when_path_is_shared(self, api_token, api_responses):
valid_path = f"{self.home_dir_path}/README.txt"
sharing_url = urljoin(self.base_url, f"sharing/")
get_url = urljoin(self.base_url, f"sharing/?path={valid_path}")
shared_url = f"/user/{self.username}/shares/asdf1234/"
partial_response = dict(
body=bytes(f'{{"url": "{shared_url}"}}', "utf"),
headers={"Content-Type": "application/json"},
)
api_responses.add(**partial_response, method=responses.POST, url=sharing_url, status=201)
api_responses.add(**partial_response, method=responses.GET, url=get_url, status=200)
files = Files()
files.sharing_post(valid_path)
assert files.sharing_get(valid_path) == shared_url
def test_returns_empty_string_when_path_not_shared(self, api_token, api_responses):
valid_path = f"{self.home_dir_path}/README.txt"
url = urljoin(self.base_url, f"sharing/?path={valid_path}")
api_responses.add(method=responses.GET, url=url, status=404)
assert Files().sharing_get(valid_path) == ""
@pytest.mark.files
class TestFilesSharingDelete(TestFiles):
    def test_returns_204_on_successful_unshare(self, api_token, api_responses):

valid_path = f"{self.home_dir_path}/README.txt"
url = urljoin(self.base_url, f"sharing/?path={valid_path}")
shared_url = f"/user/{self.username}/shares/asdf1234/"
api_responses.add(method=responses.DELETE, url=url, status=204)
assert Files().sharing_delete(valid_path) == 204
@pytest.mark.files
class TestFilesTreeGet(TestFiles):
def test_returns_list_of_the_regular_files_and_subdirectories_of_a_directory(
self, api_token, api_responses
):
url = urljoin(self.base_url, f"tree/?path={self.home_dir_path}")
self.default_home_dir_files["foo"] = {
"type": "directory", "url": f"{self.base_url}path{self.home_dir_path}/foo"
        }
tree = f'["{self.home_dir_path}/README.txt", "{self.home_dir_path}/foo/"]'
api_responses.add(
responses.GET,
url=url,
status=200,
body=bytes(tree, "utf"),
headers={"Content-Type": "application/json"},
)
result = Files().tree_get(self.home_dir_path)
assert result == [f"{self.home_dir_path}/{file}" for file in ["README.txt", "foo/"]]
def test_raises_when_path_not_pointing_to_directory(self, api_token, api_responses):
invalid_path = "/hmoe/oof"
url = urljoin(self.base_url, f"tree/?path={invalid_path}")
api_responses.add(
responses.GET,
url=url,
status=400,
body=bytes(f'{{"detail": "{invalid_path} is not a directory"}}', "utf"),
headers={"Content-Type": "application/json"},
)
with pytest.raises(Exception) as e:
Files().tree_get(invalid_path)
expected_error_msg = (
f"GET to {url} failed, got <Response [400]>: {invalid_path} is not a directory"
)
assert str(e.value) == expected_error_msg
def test_raises_when_path_does_not_exist(self, api_token, api_responses):
invalid_path = "/hmoe/oof"
url = urljoin(self.base_url, f"tree/?path={invalid_path}")
api_responses.add(
responses.GET,
url=url,
status=400,
body=bytes(f'{{"detail": "{invalid_path} does not exist"}}', "utf"),
headers={"Content-Type": "application/json"},
)
with pytest.raises(Exception) as e:
Files().tree_get(invalid_path)
expected_error_msg = (
f"GET to {url} failed, got <Response [400]>: {invalid_path} does not exist"
)
assert str(e.value) == expected_error_msg
| {
"content_hash": "090db8b91fc59a35f3a1b3ce8924290a",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 97,
"avg_line_length": 36.78212290502793,
"alnum_prop": 0.5896111786148238,
"repo_name": "pythonanywhere/helper_scripts",
"id": "718d4b12e77dbd963a44f93de44e1a9661b2cb1c",
"size": "13168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_api_files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "272917"
}
],
"symlink_target": ""
} |
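The tests above depend on two fixtures, api_token and api_responses, that live in the project's conftest and are not shown here. For readers who want to reproduce the pattern without those fixtures, the sketch below rewrites the first path_get case with the responses decorator directly; the API_TOKEN environment variable is an assumption inferred from the fixture name, not something verified in this file.
import getpass
import json
import os
from urllib.parse import urljoin
import responses
from pythonanywhere.api.base import get_api_endpoint
from pythonanywhere.api.files_api import Files
@responses.activate
def test_path_get_directory_listing_without_fixtures():
    os.environ["API_TOKEN"] = "fake-token"  # assumed token source; the real suite uses the api_token fixture
    username = getpass.getuser()
    base_url = get_api_endpoint().format(username=username, flavor="files")
    home = f"/home/{username}"
    listing = {"README.txt": {"type": "file", "url": f"{base_url}path{home}/README.txt"}}
    responses.add(
        responses.GET,
        url=urljoin(base_url, f"path{home}"),
        status=200,
        body=json.dumps(listing),
        headers={"Content-Type": "application/json"},
    )
    assert Files().path_get(home) == listing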
import os
from collections import namedtuple
from collections.abc import Sequence
from urllib.parse import urlparse
import markdown
from mkdocs import utils, theme, plugins
from mkdocs.config.base import Config, ValidationError
class BaseConfigOption:
def __init__(self):
self.warnings = []
self.default = None
def is_required(self):
return False
def validate(self, value):
return self.run_validation(value)
def reset_warnings(self):
self.warnings = []
def pre_validation(self, config, key_name):
"""
Before all options are validated, perform a pre-validation process.
The pre-validation process method should be implemented by subclasses.
"""
def run_validation(self, value):
"""
Perform validation for a value.
The run_validation method should be implemented by subclasses.
"""
return value
def post_validation(self, config, key_name):
"""
After all options have passed validation, perform a post-validation
        process to make any additional changes that depend on other config values.
The post-validation process method should be implemented by subclasses.
"""
class SubConfig(BaseConfigOption, Config):
def __init__(self, *config_options):
BaseConfigOption.__init__(self)
Config.__init__(self, config_options)
self.default = {}
def validate(self, value):
self.load_dict(value)
return self.run_validation(value)
def run_validation(self, value):
Config.validate(self)
return self
class ConfigItems(BaseConfigOption):
"""
Config Items Option
Validates a list of mappings that all must match the same set of
options.
"""
def __init__(self, *config_options, **kwargs):
BaseConfigOption.__init__(self)
self.item_config = SubConfig(*config_options)
self.required = kwargs.get('required', False)
def __repr__(self):
return '{}: {}'.format(self.__class__.__name__, self.item_config)
def run_validation(self, value):
if value is None:
if self.required:
raise ValidationError("Required configuration not provided.")
else:
return ()
if not isinstance(value, Sequence):
raise ValidationError('Expected a sequence of mappings, but a %s '
'was given.' % type(value))
result = []
for item in value:
result.append(self.item_config.validate(item))
return result
class OptionallyRequired(BaseConfigOption):
"""
A subclass of BaseConfigOption that adds support for default values and
required values. It is a base class for config options.
"""
def __init__(self, default=None, required=False):
super().__init__()
self.default = default
self.required = required
def is_required(self):
return self.required
def validate(self, value):
"""
Perform some initial validation.
If the option is empty (None) and isn't required, leave it as such. If
it is empty but has a default, use that. Finally, call the
        run_validation method on the subclass.
"""
if value is None:
if self.default is not None:
if hasattr(self.default, 'copy'):
# ensure no mutable values are assigned
value = self.default.copy()
else:
value = self.default
elif not self.required:
return
elif self.required:
raise ValidationError("Required configuration not provided.")
return self.run_validation(value)
class Type(OptionallyRequired):
"""
Type Config Option
Validate the type of a config option against a given Python type.
"""
def __init__(self, type_, length=None, **kwargs):
super().__init__(**kwargs)
self._type = type_
self.length = length
def run_validation(self, value):
if not isinstance(value, self._type):
msg = ("Expected type: {} but received: {}"
.format(self._type, type(value)))
elif self.length is not None and len(value) != self.length:
msg = ("Expected type: {0} with length {2} but received: {1} with "
"length {3}").format(self._type, value, self.length,
len(value))
else:
return value
raise ValidationError(msg)
class Choice(OptionallyRequired):
"""
Choice Config Option
Validate the config option against a strict set of values.
"""
def __init__(self, choices, **kwargs):
super().__init__(**kwargs)
try:
length = len(choices)
except TypeError:
length = 0
if not length or isinstance(choices, str):
            raise ValueError('Expected iterable of choices, got {}'.format(choices))
self.choices = choices
def run_validation(self, value):
if value not in self.choices:
msg = ("Expected one of: {} but received: {}"
.format(self.choices, value))
else:
return value
raise ValidationError(msg)
class Deprecated(BaseConfigOption):
def __init__(self, moved_to=None):
super().__init__()
self.default = None
self.moved_to = moved_to
def pre_validation(self, config, key_name):
if config.get(key_name) is None or self.moved_to is None:
return
warning = ('The configuration option {} has been deprecated and '
'will be removed in a future release of MkDocs.'
''.format(key_name))
self.warnings.append(warning)
if '.' not in self.moved_to:
target = config
target_key = self.moved_to
else:
move_to, target_key = self.moved_to.rsplit('.', 1)
target = config
for key in move_to.split('.'):
target = target.setdefault(key, {})
if not isinstance(target, dict):
# We can't move it for the user
return
target[target_key] = config.pop(key_name)
class IpAddress(OptionallyRequired):
"""
IpAddress Config Option
    Validate that an IP address is in an appropriate format.
"""
def run_validation(self, value):
try:
host, port = value.rsplit(':', 1)
except Exception:
raise ValidationError("Must be a string of format 'IP:PORT'")
try:
port = int(port)
except Exception:
raise ValidationError("'{}' is not a valid port".format(port))
class Address(namedtuple('Address', 'host port')):
def __str__(self):
return '{}:{}'.format(self.host, self.port)
return Address(host, port)
class URL(OptionallyRequired):
"""
URL Config Option
Validate a URL by requiring a scheme is present.
"""
def __init__(self, default='', required=False):
super().__init__(default, required)
def run_validation(self, value):
if value == '':
return value
try:
parsed_url = urlparse(value)
except (AttributeError, TypeError):
raise ValidationError("Unable to parse the URL.")
if parsed_url.scheme:
return value
raise ValidationError(
"The URL isn't valid, it should include the http:// (scheme)")
class RepoURL(URL):
"""
Repo URL Config Option
A small extension to the URL config that sets the repo_name and edit_uri,
based on the url if they haven't already been provided.
"""
def post_validation(self, config, key_name):
repo_host = urlparse(config['repo_url']).netloc.lower()
edit_uri = config.get('edit_uri')
# derive repo_name from repo_url if unset
if config['repo_url'] is not None and config.get('repo_name') is None:
if repo_host == 'github.com':
config['repo_name'] = 'GitHub'
elif repo_host == 'bitbucket.org':
config['repo_name'] = 'Bitbucket'
elif repo_host == 'gitlab.com':
config['repo_name'] = 'GitLab'
else:
config['repo_name'] = repo_host.split('.')[0].title()
# derive edit_uri from repo_name if unset
if config['repo_url'] is not None and edit_uri is None:
if repo_host == 'github.com' or repo_host == 'gitlab.com':
edit_uri = 'edit/master/docs/'
elif repo_host == 'bitbucket.org':
edit_uri = 'src/default/docs/'
else:
edit_uri = ''
# ensure a well-formed edit_uri
if edit_uri:
if not edit_uri.startswith(('?', '#')) \
and not config['repo_url'].endswith('/'):
config['repo_url'] += '/'
if not edit_uri.endswith('/'):
edit_uri += '/'
config['edit_uri'] = edit_uri
class FilesystemObject(Type):
"""
Base class for options that point to filesystem objects.
"""
def __init__(self, exists=False, **kwargs):
super().__init__(type_=str, **kwargs)
self.exists = exists
self.config_dir = None
def pre_validation(self, config, key_name):
self.config_dir = os.path.dirname(config.config_file_path) if config.config_file_path else None
def run_validation(self, value):
value = super().run_validation(value)
if self.config_dir and not os.path.isabs(value):
value = os.path.join(self.config_dir, value)
if self.exists and not self.existence_test(value):
raise ValidationError("The path {path} isn't an existing {name}.".
format(path=value, name=self.name))
value = os.path.abspath(value)
assert isinstance(value, str)
return value
class Dir(FilesystemObject):
"""
Dir Config Option
Validate a path to a directory, optionally verifying that it exists.
"""
existence_test = staticmethod(os.path.isdir)
name = 'directory'
def post_validation(self, config, key_name):
if config.config_file_path is None:
return
# Validate that the dir is not the parent dir of the config file.
if os.path.dirname(config.config_file_path) == config[key_name]:
raise ValidationError(
("The '{}' should not be the parent directory of the config "
"file. Use a child directory instead so that the config file "
"is a sibling of the config file.").format(key_name))
class File(FilesystemObject):
"""
File Config Option
Validate a path to a file, optionally verifying that it exists.
"""
existence_test = staticmethod(os.path.isfile)
name = 'file'
class SiteDir(Dir):
"""
SiteDir Config Option
Validates the site_dir and docs_dir directories do not contain each other.
"""
def post_validation(self, config, key_name):
super().post_validation(config, key_name)
# Validate that the docs_dir and site_dir don't contain the
# other as this will lead to copying back and forth on each
# and eventually make a deep nested mess.
if (config['docs_dir'] + os.sep).startswith(config['site_dir'].rstrip(os.sep) + os.sep):
raise ValidationError(
("The 'docs_dir' should not be within the 'site_dir' as this "
"can mean the source files are overwritten by the output or "
"it will be deleted if --clean is passed to mkdocs build."
"(site_dir: '{}', docs_dir: '{}')"
).format(config['site_dir'], config['docs_dir']))
elif (config['site_dir'] + os.sep).startswith(config['docs_dir'].rstrip(os.sep) + os.sep):
raise ValidationError(
("The 'site_dir' should not be within the 'docs_dir' as this "
"leads to the build directory being copied into itself and "
"duplicate nested files in the 'site_dir'."
"(site_dir: '{}', docs_dir: '{}')"
).format(config['site_dir'], config['docs_dir']))
class Theme(BaseConfigOption):
"""
Theme Config Option
Validate that the theme exists and build Theme instance.
"""
def __init__(self, default=None):
super().__init__()
self.default = default
def validate(self, value):
if value is None and self.default is not None:
value = {'name': self.default}
if isinstance(value, str):
value = {'name': value}
themes = utils.get_theme_names()
if isinstance(value, dict):
if 'name' in value:
if value['name'] is None or value['name'] in themes:
return value
raise ValidationError(
"Unrecognised theme name: '{}'. The available installed themes "
"are: {}".format(value['name'], ', '.join(themes))
)
raise ValidationError("No theme name set.")
raise ValidationError('Invalid type "{}". Expected a string or key/value pairs.'.format(type(value)))
def post_validation(self, config, key_name):
theme_config = config[key_name]
if not theme_config['name'] and 'custom_dir' not in theme_config:
raise ValidationError("At least one of 'theme.name' or 'theme.custom_dir' must be defined.")
# Ensure custom_dir is an absolute path
if 'custom_dir' in theme_config and not os.path.isabs(theme_config['custom_dir']):
config_dir = os.path.dirname(config.config_file_path)
theme_config['custom_dir'] = os.path.join(config_dir, theme_config['custom_dir'])
if 'custom_dir' in theme_config and not os.path.isdir(theme_config['custom_dir']):
raise ValidationError("The path set in {name}.custom_dir ('{path}') does not exist.".
format(path=theme_config['custom_dir'], name=key_name))
config[key_name] = theme.Theme(**theme_config)
class Nav(OptionallyRequired):
"""
Nav Config Option
Validate the Nav config. Automatically add all markdown files if empty.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.file_match = utils.is_markdown_file
def run_validation(self, value):
if not isinstance(value, list):
raise ValidationError(
"Expected a list, got {}".format(type(value)))
if len(value) == 0:
return
config_types = {type(l) for l in value}
        if config_types.issubset({str, dict}):
return value
raise ValidationError("Invalid pages config. {} {}".format(
config_types, {str, dict}
))
def post_validation(self, config, key_name):
# TODO: remove this when `pages` config setting is fully deprecated.
if key_name == 'pages' and config['pages'] is not None:
if config['nav'] is None:
# copy `pages` config to new 'nav' config setting
config['nav'] = config['pages']
warning = ("The 'pages' configuration option has been deprecated and will "
"be removed in a future release of MkDocs. Use 'nav' instead.")
self.warnings.append(warning)
class Private(OptionallyRequired):
"""
Private Config Option
A config option only for internal use. Raises an error if set by the user.
"""
def run_validation(self, value):
raise ValidationError('For internal use only.')
class MarkdownExtensions(OptionallyRequired):
"""
Markdown Extensions Config Option
A list of extensions. If a list item contains extension configs,
those are set on the private setting passed to `configkey`. The
`builtins` keyword accepts a list of extensions which cannot be
    overridden by the user. However, builtins can be duplicated to define
config options for them if desired.
"""
def __init__(self, builtins=None, configkey='mdx_configs', **kwargs):
super().__init__(**kwargs)
self.builtins = builtins or []
self.configkey = configkey
self.configdata = {}
def run_validation(self, value):
if not isinstance(value, (list, tuple)):
raise ValidationError('Invalid Markdown Extensions configuration')
extensions = []
for item in value:
if isinstance(item, dict):
if len(item) > 1:
raise ValidationError('Invalid Markdown Extensions configuration')
ext, cfg = item.popitem()
extensions.append(ext)
if cfg is None:
continue
if not isinstance(cfg, dict):
raise ValidationError('Invalid config options for Markdown '
"Extension '{}'.".format(ext))
self.configdata[ext] = cfg
elif isinstance(item, str):
extensions.append(item)
else:
raise ValidationError('Invalid Markdown Extensions configuration')
extensions = utils.reduce_list(self.builtins + extensions)
# Confirm that Markdown considers extensions to be valid
try:
markdown.Markdown(extensions=extensions, extension_configs=self.configdata)
except Exception as e:
raise ValidationError(e.args[0])
return extensions
def post_validation(self, config, key_name):
config[self.configkey] = self.configdata
class Plugins(OptionallyRequired):
"""
Plugins config option.
A list of plugins. If a plugin defines config options those are used when
initializing the plugin class.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.installed_plugins = plugins.get_plugins()
self.config_file_path = None
def pre_validation(self, config, key_name):
self.config_file_path = config.config_file_path
def run_validation(self, value):
if not isinstance(value, (list, tuple)):
raise ValidationError('Invalid Plugins configuration. Expected a list of plugins')
plgins = plugins.PluginCollection()
for item in value:
if isinstance(item, dict):
if len(item) > 1:
raise ValidationError('Invalid Plugins configuration')
name, cfg = item.popitem()
cfg = cfg or {} # Users may define a null (None) config
if not isinstance(cfg, dict):
raise ValidationError('Invalid config options for '
'the "{}" plugin.'.format(name))
item = name
else:
cfg = {}
if not isinstance(item, str):
raise ValidationError('Invalid Plugins configuration')
plgins[item] = self.load_plugin(item, cfg)
return plgins
def load_plugin(self, name, config):
if name not in self.installed_plugins:
raise ValidationError('The "{}" plugin is not installed'.format(name))
Plugin = self.installed_plugins[name].load()
if not issubclass(Plugin, plugins.BasePlugin):
raise ValidationError('{}.{} must be a subclass of {}.{}'.format(
Plugin.__module__, Plugin.__name__, plugins.BasePlugin.__module__,
plugins.BasePlugin.__name__))
plugin = Plugin()
errors, warnings = plugin.load_config(config, self.config_file_path)
self.warnings.extend(warnings)
errors_message = '\n'.join(
"Plugin value: '{}'. Error: {}".format(x, y)
for x, y in errors
)
if errors_message:
raise ValidationError(errors_message)
return plugin
| {
"content_hash": "2780905289408e269f0fa2ff262534e8",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 109,
"avg_line_length": 33.00813008130081,
"alnum_prop": 0.5784236453201971,
"repo_name": "jimporter/mkdocs",
"id": "5fd61f1d7751b3a71ab6b8cbbca345113124dd1a",
"size": "20300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkdocs/config/config_options.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "9145"
},
{
"name": "HTML",
"bytes": "28317"
},
{
"name": "JavaScript",
"bytes": "115264"
},
{
"name": "Python",
"bytes": "375980"
}
],
"symlink_target": ""
} |
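Most of the classes above only make sense in combination: a schema is a tuple of (name, option) pairs handed to mkdocs.config.base.Config, which then runs each option's pre_validation/run_validation/post_validation hooks. The sketch below shows that composition for a few simple option types; the Config constructor and the (failed, warnings) return value of validate() are assumptions drawn from how SubConfig and ConfigItems use Config above, so check them against your mkdocs version before relying on the details.
from mkdocs.config import config_options
from mkdocs.config.base import Config
SCHEMA = (
    ('site_name', config_options.Type(str, required=True)),
    ('dev_addr', config_options.IpAddress(default='127.0.0.1:8000')),
    ('repo_url', config_options.URL()),
)
cfg = Config(SCHEMA)
cfg.load_dict({'site_name': 'Demo', 'repo_url': 'https://example.com/demo'})
failed, warnings = cfg.validate()   # assumed to return (failed, warnings)
assert not failed
print(cfg['dev_addr'])              # -> 127.0.0.1:8000 (the Address namedtuple built by IpAddress)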
"""Class to monitor a MongoDB server on a background thread."""
import atexit
import time
import weakref
from typing import Any, Mapping, cast
from pymongo import common, periodic_executor
from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled
from pymongo.hello import Hello
from pymongo.lock import _create_lock
from pymongo.periodic_executor import _shutdown_executors
from pymongo.read_preferences import MovingAverage
from pymongo.server_description import ServerDescription
from pymongo.srv_resolver import _SrvResolver
def _sanitize(error):
"""PYTHON-2433 Clear error traceback info."""
error.__traceback__ = None
error.__context__ = None
error.__cause__ = None
class MonitorBase(object):
def __init__(self, topology, name, interval, min_interval):
"""Base class to do periodic work on a background thread.
        The background thread is signaled to stop when the Topology or
this instance is freed.
"""
# We strongly reference the executor and it weakly references us via
# this closure. When the monitor is freed, stop the executor soon.
def target():
monitor = self_ref()
if monitor is None:
return False # Stop the executor.
monitor._run() # type:ignore[attr-defined]
return True
executor = periodic_executor.PeriodicExecutor(
interval=interval, min_interval=min_interval, target=target, name=name
)
self._executor = executor
def _on_topology_gc(dummy=None):
# This prevents GC from waiting 10 seconds for hello to complete
# See test_cleanup_executors_on_client_del.
monitor = self_ref()
if monitor:
monitor.gc_safe_close()
# Avoid cycles. When self or topology is freed, stop executor soon.
self_ref = weakref.ref(self, executor.close)
self._topology = weakref.proxy(topology, _on_topology_gc)
_register(self)
def open(self):
"""Start monitoring, or restart after a fork.
Multiple calls have no effect.
"""
self._executor.open()
def gc_safe_close(self):
"""GC safe close."""
self._executor.close()
def close(self):
"""Close and stop monitoring.
open() restarts the monitor after closing.
"""
self.gc_safe_close()
def join(self, timeout=None):
"""Wait for the monitor to stop."""
self._executor.join(timeout)
def request_check(self):
"""If the monitor is sleeping, wake it soon."""
self._executor.wake()
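# The executor/weakref arrangement above deserves a standalone illustration:
# MonitorBase strongly references its executor, while the executor's target
# closure only reaches the monitor through self_ref (a weak reference), so
# freeing the monitor lets the background loop notice and stop on its own.
# The stdlib-only sketch below mimics that ownership pattern; _PeriodicWorker
# is a hypothetical stand-in for periodic_executor.PeriodicExecutor, not
# pymongo code, and the demo only runs when the sketch is executed directly.
import gc
import threading
class _PeriodicWorker:
    """Call `target` every `interval` seconds until it returns False."""
    def __init__(self, target, interval=0.01):
        self._target = target
        self._interval = interval
        self._thread = threading.Thread(target=self._loop, daemon=True)
    def start(self):
        self._thread.start()
    def join(self, timeout=None):
        self._thread.join(timeout)
    def _loop(self):
        while self._target():
            time.sleep(self._interval)
class _Owner:
    def __init__(self):
        self.ticks = 0
        def target():
            owner = self_ref()          # only a weak reference reaches the owner
            if owner is None:
                return False            # owner was garbage collected: stop looping
            owner.ticks += 1
            return True
        self_ref = weakref.ref(self)    # same late-bound closure trick as MonitorBase
        self._worker = _PeriodicWorker(target)
        self._worker.start()
def _weakref_ownership_demo():
    owner = _Owner()
    time.sleep(0.05)
    worker = owner._worker
    del owner                           # drop the only strong reference to the owner
    gc.collect()
    worker.join(timeout=1)              # target() returns False and the loop exits
    assert not worker._thread.is_alive()
if __name__ == "__main__":
    _weakref_ownership_demo()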
class Monitor(MonitorBase):
def __init__(self, server_description, topology, pool, topology_settings):
"""Class to monitor a MongoDB server on a background thread.
Pass an initial ServerDescription, a Topology, a Pool, and
TopologySettings.
The Topology is weakly referenced. The Pool must be exclusive to this
Monitor.
"""
super(Monitor, self).__init__(
topology,
"pymongo_server_monitor_thread",
topology_settings.heartbeat_frequency,
common.MIN_HEARTBEAT_INTERVAL,
)
self._server_description = server_description
self._pool = pool
self._settings = topology_settings
self._listeners = self._settings._pool_options._event_listeners
pub = self._listeners is not None
self._publish = pub and self._listeners.enabled_for_server_heartbeat
self._cancel_context = None
self._rtt_monitor = _RttMonitor(
topology,
topology_settings,
topology._create_pool_for_monitor(server_description.address),
)
self.heartbeater = None
def cancel_check(self):
"""Cancel any concurrent hello check.
Note: this is called from a weakref.proxy callback and MUST NOT take
any locks.
"""
context = self._cancel_context
if context:
# Note: we cannot close the socket because doing so may cause
# concurrent reads/writes to hang until a timeout occurs
# (depending on the platform).
context.cancel()
def _start_rtt_monitor(self):
"""Start an _RttMonitor that periodically runs ping."""
# If this monitor is closed directly before (or during) this open()
# call, the _RttMonitor will not be closed. Checking if this monitor
# was closed directly after resolves the race.
self._rtt_monitor.open()
if self._executor._stopped:
self._rtt_monitor.close()
def gc_safe_close(self):
self._executor.close()
self._rtt_monitor.gc_safe_close()
self.cancel_check()
def close(self):
self.gc_safe_close()
self._rtt_monitor.close()
# Increment the generation and maybe close the socket. If the executor
# thread has the socket checked out, it will be closed when checked in.
self._reset_connection()
def _reset_connection(self):
# Clear our pooled connection.
self._pool.reset()
def _run(self):
try:
prev_sd = self._server_description
try:
self._server_description = self._check_server()
except _OperationCancelled as exc:
_sanitize(exc)
# Already closed the connection, wait for the next check.
self._server_description = ServerDescription(
self._server_description.address, error=exc
)
if prev_sd.is_server_type_known:
# Immediately retry since we've already waited 500ms to
# discover that we've been cancelled.
self._executor.skip_sleep()
return
# Update the Topology and clear the server pool on error.
self._topology.on_change(
self._server_description, reset_pool=self._server_description.error
)
if (
self._server_description.is_server_type_known
and self._server_description.topology_version
):
self._start_rtt_monitor()
# Immediately check for the next streaming response.
self._executor.skip_sleep()
if self._server_description.error and prev_sd.is_server_type_known:
# Immediately retry on network errors.
self._executor.skip_sleep()
except ReferenceError:
# Topology was garbage-collected.
self.close()
def _check_server(self):
"""Call hello or read the next streaming response.
Returns a ServerDescription.
"""
start = time.monotonic()
try:
try:
return self._check_once()
except (OperationFailure, NotPrimaryError) as exc:
# Update max cluster time even when hello fails.
details = cast(Mapping[str, Any], exc.details)
self._topology.receive_cluster_time(details.get("$clusterTime"))
raise
except ReferenceError:
raise
except Exception as error:
_sanitize(error)
sd = self._server_description
address = sd.address
duration = time.monotonic() - start
if self._publish:
awaited = sd.is_server_type_known and sd.topology_version
self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited)
self._reset_connection()
if isinstance(error, _OperationCancelled):
raise
self._rtt_monitor.reset()
# Server type defaults to Unknown.
return ServerDescription(address, error=error)
def _check_once(self):
"""A single attempt to call hello.
Returns a ServerDescription, or raises an exception.
"""
address = self._server_description.address
if self._publish:
self._listeners.publish_server_heartbeat_started(address)
if self._cancel_context and self._cancel_context.cancelled:
self._reset_connection()
with self._pool.get_socket() as sock_info:
self._cancel_context = sock_info.cancel_context
response, round_trip_time = self._check_with_socket(sock_info)
if not response.awaitable:
self._rtt_monitor.add_sample(round_trip_time)
sd = ServerDescription(address, response, self._rtt_monitor.average())
if self._publish:
self._listeners.publish_server_heartbeat_succeeded(
address, round_trip_time, response, response.awaitable
)
return sd
def _check_with_socket(self, conn):
"""Return (Hello, round_trip_time).
Can raise ConnectionFailure or OperationFailure.
"""
cluster_time = self._topology.max_cluster_time()
start = time.monotonic()
if conn.more_to_come:
# Read the next streaming hello (MongoDB 4.4+).
response = Hello(conn._next_reply(), awaitable=True)
elif conn.performed_handshake and self._server_description.topology_version:
# Initiate streaming hello (MongoDB 4.4+).
response = conn._hello(
cluster_time,
self._server_description.topology_version,
self._settings.heartbeat_frequency,
)
else:
# New connection handshake or polling hello (MongoDB <4.4).
response = conn._hello(cluster_time, None, None)
return response, time.monotonic() - start
class SrvMonitor(MonitorBase):
def __init__(self, topology, topology_settings):
"""Class to poll SRV records on a background thread.
Pass a Topology and a TopologySettings.
The Topology is weakly referenced.
"""
super(SrvMonitor, self).__init__(
topology,
"pymongo_srv_polling_thread",
common.MIN_SRV_RESCAN_INTERVAL,
topology_settings.heartbeat_frequency,
)
self._settings = topology_settings
self._seedlist = self._settings._seeds
self._fqdn = self._settings.fqdn
def _run(self):
seedlist = self._get_seedlist()
if seedlist:
self._seedlist = seedlist
try:
self._topology.on_srv_update(self._seedlist)
except ReferenceError:
# Topology was garbage-collected.
self.close()
def _get_seedlist(self):
"""Poll SRV records for a seedlist.
Returns a list of ServerDescriptions.
"""
try:
resolver = _SrvResolver(
self._fqdn,
self._settings.pool_options.connect_timeout,
self._settings.srv_service_name,
)
seedlist, ttl = resolver.get_hosts_and_min_ttl()
if len(seedlist) == 0:
# As per the spec: this should be treated as a failure.
raise Exception
except Exception:
# As per the spec, upon encountering an error:
# - An error must not be raised
# - SRV records must be rescanned every heartbeatFrequencyMS
# - Topology must be left unchanged
self.request_check()
return None
else:
self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL))
return seedlist
class _RttMonitor(MonitorBase):
def __init__(self, topology, topology_settings, pool):
"""Maintain round trip times for a server.
The Topology is weakly referenced.
"""
super(_RttMonitor, self).__init__(
topology,
"pymongo_server_rtt_thread",
topology_settings.heartbeat_frequency,
common.MIN_HEARTBEAT_INTERVAL,
)
self._pool = pool
self._moving_average = MovingAverage()
self._lock = _create_lock()
def close(self):
self.gc_safe_close()
# Increment the generation and maybe close the socket. If the executor
# thread has the socket checked out, it will be closed when checked in.
self._pool.reset()
def add_sample(self, sample):
"""Add a RTT sample."""
with self._lock:
self._moving_average.add_sample(sample)
def average(self):
"""Get the calculated average, or None if no samples yet."""
with self._lock:
return self._moving_average.get()
def reset(self):
"""Reset the average RTT."""
with self._lock:
return self._moving_average.reset()
def _run(self):
try:
            # NOTE: This thread is only run when using the streaming
# heartbeat protocol (MongoDB 4.4+).
# XXX: Skip check if the server is unknown?
rtt = self._ping()
self.add_sample(rtt)
except ReferenceError:
# Topology was garbage-collected.
self.close()
except Exception:
self._pool.reset()
def _ping(self):
"""Run a "hello" command and return the RTT."""
with self._pool.get_socket() as sock_info:
if self._executor._stopped:
raise Exception("_RttMonitor closed")
start = time.monotonic()
sock_info.hello()
return time.monotonic() - start
# Close monitors to cancel any in progress streaming checks before joining
# executor threads. For an explanation of how this works see the comment
# about _EXECUTORS in periodic_executor.py.
_MONITORS = set()
def _register(monitor):
ref = weakref.ref(monitor, _unregister)
_MONITORS.add(ref)
def _unregister(monitor_ref):
_MONITORS.remove(monitor_ref)
def _shutdown_monitors():
if _MONITORS is None:
return
# Copy the set. Closing monitors removes them.
monitors = list(_MONITORS)
# Close all monitors.
for ref in monitors:
monitor = ref()
if monitor:
monitor.gc_safe_close()
monitor = None
def _shutdown_resources():
# _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown.
shutdown = _shutdown_monitors
if shutdown: # type:ignore[truthy-function]
shutdown()
shutdown = _shutdown_executors
if shutdown: # type:ignore[truthy-function]
shutdown()
atexit.register(_shutdown_resources)
| {
"content_hash": "24929a4310c65b153d1d008e60789991",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 98,
"avg_line_length": 34.57746478873239,
"alnum_prop": 0.5939579090291921,
"repo_name": "ShaneHarvey/mongo-python-driver",
"id": "44390e9180e264d3bd88698ad81e3cb20437bbd6",
"size": "15314",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymongo/monitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "183641"
},
{
"name": "Python",
"bytes": "2982667"
},
{
"name": "Shell",
"bytes": "30026"
}
],
"symlink_target": ""
} |
class Style(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'Font': 'Font',
'Name': 'str',
'CultureCustom': 'str',
'Custom': 'str',
'BackgroundColor': 'Color',
'ForegroundColor': 'Color',
'IsFormulaHidden': 'bool',
'IsDateTime': 'bool',
'IsTextWrapped': 'bool',
'IsGradient': 'bool',
'IsLocked': 'bool',
'IsPercent': 'bool',
'ShrinkToFit': 'bool',
'IndentLevel': 'int',
'Number': 'int',
'RotationAngle': 'int',
'Pattern': 'str',
'TextDirection': 'str',
'VerticalAlignment': 'str',
'HorizontalAlignment': 'str',
'BorderCollection': 'list[Border]',
'link': 'Link'
}
self.attributeMap = {
'Font': 'Font','Name': 'Name','CultureCustom': 'CultureCustom','Custom': 'Custom','BackgroundColor': 'BackgroundColor','ForegroundColor': 'ForegroundColor','IsFormulaHidden': 'IsFormulaHidden','IsDateTime': 'IsDateTime','IsTextWrapped': 'IsTextWrapped','IsGradient': 'IsGradient','IsLocked': 'IsLocked','IsPercent': 'IsPercent','ShrinkToFit': 'ShrinkToFit','IndentLevel': 'IndentLevel','Number': 'Number','RotationAngle': 'RotationAngle','Pattern': 'Pattern','TextDirection': 'TextDirection','VerticalAlignment': 'VerticalAlignment','HorizontalAlignment': 'HorizontalAlignment','BorderCollection': 'BorderCollection','link': 'link'}
self.Font = None # Font
self.Name = None # str
self.CultureCustom = None # str
self.Custom = None # str
self.BackgroundColor = None # Color
self.ForegroundColor = None # Color
self.IsFormulaHidden = None # bool
self.IsDateTime = None # bool
self.IsTextWrapped = None # bool
self.IsGradient = None # bool
self.IsLocked = None # bool
self.IsPercent = None # bool
self.ShrinkToFit = None # bool
self.IndentLevel = None # int
self.Number = None # int
self.RotationAngle = None # int
self.Pattern = None # str
self.TextDirection = None # str
self.VerticalAlignment = None # str
self.HorizontalAlignment = None # str
self.BorderCollection = None # list[Border]
self.link = None # Link
| {
"content_hash": "f7c854cf09abfc38bf3fb10ccebaa9b7",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 651,
"avg_line_length": 43.714285714285715,
"alnum_prop": 0.5773420479302832,
"repo_name": "asposecells/Aspose_Cells_Cloud",
"id": "7ab9e62793733f4926ce66732dceb708e1cf4cb1",
"size": "2777",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "SDKs/Aspose.Cells-Cloud-SDK-for-Python/asposecellscloud/models/Style.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "C#",
"bytes": "897367"
},
{
"name": "HTML",
"bytes": "110"
},
{
"name": "Java",
"bytes": "900042"
},
{
"name": "JavaScript",
"bytes": "664643"
},
{
"name": "Objective-C",
"bytes": "1142444"
},
{
"name": "PHP",
"bytes": "626745"
},
{
"name": "Python",
"bytes": "833397"
},
{
"name": "Ruby",
"bytes": "799033"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self,
plotly_name="tickformatstops",
parent_name="histogram.marker.colorbar",
**kwargs
):
super(TickformatstopsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
""",
),
**kwargs
)
| {
"content_hash": "9f984e1c43c26acbdb86e620c5e2eeb1",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 84,
"avg_line_length": 43.01851851851852,
"alnum_prop": 0.5492897115798536,
"repo_name": "plotly/python-api",
"id": "1ff1860673ad567b9c8467e6fb28325f1961175e",
"size": "2323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/histogram/marker/colorbar/_tickformatstops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import sys
#Removes genes for which no score could be computed
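#Example invocation (hypothetical input name):
#   python filterGeneView.py Affinity_Gene_View.txt
#The filtered table is written next to the input as Affinity_Gene_View_Filtered.txt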
def isValidAffinity(lineSplit):
for i in range(1,len(lineSplit)):
if ("ENSG" in lineSplit[i]):
return False
for i in range(1,len(lineSplit)):
if (float(lineSplit[i]) != 0.0):
return True
return False
def main():
#Checking Affinity
infile=open(sys.argv[1],"r")
output=open(sys.argv[1].replace(".txt","_Filtered.txt"),"w")
#Copy header line
output.write(infile.readline())
#Check individual lines
for l in infile:
if (isValidAffinity(l.split())):
output.write(l)
infile.close()
output.close()
main()
| {
"content_hash": "598d8fe94b564b6cd46bff2bd3266321",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 61,
"avg_line_length": 21.925925925925927,
"alnum_prop": 0.6891891891891891,
"repo_name": "SchulzLab/TEPIC",
"id": "69c69b54ff3968f56a6cd8aaf0e2283df7af5bcc",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/filterGeneView.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "15935"
},
{
"name": "Python",
"bytes": "86251"
},
{
"name": "R",
"bytes": "59133"
},
{
"name": "Shell",
"bytes": "62615"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/food/shared_dessert_air_cake.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "40f1d82be068f4600c9c994ee4f40dcc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 23.46153846153846,
"alnum_prop": 0.6918032786885245,
"repo_name": "obi-two/Rebelion",
"id": "467a37fd7170c5eca7262e395b1d735c752ef4b3",
"size": "450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/food/shared_dessert_air_cake.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=too-many-lines
from collections import OrderedDict
import backoff
from streamalert.alert_processor.helpers import compose_alert
from streamalert.alert_processor.outputs.output_base import (
OutputDispatcher,
OutputProperty,
OutputRequestFailure,
StreamAlertOutput
)
from streamalert.shared.backoff_handlers import (
backoff_handler,
success_handler,
giveup_handler
)
from streamalert.shared.logger import get_logger
LOGGER = get_logger(__name__)
# https://support.pagerduty.com/docs/dynamic-notifications
SEVERITY_CRITICAL = 'critical'
SEVERITY_ERROR = 'error'
SEVERITY_WARNING = 'warning'
SEVERITY_INFO = 'info'
SEVERITY_UNKNOWN = 'unknown' # empty string and any string not in the above defaults to "unknown"
class PagerdutySearchDelay(Exception):
"""PagerdutyAlertDelay handles any delays looking up PagerDuty Incidents"""
class EventsV2DataProvider:
"""This class is meant to be mixed-into pagerduty outputs that integrate with v2 of the API
This is called the CommonEventFormat (PD-CEF). Documentation can be found here:
https://support.pagerduty.com/docs/pd-cef
"""
def events_v2_data(self, alert, descriptor, routing_key, with_record=True):
"""Helper method to generate the payload to create an event using PagerDuty Events API v2
(!) NOTE: this method will not work unless this class is mixed into an OutputDispatcher
Publishing:
By default the pagerduty event is setup with a blob of data comprising the rule
description and the record in the custom details. You can customize behavior with
the following fields:
            - @pagerduty-v2.summary (str):
Modifies the title of the event
- @pagerduty-v2.custom_details (dict):
                Fills out the PagerDuty custom_details with this structure.
(!) NOTE: Due to PagerDuty's UI, it is extremely hard to read very deeply
nested JSON dicts. It is also extremely hard to read large blobs of data.
Try to collapse deeply nested structures into single-level keys, and
try to truncate blobs of data.
            - @pagerduty-v2.severity (str):
By default the severity of alerts are "critical". You can override this with
any of the following:
'info', 'warning', 'error', 'critical'
Args:
descriptor (str): The descriptor of the output sending these data
alert (Alert): Alert relevant to the triggered rule
routing_key (str): Routing key for this PagerDuty integration
with_record (boolean): Option to add the record data or not
Returns:
dict: Contains JSON blob to be used as event
"""
publication = compose_alert(alert, self, descriptor)
# Presentation defaults
default_summary = 'StreamAlert Rule Triggered - {}'.format(alert.rule_name)
default_custom_details = OrderedDict()
default_custom_details['description'] = alert.rule_description
if with_record:
default_custom_details['record'] = alert.record
default_severity = SEVERITY_CRITICAL
# Special field that Publishers can use to customize the header
summary = publication.get('@pagerduty-v2.summary', default_summary)
details = publication.get('@pagerduty-v2.custom_details', default_custom_details)
severity = publication.get('@pagerduty-v2.severity', default_severity)
client_url = publication.get('@pagerduty-v2.client_url', None)
images = self._standardize_images(publication.get('@pagerduty-v2.images', []))
links = self._standardize_links(publication.get('@pagerduty-v2.links', []))
component = publication.get('@pagerduty-v2.component', None)
group = publication.get('@pagerduty-v2.group', None)
alert_class = publication.get('@pagerduty-v2.class', None)
        # We namespace the dedup_key by the descriptor, preventing a single alert that is
        # sent to multiple PagerDuty services from having colliding dedup_keys, which
# would PROBABLY be ok (because of segregated environments) but why take the risk?
dedup_key = '{}:{}'.format(descriptor, alert.alert_id)
# Structure: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
return {
'routing_key': routing_key,
'event_action': 'trigger',
# Passing a dedup_key will ensure that only one event is ever created. Any subsequent
# request with the same dedup_key + routing_key + event_action will simply return
# the original result.
# Once the alert is resolved, the dedup_key can be re-used.
# https://v2.developer.pagerduty.com/docs/events-api-v2#alert-de-duplication
'dedup_key': dedup_key,
'payload': {
'summary': summary,
'source': alert.log_source,
'severity': severity,
'custom_details': details,
# When provided, must be in valid ISO 8601 format
# 'timestamp': '',
'component': component,
'group': group,
'class': alert_class,
},
'client': 'StreamAlert',
'client_url': client_url,
'images': images,
'links': links,
}
@staticmethod
def _standardize_images(images):
"""Strips invalid images out of the images argument
Images should be dicts with 3 keys:
- src: The full http URL of the image
- href: A URL that the image opens when clicked (Optional)
- alt: Alt text (Optional)
"""
if not isinstance(images, list):
return []
return [
{
# Notably, if href is provided but is an invalid URL, the entire image will
# be entirely omitted from the incident... beware.
'src': image['src'],
'href': image['href'] if 'href' in image else '',
'alt': image['alt'] if 'alt' in image else '',
}
for image in images
if isinstance(image, dict) and 'src' in image
]
@staticmethod
def _standardize_links(links):
"""Strips invalid links out of the links argument
        Links should be dicts with 2 keys:
- href: A URL of the link
- text: Text of the link (Optional: Defaults to the href if no text given)
"""
if not isinstance(links, list):
return []
return [
{
'href': link['href'],
'text': link['text'] if 'text' in link else link['href'],
}
for link in links
if isinstance(link, dict) and 'href' in link
]
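# Rough shape of the payload built by EventsV2DataProvider.events_v2_data above. The values are
# hypothetical and the constant is illustrative only; it is not referenced by the outputs below.
_EXAMPLE_EVENTS_V2_PAYLOAD = {
    'routing_key': '<integration key>',
    'event_action': 'trigger',
    'dedup_key': 'my_descriptor:<alert id>',
    'payload': {
        'summary': 'StreamAlert Rule Triggered - example_rule',
        'source': 'example:log:source',
        'severity': SEVERITY_CRITICAL,
        'custom_details': {'description': 'example rule description', 'record': {'key': 'value'}},
        'component': None,
        'group': None,
        'class': None,
    },
    'client': 'StreamAlert',
    'client_url': None,
    'images': [],
    'links': [],
}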
@StreamAlertOutput
class PagerDutyOutput(OutputDispatcher):
"""PagerDutyOutput handles all alert dispatching for PagerDuty Events API v1"""
__service__ = 'pagerduty'
@classmethod
def _get_default_properties(cls):
"""Get the standard url used for PagerDuty. This value the same for everyone, so
is hard-coded here and does not need to be configured by the user
Returns:
dict: Contains various default items for this output (ie: url)
"""
return {'url': PagerDutyEventsV1ApiClient.EVENTS_V1_API_ENDPOINT}
@classmethod
def get_user_defined_properties(cls):
"""Get properties that must be assigned by the user when configuring a new PagerDuty
output. This should be sensitive or unique information for this use-case that needs
to come from the user.
Every output should return a dict that contains a 'descriptor' with a description of the
integration being configured.
        PagerDuty also requires a service_key that represents this integration. This
value should be masked during input and is a credential requirement.
Returns:
OrderedDict: Contains various OutputProperty items
"""
return OrderedDict([
('descriptor',
OutputProperty(description='a short and unique descriptor for this '
'PagerDuty integration')),
# A version 4 UUID expressed as a 32 digit hexadecimal number. This is the
# integration key for an integration on a given service and can be found on
# the pagerduty integrations UI.
('service_key',
OutputProperty(description='the service key for this PagerDuty integration',
mask_input=True,
cred_requirement=True))
])
def _dispatch(self, alert, descriptor):
"""Send alert to Pagerduty
Publishing:
This output can be override with the following fields:
- @pagerduty.description (str):
The provided string will be rendered as the event's title.
- @pagerduty.details (dict):
By default this output renders rule description and rule record in a deeply
nested json structure. You can override this with your own dict.
(!) NOTE: Due to PagerDuty's UI, it is extremely hard to read very deeply
nested JSON dicts. It is also extremely hard to read large blobs of data.
Try to collapse deeply nested structures into single-level keys, and
try to truncate blobs of data.
- @pagerduty.client_url (str):
A URL. It should be a link to the same alert in a different service.
When given, there will be a "view in streamalert" link given at the bottom.
Currently this 'streamalert' string is hardcoded into the api client
as the 'client' field.
This is not included in the default implementation.
- @pagerduty.contexts (list[dict]):
This field can be used to automatically attach images and links to the incident
event. This should be a list of dicts. Each dict should follow ONE OF these
formats:
Link:
{
'type': 'link',
'href': 'https://streamalert.io/',
'text': 'Link Text'
}
Image embed
{
'type': 'image',
'src': 'https://streamalert.io/en/stable/_images/sa-complete-arch.png',
}
This is not included in the default implementation.
Args:
alert (Alert): Alert instance which triggered a rule
descriptor (str): Output descriptor
Returns:
bool: True if alert was sent successfully, False otherwise
"""
creds = self._load_creds(descriptor)
if not creds:
return False
# Presentation defaults
default_description = 'StreamAlert Rule Triggered - {}'.format(alert.rule_name)
default_details = {
'description': alert.rule_description,
'record': alert.record,
}
default_contexts = []
default_client_url = ''
# Override presentation with publisher
publication = compose_alert(alert, self, descriptor)
description = publication.get('@pagerduty.description', default_description)
details = publication.get('@pagerduty.details', default_details)
client_url = publication.get('@pagerduty.client_url', default_client_url)
contexts = publication.get('@pagerduty.contexts', default_contexts)
contexts = self._strip_invalid_contexts(contexts)
http = JsonHttpProvider(self)
client = PagerDutyEventsV1ApiClient(creds['service_key'], http, api_endpoint=creds['url'])
return client.send_event(description, details, contexts, client_url)
@staticmethod
def _strip_invalid_contexts(contexts):
"""When an array of contexts, will return a new array containing only valid ones."""
if not isinstance(contexts, list):
LOGGER.warning('Invalid pagerduty.contexts provided: Not an array')
return []
def is_valid_context(context):
if not 'type' in context:
return False
if context['type'] == 'link':
if 'href' not in context or 'text' not in context:
return False
elif context['type'] == 'image':
if 'src' not in context:
return False
else:
return False
return True
def standardize_context(context):
if context['type'] == 'link':
return {
'type': 'link',
'href': context['href'],
'text': context['text'],
}
return {
'type': 'image',
'src': context['src'],
}
return [standardize_context(x) for x in contexts if is_valid_context(x)]
@StreamAlertOutput
class PagerDutyOutputV2(OutputDispatcher, EventsV2DataProvider):
"""PagerDutyOutput handles all alert dispatching for PagerDuty Events API v2"""
__service__ = 'pagerduty-v2'
@classmethod
def _get_default_properties(cls):
"""Get the standard url used for PagerDuty Events API v2. This value the same for
everyone, so is hard-coded here and does not need to be configured by the user
Returns:
dict: Contains various default items for this output (ie: url)
"""
return {'url': PagerDutyEventsV2ApiClient.EVENTS_V2_API_ENQUEUE_ENDPOINT}
@classmethod
def get_user_defined_properties(cls):
"""Get properties that must be assigned by the user when configuring a new PagerDuty
event output. This should be sensitive or unique information for this use-case that
needs to come from the user.
Every output should return a dict that contains a 'descriptor' with a description of the
integration being configured.
PagerDuty also requires a routing_key that represents this integration. This
value should be masked during input and is a credential requirement.
Returns:
OrderedDict: Contains various OutputProperty items
"""
return OrderedDict([
('descriptor',
OutputProperty(description='a short and unique descriptor for this '
'PagerDuty integration')),
('routing_key',
OutputProperty(description='the routing key for this PagerDuty integration',
mask_input=True,
cred_requirement=True))
])
def _dispatch(self, alert, descriptor):
"""Send alert to Pagerduty
Publishing:
@see EventsV2DataProvider for more details
Args:
alert (Alert): Alert instance which triggered a rule
descriptor (str): Output descriptor
Returns:
bool: True if alert was sent successfully, False otherwise
"""
creds = self._load_creds(descriptor)
if not creds:
return False
data = self.events_v2_data(alert, descriptor, creds['routing_key'])
http = JsonHttpProvider(self)
client = PagerDutyEventsV2ApiClient(http, enqueue_endpoint=creds['url'])
result = client.enqueue_event(data)
if result is False:
return False
return True
@StreamAlertOutput
class PagerDutyIncidentOutput(OutputDispatcher, EventsV2DataProvider):
"""PagerDutyIncidentOutput handles all alert dispatching for PagerDuty Incidents REST API
In addition to creating an Alert through the EventsV2 API, this output will then find the
PagerDuty Incident that is created and automatically reassign, add more details, set priority,
add a note, and attach any additional responders to the incident.
Context:
- assigned_user (string):
Email address of user to assign the incident to. If omitted will default to
the service's default escalation policy. If the email address is not
associated with a user in PagerDuty, it will log a warning and default to
the service's escalation policy.
- with_record (bool):
True to include the entire record in the Alert's payload. False to omit it.
Will be superseded by certain @pagerduty-v2 fields.
- note (bool):
A text note that is added to the Incident. Will be superseded by publisher
fields (see below).
- responders (list<string>):
A list of email addresses of users to add as Requested Responders. If any
email address is not associated with a user in PagerDuty, it will be omitted
and a warning will be logged.
- responder_message (string)
Text string that shows up in Response Request messages sent to requested
responders.
Publishing:
This output has a more complex workflow. The magic publisher fields for @pagerduty-v2
ALSO are respected by this output.
- @pagerduty-incident.incident_title (str):
The provided string will show up in the PagerDuty incident's title.
The child Alert's
title is controlled by other publisher magic fields.
- @pagerduty-incident.note (str):
            Due to legacy reasons, this PagerDuty service adds a note containing
"Creating SOX Incident" to the final PagerDuty incident. Providing a string
to this magic field will override that behavior.
- @pagerduty-incident.urgency (str):
Either "low" or "high". By default urgency is "high" for all incidents.
- @pagerduty-incident.incident_body (str):
@deprecated
This is a legacy field that no longer serves any functionality. It populates
a field on the PagerDuty Incident that is never visible.
@see Also EventsV2DataProvider for more details
"""
__service__ = 'pagerduty-incident'
INCIDENTS_ENDPOINT = 'incidents'
USERS_ENDPOINT = 'users'
POLICIES_ENDPOINT = 'escalation_policies'
SERVICES_ENDPOINT = 'services'
PRIORITIES_ENDPOINT = 'priorities'
BACKOFF_MAX = 5
BACKOFF_TIME = 5
def __init__(self, *args, **kwargs):
OutputDispatcher.__init__(self, *args, **kwargs)
self._base_url = None
self._headers = None
self._escalation_policy_id = None
@classmethod
def _get_default_properties(cls):
"""Get the standard url used for PagerDuty Incidents API v2. This value the same for
everyone, so is hard-coded here and does not need to be configured by the user
Returns:
dict: Contains various default items for this output (ie: url)
"""
return {'api': PagerDutyRestApiClient.REST_API_BASE_URL}
@classmethod
def get_user_defined_properties(cls):
"""Get properties that must be assigned by the user when configuring a new PagerDuty
event output. This should be sensitive or unique information for this use-case that
needs to come from the user.
Every output should return a dict that contains a 'descriptor' with a description of the
integration being configured.
PagerDuty also requires a routing_key that represents this integration. This
value should be masked during input and is a credential requirement.
Returns:
OrderedDict: Contains various OutputProperty items
"""
return OrderedDict([
('descriptor',
OutputProperty(description='a short and unique descriptor for this '
'PagerDuty integration')),
# The REST API Access Token. This needs to be generated through the PagerDuty console.
# Unlike the routing key this token is EXTREMELY IMPORTANT NOT TO LOSE as it grants
# access to all resources on PagerDuty, whereas the routing key only allows
# the generation of new events.
('token',
OutputProperty(description='the token for this PagerDuty integration',
mask_input=True,
cred_requirement=True)),
('service_name',
OutputProperty(description='the service name for this PagerDuty integration',
cred_requirement=True)),
# The service ID is the unique resource ID of a PagerDuty service, created through
# the UI. You can find the service id by looking at the URL:
# - www.pagerduty.com/services/PDBBCC9
#
# In the above case, the service id is 'PDBBCC9'
('service_id',
OutputProperty(description='the service ID for this PagerDuty integration',
cred_requirement=True)),
('escalation_policy',
OutputProperty(description='the name of the default escalation policy',
input_restrictions={},
cred_requirement=True)),
# The escalation policy ID is the unique resource ID of a PagerDuty escalation policy,
# created through the UI. You can find it on the URL:
# - www.pagerduty.com/escalation_policies#PDBBBB0
#
# In the above case, the escalation policy id is PDBBBB0
('escalation_policy_id',
OutputProperty(description='the ID of the default escalation policy',
cred_requirement=True)),
# This must exactly match the email address of a user on the PagerDuty account.
('email_from',
OutputProperty(description='valid user email from the PagerDuty '
'account linked to the token',
cred_requirement=True)),
# A version 4 UUID expressed as a 32 digit hexadecimal number. This is the same
# as the routing key that is used in the v2 Events API.
('integration_key',
OutputProperty(description='the integration key for this PagerDuty integration',
cred_requirement=True))
])
def _dispatch(self, alert, descriptor):
"""Send incident to Pagerduty Incidents REST API v2
Args:
alert (Alert): Alert instance which triggered a rule
descriptor (str): Output descriptor
Returns:
bool: True if alert was sent successfully, False otherwise
"""
creds = self._load_creds(descriptor)
if not creds:
return
work = WorkContext(self, creds)
return work.run(alert, descriptor)
class WorkContext:
"""Class encapsulating a bunch of self-contained, interdependent PagerDuty work.
Because PagerDuty work involves a lot of steps that share a lot of data, we carved this
section out.
"""
BACKOFF_MAX = 5
BACKOFF_TIME = 5
def __init__(self, output_dispatcher, credentials):
self._output = output_dispatcher
self._credentials = credentials
self._email_from = self._credentials['email_from']
self._default_escalation_policy_id = self._credentials['escalation_policy_id']
self._incident_service = self._credentials['service_id']
http = JsonHttpProvider(output_dispatcher)
self._api_client = PagerDutyRestApiClient(
self._credentials['token'],
self._credentials['email_from'],
http,
url=self._credentials['api']
)
self._events_client = PagerDutyEventsV2ApiClient(http)
# We cache the API User because we may use it multiple times
self._api_user = None
def run(self, alert, descriptor):
"""Sets up an assigned incident."""
if not self._verify_user_exists():
return False
# Extracting context data to assign the incident
rule_context = alert.context
if rule_context:
rule_context = rule_context.get(self._output.__service__, {})
publication = compose_alert(alert, self._output, descriptor)
# Create alert to hold all the incident details
event = self._create_base_alert_event(alert, descriptor, rule_context)
if not event:
LOGGER.error('[%s] Could not create incident event', self._output.__service__)
return False
# Create an incident to house the alert
incident = self._update_base_incident(event, alert, publication, rule_context)
if not incident:
LOGGER.error(
'[%s] Failed to update container incident for event',
self._output.__service__
)
return False
incident_id = incident.get('id', False)
if not incident_id:
LOGGER.error(
'[%s] Incident is missing "id"??',
self._output.__service__
)
return False
# At this point, both the incident and the relevant alert event have been successfully
# created.
#
# All of the code above this line is considered idempotent and can be called repeatedly
# without adverse side effects. Code BELOW this line is neither atomic nor idempotent, so
# we will not retry if any of the below code fails. Instead, we log an error and make a
# best-effort attempt to attach an error note to the PagerDuty incident, signalling that
# it was not setup properly.
#
# In the fateful event the alert gets stuck ANYWAY, the easiest solution is to destroy the
# associated record on the DynamoDB table.
errors = []
# Add responder requests
responders = rule_context.get('responders', [])
if responders and not isinstance(responders, list):
responders = [responders]
if responders:
# The message shows up in the email
default_message = 'An incident was reported that requires your attention.'
responder_message = rule_context.get('responder_message', default_message)
for responder_email in responders:
result = self._add_incident_response_request(
incident_id,
responder_email,
responder_message
)
if not result:
error = '[{}] Failed to request a responder ({}) on incident ({})'.format(
self._output.__service__,
responder_email,
incident_id
)
LOGGER.error(error)
errors.append(error)
# Add a note to the incident
note = self._add_incident_note(incident_id, publication, rule_context)
if not note:
error = '[{}] Failed to add note to incident ({})'.format(
self._output.__service__,
incident_id
)
LOGGER.error(error)
errors.append(error)
# If something went wrong, we can't throw an error anymore; log it on the Incident
if errors:
self._add_instability_note(incident_id, errors)
return True
def _add_instability_note(self, incident_id, errors):
error_section = '\n'.join(['- {}'.format(err) for err in errors])
instability_note = '''
StreamAlert failed to correctly setup this incident. Please contact your StreamAlert administrator.
Errors:
{}
'''.format(error_section).strip()
self._api_client.add_note(incident_id, instability_note)
def _update_base_incident(self, event, alert, publication, rule_context):
"""Given an event, will find the container incident and update it.
In PagerDuty's REST API design, Incidents are designed to behave like containers for many
alerts. Unlike alerts, Incidents can be given custom assignments and escalation policies.
When an alert is created through the EventsV2 API, PagerDuty automatically creates an
incident to contain it. The Incident resource that is created is given an "incident_key"
that is identical to the "dedup_key" of the Event.
Returns the updated incident as a JSON dict. Returns False if anything goes wrong.
"""
incident_key = event.get('dedup_key')
if not incident_key:
LOGGER.error(
'[%s] Event created is missing its "dedup_key"? %s',
self._output.__service__,
event
)
return False
event_incident_id = self._get_incident_id_from_event_incident_key(incident_key)
if not event_incident_id:
LOGGER.error(
'[%s] Failed to retrieve Event Incident Id from dedup_key (%s)',
self._output.__service__,
incident_key
)
return False
incident_data = self._construct_incident_put_request_data(alert, publication, rule_context)
return self._api_client.modify_incident(event_incident_id, incident_data)
def _construct_incident_put_request_data(self, alert, publication, rule_context):
"""Builds the payload for an HTTP PUT /incidents/:incident_id request
Returns it as a JSON dict
"""
# Presentation defaults
default_incident_title = 'StreamAlert Incident - Rule triggered: {}'.format(alert.rule_name)
default_incident_body = alert.rule_description
default_urgency = None # Assumes the default urgency on the service referenced
# Override presentation defaults with publisher fields
incident_title = publication.get(
'@pagerduty-incident.incident_title',
default_incident_title
)
incident_body = publication.get('@pagerduty-incident.incident_body', default_incident_body)
incident_urgency = publication.get('@pagerduty-incident.urgency', default_urgency)
# https://api-reference.pagerduty.com/#!/Incidents/post_incidents
incident_data = {
'incident': {
'type': 'incident',
'title': incident_title,
'service': {
'id': self._incident_service,
'type': 'service_reference'
},
'body': {
'type': 'incident_body',
# Notably, the incident body is basically useless and doesn't show up on the
# UI if the Incident has an alert attached to it.
'details': incident_body,
},
# The incident_key behaves very similarly to the deduplication key, but subsequent
# requests to create a second incident with the same incident_key will return an
# HTTP 400 instead of returning the original result.
# https://v2.developer.pagerduty.com/docs/incident-creation-api#making-a-request
#
# The incident_key is a super bizarre field.
#
# AS-FAR-AS-I-CAN-TELL it functions something like this:
#
# - If you create an incident with incident_key A, any subsequent requests to
# create another incident with the same incident_key will return an HTTP 400
# - If you create an event using EventsV2 API (with or without a dedup_key), the
# associated incident_key of the incident that is automatically created from
# the event will be the same as the dedup_key
# - If you create an event with EventsV2 API and attempt to then create an incident
# with an incident_key that is the same as the dedup_key, instead of returning
# an HTTP 400, it will return the incident that was originally created from the
# EventsV2 API... "idempotently".
#
# 'incident_key': '',
}
}
incident_priority = self._get_standardized_priority(rule_context)
if incident_priority:
incident_data['incident']['priority'] = incident_priority
assignments = self._get_incident_assignments(rule_context)
if assignments:
incident_data['incident']['assignments'] = assignments
else:
# Important detail;
# 'assignments' and 'escalation_policy' seem to be exclusive. If you send both, the
# 'escalation_policy' seems to supersede any custom assignments you have.
escalation_policy = self._get_incident_escalation_policy(rule_context)
incident_data['incident']['escalation_policy'] = escalation_policy
# Urgency, if provided, must always be 'high' or 'low' or the API will error
if incident_urgency:
if incident_urgency in ['low', 'high']:
incident_data['incident']['urgency'] = incident_urgency
else:
LOGGER.warning(
'[%s] Invalid pagerduty incident urgency: "%s"',
self._output.__service__,
incident_urgency
)
return incident_data
def _get_incident_assignments(self, rule_context):
assignments = False
user_to_assign = rule_context.get('assigned_user', False)
# If provided, verify the user and get the id from API
if user_to_assign:
user = self._api_client.get_user_by_email(user_to_assign)
if user and user.get('id'):
assignments = [{'assignee': {
'id': user.get('id'),
'type': 'user_reference',
}}]
else:
LOGGER.warning(
'[%s] Assignee (%s) could not be found in PagerDuty',
self._output.__service__,
user_to_assign
)
return assignments
def _get_incident_escalation_policy(self, rule_context):
# If escalation policy ID was not provided, use default one
policy_id_to_assign = rule_context.get(
'assigned_policy_id',
self._default_escalation_policy_id
)
# Assigned to escalation policy ID, return tuple
return {
'id': policy_id_to_assign,
'type': 'escalation_policy_reference'
}
def _create_base_alert_event(self, alert, descriptor, rule_context):
"""Creates an alert on REST API v2
Returns the JSON representation of the ENQUEUE RESPONSE. This actually does not return
        either the alert or the incident itself, but rather a small acknowledgement structure
containing a "dedup_key". This key can be used to find the incident that is created.
This method is idempotent. The calls out to PagerDuty will create a new alert+incident,
or return the existing one if this method has already been called.
Returns False if event was not created.
"""
with_record = rule_context.get('with_record', True)
event_data = self._output.events_v2_data(
alert,
descriptor,
self._credentials['integration_key'],
with_record=with_record
)
return self._events_client.enqueue_event(event_data)
def _add_incident_response_request(self, incident_id, responder_email, message):
responder = self._api_client.get_user_by_email(responder_email)
if not responder:
LOGGER.error(
'Could not verify if requested incident responder "%s" exists',
responder_email
)
return False
return bool(self._api_client.request_responder(
incident_id,
self._api_user.get('id'),
message,
responder.get('id')
))
def _add_incident_note(self, incident_id, publication, rule_context):
"""Adds a note to the incident, when applicable.
Returns:
bool: True if the note was created or no note needed to be created, False on error.
"""
# Add a note to the combined incident to help with triage
default_incident_note = 'Creating SOX Incident' # For reverse compatibility reasons
incident_note = publication.get(
'@pagerduty-incident.note',
rule_context.get(
'note',
default_incident_note
)
)
if not incident_note:
# Simply return early without adding a note; no need to add a blank one
return True
return bool(self._api_client.add_note(incident_id, incident_note))
@backoff.on_exception(backoff.constant,
PagerdutySearchDelay,
max_tries=BACKOFF_MAX,
interval=BACKOFF_TIME,
on_backoff=backoff_handler(),
on_success=success_handler(),
on_giveup=giveup_handler())
def _get_incident_id_from_event_incident_key(self, incident_key):
"""Queries the API to get the incident id from an incident key
When creating an EVENT from the events-v2 API, events are created alongside an incident,
but only an incident_key is returned, which is not the same as the incident's REST API
resource id.
(!) WARNING: This method can sometimes fail even if an event was successfully created.
Pagerduty can sometimes experience a small amount of "lag time" between when an
Event is created and when its containing Incident is searchable via this API.
Therefore, all code that calls this method must account for the possibility that this
method can be inconsistent with the state of the "real world", and should retry as
appropriate.
"""
if not incident_key:
return False
event_incident = self._api_client.get_incident_by_key(incident_key)
if not event_incident:
raise PagerdutySearchDelay('Received no PagerDuty response')
return event_incident.get('id')
def _verify_user_exists(self):
"""Verifies that the 'email_from' provided in the creds is valid and exists."""
user = self._api_client.get_user_by_email(self._email_from)
if not user:
LOGGER.error(
'Could not verify header From: %s, %s',
self._email_from,
self._output.__service__
)
return False
self._api_user = user
return True
def _get_standardized_priority(self, context):
"""Method to verify the existence of a incident priority with the API
Uses the priority provided in the context. When omitted the incident defaults to low
priority.
Args:
context (dict): Context provided in the alert record
Returns:
            dict|False: JSON object to be used in the API call, containing the priority id
and the priority reference, False if it fails or it does not exist
"""
if not context:
return False
# FIXME (derek.wang) use publisher to set priority instead of context
priority_name = context.get('incident_priority', False)
if not priority_name:
return False
priorities = self._api_client.get_priorities()
if not priorities:
return False
# If the requested priority is in the list, get the id
priority_id = next(
(item for item in priorities if item["name"] == priority_name), {}
).get('id', False)
# If the priority id is found, compose the JSON
if priority_id:
return {'id': priority_id, 'type': 'priority_reference'}
return False
# pylint: disable=protected-access
class JsonHttpProvider:
"""Wraps and re-uses the HTTP implementation on the output dispatcher.
Intended to de-couple the ApiClient classes from the OutputDispatcher. It re-uses some
HTTP implementation that's baked into the OutputDispatcher. It is safe to ignore the
breach-of-abstraction violations here.
"""
def __init__(self, output_dispatcher):
self._output_dispatcher = output_dispatcher
def get(self, url, params, headers=None, verify=False):
"""Returns the JSON response of the given request, or FALSE on failure"""
try:
result = self._output_dispatcher._get_request_retry(url, params, headers, verify)
except OutputRequestFailure as e:
LOGGER.error('Encountered HTTP error on GET %s: %s', url, e.response)
return False
response = result.json()
if not response:
return False
return response
def post(self, url, data, headers=None, verify=False):
"""Returns the JSON response of the given request, or FALSE on failure"""
try:
result = self._output_dispatcher._post_request_retry(url, data, headers, verify)
except OutputRequestFailure as e:
LOGGER.error('Encountered HTTP error on POST %s: %s', url, e.response)
return False
response = result.json()
if not response:
return False
return response
def put(self, url, params, headers=None, verify=False):
"""Returns the JSON response of the given request, or FALSE on failure"""
try:
result = self._output_dispatcher._put_request_retry(url, params, headers, verify)
except OutputRequestFailure as e:
LOGGER.error('Encountered HTTP error on PUT %s: %s', url, e.response)
return False
response = result.json()
if not response:
return False
return response
class SslVerifiable:
"""Mixin for tracking whether or not this is an SSL verifiable.
Mix this into API client types of classes.
The idea is to only do host ssl certificate verification on the very first time a unique
host is hit, since the handshake process is slow. Subsequent requests to the same host
    within the current request can skip certificate verification to speed things up.
"""
def __init__(self):
self._host_ssl_verified = False
def _should_do_ssl_verify(self):
"""Returns whether or not the client should perform SSL host cert verification"""
return not self._host_ssl_verified
def _update_ssl_verified(self, response):
"""
Args:
response (dict|bool): A return value from JsonHttpProvider
Returns:
dict|bool: Simply returns the response as-is
"""
if response is not False:
self._host_ssl_verified = True
return response
class PagerDutyRestApiClient(SslVerifiable):
"""API Client class for the PagerDuty REST API
API Documentation can be found here: https://v2.developer.pagerduty.com/docs/rest-api
"""
REST_API_BASE_URL = 'https://api.pagerduty.com'
def __init__(self, authorization_token, user_email, http_provider, url=None):
super(PagerDutyRestApiClient, self).__init__()
self._authorization_token = authorization_token
self._user_email = user_email
self._http_provider = http_provider # type: JsonHttpProvider
self._base_url = url if url else self.REST_API_BASE_URL
def get_user_by_email(self, user_email):
"""Fetches a pagerduty user by an email address.
Returns false on failure or if no matching user is found.
"""
response = self._http_provider.get(
self._get_users_url(),
{
'query': user_email,
},
self._construct_headers(omit_email=True),
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(response)
if not response:
return False
users = response.get('users', [])
return users[0] if users else False
def get_incident_by_key(self, incident_key):
"""Fetches an incident resource given its key
Returns False on failure or if no matching incident is found.
"""
incidents = self._http_provider.get(
self._get_incidents_url(),
{
'incident_key': incident_key # Beware: this key is intentionally not "query"
},
headers=self._construct_headers(),
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(incidents)
if not incidents:
return False
incidents = incidents.get('incidents', [])
return incidents[0] if incidents else False
def get_priorities(self):
"""Returns a list of all valid priorities"""
priorities = self._http_provider.get(
self._get_priorities_url(),
None,
headers=self._construct_headers(),
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(priorities)
if not priorities:
return False
return priorities.get('priorities', [])
def get_escalation_policy_by_id(self, escalation_policy_id):
"""Given an escalation policy id, returns the resource
Returns False on failure or if no escalation policy exists with that id
"""
escalation_policies = self._http_provider.get(
self._get_escalation_policies_url(),
{
'query': escalation_policy_id,
},
headers=self._construct_headers(),
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(escalation_policies)
if not escalation_policies:
return False
escalation_policies = escalation_policies.get('escalation_policies', [])
return escalation_policies[0] if escalation_policies else False
def modify_incident(self, incident_id, incident_data):
"""Modifies an existing Incident
Returns the incident json representation on success, or False on failure.
Reference: https://api-reference.pagerduty.com/#!/Incidents/post_incidents
Args:
incident_data (dict)
Returns:
dict
"""
incident = self._http_provider.put(
self._get_incident_url(incident_id),
incident_data,
headers=self._construct_headers(),
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(incident)
if not incident:
return False
return incident.get('incident', False)
def add_note(self, incident_id, note):
"""Method to add a text note to the provided incident id
Returns the note json representation on success, or False on failure.
Reference: https://api-reference.pagerduty.com/#!/Incidents/post_incidents_id_notes
Args:
incident_id (str): ID of the incident to add the note to
Returns:
            dict|False: The note resource after being added to the incident, or False if it fails
"""
note = self._http_provider.post(
self._get_incident_notes_url(incident_id),
{
'note': {
'content': note,
}
},
self._construct_headers(),
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(note)
if not note:
return False
return note.get('note', False)
def request_responder(self, incident_id, requester_user_id, message, responder_user_id):
# Be very careful with this API endpoint, there are several things you will need to know:
#
# 1) The requester_id MUST match the user associated with the API token
# 2) Both the requester_id and the responder id must have pagerduty accounts. If EITHER
# of them don't, this API endpoint actually exhibits strange behavior; instead of
# returning an HTTP 400 with a useful error message, it will return an HTTP 404.
# 3) You cannot add a requester to an incident that is resolved, it will also 404.
responder_request = self._http_provider.post(
self._get_incident_responder_requests_url(incident_id),
{
'requester_id': requester_user_id,
'message': message,
'responder_request_targets': [
{
'responder_request_target': {
'id': responder_user_id,
'type': 'user_reference',
}
}
]
},
self._construct_headers(),
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(responder_request)
if not responder_request:
return False
return responder_request.get('responder_request', False)
def _construct_headers(self, omit_email=False):
"""Returns a dict containing all headers to send for PagerDuty requests
PagerDuty performs validation on the email provided in the From: header. PagerDuty will
error if the requested email does not exist. In one specific case, we do not want this to
happen; when we are querying for the existence of a user with this email.
"""
headers = {
'Accept': 'application/vnd.pagerduty+json;version=2',
'Authorization': 'Token token={}'.format(self._authorization_token),
'Content-Type': 'application/json',
}
if not omit_email:
headers['From'] = self._user_email
return headers
def _get_escalation_policies_url(self):
return '{base_url}/escalation_policies'.format(base_url=self._base_url)
def _get_priorities_url(self):
return '{base_url}/priorities'.format(base_url=self._base_url)
def _get_incidents_url(self):
return '{base_url}/incidents'.format(base_url=self._base_url)
def _get_incident_url(self, incident_id):
return '{incidents_url}/{incident_id}'.format(
incidents_url=self._get_incidents_url(),
incident_id=incident_id
)
def _get_incident_notes_url(self, incident_id):
return '{incident_url}/notes'.format(incident_url=self._get_incident_url(incident_id))
def _get_incident_responder_requests_url(self, incident_id):
return '{incident_url}/responder_requests'.format(
incident_url=self._get_incident_url(incident_id)
)
def _get_users_url(self):
return '{base_url}/users'.format(base_url=self._base_url)
class PagerDutyEventsV2ApiClient(SslVerifiable):
"""Service for finding URLs of various resources on the Events v2 API
Documentation on Events v2 API: https://v2.developer.pagerduty.com/docs/events-api-v2
"""
EVENTS_V2_API_ENQUEUE_ENDPOINT = 'https://events.pagerduty.com/v2/enqueue'
def __init__(self, http_provider, enqueue_endpoint=None):
super(PagerDutyEventsV2ApiClient, self).__init__()
self._http_provider = http_provider # type: JsonHttpProvider
self._enqueue_endpoint = (
enqueue_endpoint if enqueue_endpoint else self.EVENTS_V2_API_ENQUEUE_ENDPOINT
)
def enqueue_event(self, event_data):
"""Enqueues a new event.
Returns the event json representation on success, or False on failure.
Note: For API v2, all authentication information is baked directly into the event_data,
rather than being provided in the headers.
"""
event = self._http_provider.post(
self._get_event_enqueue_v2_url(),
event_data,
headers=None,
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(event)
return event
def _get_event_enqueue_v2_url(self):
if self._enqueue_endpoint:
return self._enqueue_endpoint
return '{}'.format(self.EVENTS_V2_API_ENQUEUE_ENDPOINT)
class PagerDutyEventsV1ApiClient(SslVerifiable):
"""Service for finding URLs of various resources on the Events v1 API
API Documentation can be found here: https://v2.developer.pagerduty.com/docs/events-api
"""
EVENTS_V1_API_ENDPOINT = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'
EVENT_TYPE_TRIGGER = 'trigger'
EVENT_TYPE_ACKNOWLEDGE = 'acknowledge'
EVENT_TYPE_RESOLVE = 'resolve'
CLIENT_STREAMALERT = 'streamalert'
def __init__(self, service_key, http_provider, api_endpoint=None):
super(PagerDutyEventsV1ApiClient, self).__init__()
self._service_key = service_key
self._http_provider = http_provider # type: JsonHttpProvider
self._api_endpoint = api_endpoint if api_endpoint else self.EVENTS_V1_API_ENDPOINT
def send_event(self, incident_description, incident_details, contexts, client_url=''):
"""
Args:
incident_description (str): The title of the alert
incident_details (dict): Arbitrary JSON object that is rendered in custom details field
contexts (array): Array of context dicts, which can be used to embed links or images.
client_url (string): An external URL that appears as a link on the event.
Return:
dict: The JSON representation of the created event
"""
# Structure of body: https://v2.developer.pagerduty.com/docs/trigger-events
data = {
'service_key': self._service_key,
'event_type': self.EVENT_TYPE_TRIGGER,
'description': incident_description,
'details': incident_details,
'client': self.CLIENT_STREAMALERT,
'client_url': client_url,
'contexts': contexts,
}
result = self._http_provider.post(
self._api_endpoint,
data,
headers=None,
verify=self._should_do_ssl_verify()
)
self._update_ssl_verified(result)
return result
| {
"content_hash": "b0aeb8a71faa495881408915e212d4e2",
"timestamp": "",
"source": "github",
"line_count": 1415,
"max_line_length": 100,
"avg_line_length": 39.51378091872792,
"alnum_prop": 0.6025897839462012,
"repo_name": "airbnb/streamalert",
"id": "b9f78e27c78ed420c9e5db3e27d0df311722787e",
"size": "55912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streamalert/alert_processor/outputs/pagerduty.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "142275"
},
{
"name": "Python",
"bytes": "2209853"
},
{
"name": "Shell",
"bytes": "2975"
}
],
"symlink_target": ""
} |
import logging
from sys import argv
from makehex.lexer import lex
from makehex.parser import parser
from makehex.config import parse_config
_stream_handler = logging.StreamHandler()
_stream_handler.setLevel(logging.WARNING)
_stream_handler.setFormatter(logging.Formatter("%(levelname)s [%(asctime)s] %(filename)-15s %(message)s"))
logging.root.addHandler(_stream_handler)
_stop_msg = "{0} Program stopped! {0}".format('=' * 15)
# noinspection PyBroadException
try:
    if len(argv) < 3 or '--help' in argv or '-h' in argv:
        # Print usage and skip processing instead of falling through and
        # failing on a missing or invalid input path.
        print("Usage: makehex INPUT_FILE OUTPUT_FILE [CONFIG_FILE]")
    else:
        with open(argv[1]) as input_file, open(argv[2], "wb+") as output_file:
            co = parse_config(argv[3] if len(argv) >= 4 else None)
            logging.debug("input file: %s; output file: %s", input_file.name, output_file.name)
            parser()(lex(input_file.read()), 0).value.write(output_file, *co)
except:
logging.exception(_stop_msg)
else:
logging.debug(_stop_msg)
| {
"content_hash": "60da0ecd5a888f062e5c2caf86c3673d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 106,
"avg_line_length": 36.18518518518518,
"alnum_prop": 0.6929375639713409,
"repo_name": "Pasha13666/makehex",
"id": "8b9f6071acec82db0433e5de8a3c07863ef72654",
"size": "977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makehex/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25985"
}
],
"symlink_target": ""
} |
BOT_NAME = 'social_scraper'
SPIDER_MODULES = ['social_scraper.spiders']
NEWSPIDER_MODULE = 'social_scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'social_scraper (+http(s)://www.yourdomain.com)'
DOWNLOADER_MIDDLEWARES = {
'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': None,
'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': None,
}
# PIPELINES
ITEM_PIPELINES = {
'social_scraper.pipelines.RedisPipeline': 100,
}
# EXTERNAL CREDENTIALS
TWITTER_APP_KEY = ''
TWITTER_TOKEN = ''
FACEBOOK_TOKEN = ''
# REDIS
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
# API
API_HOST = '0.0.0.0'
API_PORT = 8080
API_DEBUG_MODE = True
# CELERY
CELERY_BROKER_URL='redis://localhost:6379/2'
CELERY_RESULT_BACKEND='redis://localhost:6379/2'
CELERYD_MAX_TASKS_PER_CHILD=1
CELERY_IMPORTS=['social_scraper.webapi']
| {
"content_hash": "0abec23af9fc3ffe29059b5c615c8ae7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 80,
"avg_line_length": 24.324324324324323,
"alnum_prop": 0.7366666666666667,
"repo_name": "piotrpawlaczek/social_scraper",
"id": "230b495941fc5395623c5343cf69456bb081db81",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_scraper/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14476"
}
],
"symlink_target": ""
} |
"""
Problem Statement
Given an array of integers Y=[y1,y2,…,yn], we have n line segments, such that, the endpoints of ith segment are (i,0)
and (i,yi). Imagine that from the top of each segment a horizontal ray is shot to the left, and this ray stops when it
touches another segment or it hits the y-axis. We construct an array of n integers, [v1,v2,…,vn], where vi is equal to
length of ray shot from the top of segment i. We define V(y1,y2,…,yn)=v1+v2+…+vn.
For example, if we have Y=[3,2,5,3,3,4,1,2], then v1,v2,…,v8=[1,1,3,1,1,3,1,2], as shown in the picture below:
For each permutation p of [1,2,…,n], we can calculate V(yp1,yp2,…,ypn). If we choose a uniformly random permutation p of
[1,2,…,n], what is the expected value of V(yp1,yp2,…,ypn)?
"""
__author__ = 'Danyang'
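# Illustrative helper (not part of the original submission): a brute-force O(n^2)
# evaluation of V(Y) for one fixed ordering, handy for sanity-checking the
# expected-value formula in Solution.solve on small inputs.
# For Y = [3, 2, 5, 3, 3, 4, 1, 2] it returns 1 + 1 + 3 + 1 + 1 + 3 + 1 + 2 = 13.
def brute_force_V(Y):
    total = 0
    for i, y in enumerate(Y):
        ray = 1
        j = i - 1
        # the ray stops at the first earlier segment at least as tall as y,
        # or at the y-axis if there is no such segment
        while j >= 0 and Y[j] < y:
            ray += 1
            j -= 1
        total += ray
    return total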
class Solution(object):
def solve(self, cipher):
"""
        Approach: compute the expected value of v_i independently for each stick and sum them.
        Reference: http://cs.stackexchange.com/questions/1076/how-to-approach-vertical-sticks-challenge
        Intuition: seat k people at random around a round table with n seats; the gaps between
        neighbours are identically distributed (though not independent) with mean n/k. Straightening
        the table into a line and counting the y-axis as an extra blocker turns the row of n
        positions into a virtual circle of length n + 1.
Complexity: O(N^2)
:param cipher: the cipher
"""
N, A = cipher
l = N+1
E = 0
for cur in A:
k = 0
for a in A:
                if a >= cur:
                    k += 1  # including itself
E += float(l)/(k+1)
return "%.2f" % E
if __name__=="__main__":
import sys
f = open("0.in", "r")
# f = sys.stdin
solution = Solution()
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
N = int(f.readline().strip())
A = map(int, f.readline().strip().split(' '))
cipher = N, A
# solve
s = "%s\n" % (solution.solve(cipher))
        print s,
| {
"content_hash": "105c78753dc91d39e2211ec8e9dbe813",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 120,
"avg_line_length": 35.41379310344828,
"alnum_prop": 0.5968841285296982,
"repo_name": "ee08b397/HackerRankAlgorithms",
"id": "f29dfa00ac4bec80cbdf868ed7108015b418744a",
"size": "2096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Vertical Sticks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "5450"
},
{
"name": "Python",
"bytes": "205033"
}
],
"symlink_target": ""
} |
import base64
import copy
import datetime
import functools
import os
import string
import tempfile
import fixtures
from oslo.config import cfg
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.image import s3
from nova.network import api as network_api
from nova.network import quantumv2
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack.compute.contrib import (
test_quantum_security_groups as test_quantum)
from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests import matchers
from nova import utils
from nova.virt import fake as fake_virt
from nova import volume
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('default_instance_type', 'nova.compute.flavors')
CONF.import_opt('use_ipv6', 'nova.netconf')
LOG = logging.getLogger(__name__)
HOST = "testhost"
def get_fake_cache():
def _ip(ip, fixed=True, floats=None):
ip_dict = {'address': ip, 'type': 'fixed'}
if not fixed:
ip_dict['type'] = 'floating'
if fixed and floats:
ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
return ip_dict
info = [{'address': 'aa:bb:cc:dd:ee:ff',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip('192.168.0.3',
floats=['1.2.3.4',
'5.6.7.8']),
_ip('192.168.0.4')]}]}}]
if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
return info
def get_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the cache into instance(s) without having to create DB
entries
"""
instances = orig_func(*args, **kwargs)
if isinstance(instances, list):
for instance in instances:
instance['info_cache'] = {'network_info': get_fake_cache()}
else:
instances['info_cache'] = {'network_info': get_fake_cache()}
return instances
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
ec2utils.reset_cache()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API')
self.useFixture(fixtures.FakeLogger('boto'))
def fake_show(meh, context, id):
return {'id': id,
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available'}}
def fake_detail(_self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
fake.stub_out_image_service(self.stubs)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
fake_network.set_stub_network_methods(self.stubs)
# set up our cloud
self.cloud = cloud.CloudController()
self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
# Short-circuit the conductor service
self.flags(use_local=True, group='conductor')
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.volume_api = volume.API()
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
self.stubs.Set(rpc, 'cast', rpc.call)
# make sure we can map ami-00000001/2 to a uuid in FakeImageService
db.api.s3_image_create(self.context,
'cedef40a-ed67-4d10-800e-17455edce175')
db.api.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
self.volume_api.reset_fake_api(self.context)
super(CloudTestCase, self).tearDown()
fake.FakeImageService_reset()
def fake_get_target(obj, iqn):
return 1
def fake_remove_iscsi_target(obj, tid, lun, vol_id, **kwargs):
pass
def _stub_instance_get_with_fixed_ips(self, func_name):
orig_func = getattr(self.cloud.compute_api, func_name)
def fake_get(*args, **kwargs):
return get_instances_with_cached_ips(orig_func, *args, **kwargs)
self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
keypair_api = compute_api.KeypairAPI()
return keypair_api.create_key_pair(self.context, self.context.user_id,
name)
def test_describe_regions(self):
# Makes sure describe regions runs without raising an exception.
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 1)
self.flags(region_list=["one=test_host1", "two=test_host2"])
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 2)
def test_describe_addresses(self):
# Makes sure describe addresses runs without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.cloud.release_address(self.context,
public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_describe_specific_address(self):
# Makes sure describe specific address works.
addresses = ["10.10.10.10", "10.10.10.11"]
for address in addresses:
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
result = self.cloud.describe_addresses(self.context)
self.assertEqual(len(result['addressesSet']), 2)
result = self.cloud.describe_addresses(self.context,
public_ip=['10.10.10.10'])
self.assertEqual(len(result['addressesSet']), 1)
for address in addresses:
self.cloud.release_address(self.context,
public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_allocate_address(self):
address = "10.10.10.10"
allocate = self.cloud.allocate_address
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.assertEqual(allocate(self.context)['publicIp'], address)
db.floating_ip_destroy(self.context, address)
self.assertRaises(exception.NoMoreFloatingIps,
allocate,
self.context)
def test_release_address(self):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova',
'project_id': self.project_id})
result = self.cloud.release_address(self.context, address)
self.assertEqual(result.get('return', None), 'true')
def test_associate_disassociate_address(self):
# Verifies associate runs cleanly without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
# TODO(jkoelker) Probably need to query for instance_type_id and
# make sure we get a valid one
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'],
instance_uuid=inst['uuid'],
host=inst['host'],
vpn=None,
rxtx_factor=3,
project_id=project_id,
macs=None)
fixed_ips = nw_info.fixed_ips()
ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
lambda *args: {'fixed_ips': ['10.0.0.1'],
'fixed_ip6s': [],
'floating_ips': []})
self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
lambda *args: 1)
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
self.cloud.disassociate_address(self.context,
public_ip=address)
self.cloud.release_address(self.context,
public_ip=address)
self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
inst['host'])
db.instance_destroy(self.context, inst['uuid'])
db.floating_ip_destroy(self.context, address)
def test_disassociate_auto_assigned_address(self):
"""Verifies disassociating auto assigned floating IP
raises an exception
"""
address = "10.10.10.10"
def fake_get(*args, **kwargs):
pass
def fake_disassociate_floating_ip(*args, **kwargs):
raise exception.CannotDisassociateAutoAssignedFloatingIP()
self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
lambda *args: 1)
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
self.stubs.Set(network_api.API, 'disassociate_floating_ip',
fake_disassociate_floating_ip)
self.assertRaises(exception.EC2APIError,
self.cloud.disassociate_address,
self.context, public_ip=address)
def test_disassociate_unassociated_address(self):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.assertRaises(exception.InstanceNotFound,
self.cloud.disassociate_address,
self.context, public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_describe_security_groups(self):
# Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
result = self.cloud.describe_security_groups(self.context)
# NOTE(vish): should have the default group as well
self.assertEqual(len(result['securityGroupInfo']), 2)
result = self.cloud.describe_security_groups(self.context,
group_name=[sec['name']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
sec['name'])
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_all_tenants(self):
# Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': 'foobar',
'name': 'test'})
def _check_name(result, i, expected):
self.assertEqual(result['securityGroupInfo'][i]['groupName'],
expected)
# include all tenants
filter = [{'name': 'all-tenants', 'value': {'1': 1}}]
result = self.cloud.describe_security_groups(self.context,
filter=filter)
self.assertEqual(len(result['securityGroupInfo']), 2)
_check_name(result, 0, 'default')
_check_name(result, 1, sec['name'])
# exclude all tenants
filter = [{'name': 'all-tenants', 'value': {'1': 0}}]
result = self.cloud.describe_security_groups(self.context,
filter=filter)
self.assertEqual(len(result['securityGroupInfo']), 1)
_check_name(result, 0, 'default')
# default all tenants
result = self.cloud.describe_security_groups(self.context)
self.assertEqual(len(result['securityGroupInfo']), 1)
_check_name(result, 0, 'default')
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
result = self.cloud.describe_security_groups(self.context,
group_id=[sec['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
sec['name'])
default = db.security_group_get_by_name(self.context,
self.context.project_id,
'default')
result = self.cloud.describe_security_groups(self.context,
group_id=[default['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
'default')
db.security_group_destroy(self.context, sec['id'])
def test_create_delete_security_group(self):
descript = 'test description'
create = self.cloud.create_security_group
result = create(self.context, 'testgrp', descript)
group_descript = result['securityGroupSet'][0]['groupDescription']
self.assertEqual(descript, group_descript)
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, 'testgrp'))
def test_security_group_quota_limit(self):
self.flags(quota_security_groups=10)
for i in range(1, CONF.quota_security_groups + 1):
name = 'test name %i' % i
descript = 'test description %i' % i
create = self.cloud.create_security_group
result = create(self.context, name, descript)
        # 11th group should fail
self.assertRaises(exception.EC2APIError,
create, self.context, 'foo', 'bar')
def test_delete_security_group_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, group_id=sec['id']))
def test_delete_security_group_with_bad_name(self):
delete = self.cloud.delete_security_group
notfound = exception.SecurityGroupNotFound
self.assertRaises(notfound, delete, self.context, 'badname')
def test_delete_security_group_with_bad_group_id(self):
delete = self.cloud.delete_security_group
notfound = exception.SecurityGroupNotFound
self.assertRaises(notfound, delete, self.context, group_id=999)
def test_delete_security_group_no_params(self):
delete = self.cloud.delete_security_group
self.assertRaises(exception.EC2APIError, delete, self.context)
def test_authorize_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'ip_ranges':
{'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_authorize_security_group_fail_missing_source_group(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'}},
'ip_protocol': u'tcp'}]}
self.assertRaises(exception.SecurityGroupNotFound, authz,
self.context, group_name=sec['name'], **kwargs)
def test_authorize_security_group_ingress_ip_permissions_groups(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context,
{'project_id': 'someuser',
'name': 'somegroup1'})
sec = db.security_group_create(self.context,
{'project_id': 'someuser',
'name': 'othergroup2'})
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'},
'2': {'user_id': u'someuser',
'group_name': u'othergroup2'}},
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_describe_security_group_ingress_groups(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec1 = db.security_group_create(self.context, kwargs)
sec2 = db.security_group_create(self.context,
{'project_id': 'someuser',
'name': 'somegroup1'})
sec3 = db.security_group_create(self.context,
{'project_id': 'someuser',
'name': 'othergroup2'})
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [
{'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'}}},
{'ip_protocol': 'tcp',
'from_port': 80,
'to_port': 80,
'groups': {'1': {'user_id': u'someuser',
'group_name': u'othergroup2'}}}]}
self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
self.assertEquals(len(groups['securityGroupInfo']), 1)
actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
self.assertEquals(len(actual_rules), 4)
expected_rules = [{'fromPort': -1,
'groups': [{'groupName': 'somegroup1',
'userId': 'someuser'}],
'ipProtocol': 'icmp',
'ipRanges': [],
'toPort': -1},
{'fromPort': 1,
'groups': [{'groupName': u'somegroup1',
'userId': u'someuser'}],
'ipProtocol': 'tcp',
'ipRanges': [],
'toPort': 65535},
{'fromPort': 1,
'groups': [{'groupName': u'somegroup1',
'userId': u'someuser'}],
'ipProtocol': 'udp',
'ipRanges': [],
'toPort': 65535},
{'fromPort': 80,
'groups': [{'groupName': u'othergroup2',
'userId': u'someuser'}],
'ipProtocol': u'tcp',
'ipRanges': [],
'toPort': 80}]
for rule in expected_rules:
self.assertTrue(rule in actual_rules)
db.security_group_destroy(self.context, sec3['id'])
db.security_group_destroy(self.context, sec2['id'])
db.security_group_destroy(self.context, sec1['id'])
def test_revoke_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec['id'], **kwargs)
revoke = self.cloud.revoke_security_group_ingress
self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
def test_authorize_revoke_security_group_ingress_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec['id'], **kwargs)
revoke = self.cloud.revoke_security_group_ingress
self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
def test_authorize_security_group_ingress_missing_protocol_params(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
authz = self.cloud.authorize_security_group_ingress
self.assertRaises(exception.EC2APIError, authz, self.context, 'test')
def test_authorize_security_group_ingress_missing_group_name_or_id(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
authz = self.cloud.authorize_security_group_ingress
self.assertRaises(exception.EC2APIError, authz, self.context, **kwargs)
def test_authorize_security_group_ingress_already_exists(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_name=sec['name'], **kwargs)
self.assertRaises(exception.EC2APIError, authz, self.context,
group_name=sec['name'], **kwargs)
def test_security_group_ingress_quota_limit(self):
self.flags(quota_security_group_rules=20)
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec_group = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
for i in range(100, 120):
kwargs = {'to_port': i, 'from_port': i, 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec_group['id'], **kwargs)
kwargs = {'to_port': 121, 'from_port': 121, 'ip_protocol': 'tcp'}
self.assertRaises(exception.EC2APIError, authz, self.context,
group_id=sec_group['id'], **kwargs)
def _test_authorize_security_group_no_ports_with_source_group(self, proto):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
auth_kwargs = {'ip_protocol': proto,
'groups': {'1': {'user_id': self.context.user_id,
'group_name': u'test'}}}
self.assertTrue(authz(self.context, group_name=sec['name'],
**auth_kwargs))
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
self.assertEquals(len(groups['securityGroupInfo']), 1)
actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
expected_rules = [{'groups': [{'groupName': 'test',
'userId': self.context.user_id}],
'ipProtocol': proto,
'ipRanges': []}]
if proto == 'icmp':
expected_rules[0]['fromPort'] = -1
expected_rules[0]['toPort'] = -1
else:
expected_rules[0]['fromPort'] = 1
expected_rules[0]['toPort'] = 65535
self.assertTrue(expected_rules == actual_rules)
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
db.security_group_destroy(self.context, sec['id'])
def _test_authorize_security_group_no_ports_no_source_group(self, proto):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
auth_kwargs = {'ip_protocol': proto}
self.assertRaises(exception.EC2APIError, authz, self.context,
group_name=sec['name'], **auth_kwargs)
db.security_group_destroy(self.context, sec['id'])
def test_authorize_security_group_no_ports_icmp(self):
self._test_authorize_security_group_no_ports_with_source_group('icmp')
self._test_authorize_security_group_no_ports_no_source_group('icmp')
def test_authorize_security_group_no_ports_tcp(self):
self._test_authorize_security_group_no_ports_with_source_group('tcp')
self._test_authorize_security_group_no_ports_no_source_group('tcp')
def test_authorize_security_group_no_ports_udp(self):
self._test_authorize_security_group_no_ports_with_source_group('udp')
self._test_authorize_security_group_no_ports_no_source_group('udp')
def test_revoke_security_group_ingress_missing_group_name_or_id(self):
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
revoke = self.cloud.revoke_security_group_ingress
self.assertRaises(exception.EC2APIError, revoke,
self.context, **kwargs)
def test_delete_security_group_in_use_by_group(self):
group1 = self.cloud.create_security_group(self.context, 'testgrp1',
"test group 1")
group2 = self.cloud.create_security_group(self.context, 'testgrp2',
"test group 2")
kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
'group_name': u'testgrp2'}},
}
self.cloud.authorize_security_group_ingress(self.context,
group_name='testgrp1', **kwargs)
group1 = db.security_group_get_by_name(self.context,
self.project_id, 'testgrp1')
get_rules = db.security_group_rule_get_by_security_group
self.assertTrue(get_rules(self.context, group1['id']))
self.cloud.delete_security_group(self.context, 'testgrp2')
self.assertFalse(get_rules(self.context, group1['id']))
def test_delete_security_group_in_use_by_instance(self):
# Ensure that a group can not be deleted if in use by an instance.
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
args = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active'}
inst = db.instance_create(self.context, args)
args = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'name': 'testgrp',
'description': 'Test group'}
group = db.security_group_create(self.context, args)
db.instance_add_security_group(self.context, inst['uuid'], group['id'])
self.assertRaises(exception.InvalidGroup,
self.cloud.delete_security_group,
self.context, 'testgrp')
db.instance_destroy(self.context, inst['uuid'])
self.cloud.delete_security_group(self.context, 'testgrp')
def test_describe_availability_zones(self):
# Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
# Aggregate based zones
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
agg = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
result = self.cloud.describe_availability_zones(self.context)
self.assertEqual(len(result['availabilityZoneInfo']), 3)
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
self.assertEqual(len(result['availabilityZoneInfo']), 16)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def test_describe_availability_zones_verbose(self):
# Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'second_zone'})
db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
self.assertEqual(len(result['availabilityZoneInfo']), 15)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def test_describe_instances(self):
# Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_instance_type_info(
{}, flavors.get_instance_type(1))
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host2',
'hostname': 'server-4321',
'vm_state': 'active',
'system_metadata': sys_meta})
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
db.aggregate_host_add(self.context, agg['id'], 'host1')
comp2 = db.service_create(self.context, {'host': 'host2',
'topic': "compute"})
agg2 = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
db.aggregate_host_add(self.context, agg2['id'], 'host2')
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
# Now try filtering.
instance_id = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], instance_id)
self.assertEqual(instance['placement']['availabilityZone'],
'zone2')
self.assertEqual(instance['publicDnsName'], '1.2.3.4')
self.assertEqual(instance['ipAddress'], '1.2.3.4')
self.assertEqual(instance['dnsName'], '1.2.3.4')
self.assertEqual(instance['tagSet'], [])
self.assertEqual(instance['privateDnsName'], 'server-4321')
self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
self.assertEqual(instance['dnsNameV6'],
'fe80:b33f::a8bb:ccff:fedd:eeff')
# A filter with even one invalid id should cause an exception to be
# raised
self.assertRaises(exception.InstanceNotFound,
self.cloud.describe_instances, self.context,
instance_id=[instance_id, '435679'])
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def test_describe_instances_all_invalid(self):
# Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
instance_id = ec2utils.id_to_ec2_inst_id('435679')
self.assertRaises(exception.InstanceNotFound,
self.cloud.describe_instances, self.context,
instance_id=[instance_id])
def test_describe_instances_with_filters(self):
# Makes sure describe_instances works and filters results.
filters = {'filter': [{'name': 'test',
'value': ['a', 'b']},
{'name': 'another_test',
'value': 'a string'}]}
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': []})
def test_describe_instances_with_tag_filters(self):
# Makes sure describe_instances works and filters tag results.
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
        # Create some test instances
sys_meta = flavors.save_instance_type_info(
{}, flavors.get_instance_type(1))
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1),
'system_metadata': sys_meta
}
inst2_kwargs = {
'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host2',
'vm_state': 'active',
'hostname': 'server-1112',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2),
'system_metadata': sys_meta
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
inst2 = db.instance_create(self.context, inst2_kwargs)
ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
# Create some tags
# We get one overlapping pair, one overlapping key, and a
# disparate pair
# inst1 : {'foo': 'bar', 'baz': 'wibble', 'bax': 'wobble'}
# inst2 : {'foo': 'bar', 'baz': 'quux', 'zog': 'bobble'}
md = {'key': 'foo', 'value': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
tag=[md])
md2 = {'key': 'baz', 'value': 'wibble'}
md3 = {'key': 'bax', 'value': 'wobble'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1],
tag=[md2, md3])
md4 = {'key': 'baz', 'value': 'quux'}
md5 = {'key': 'zog', 'value': 'bobble'}
self.cloud.create_tags(self.context, resource_id=[ec2_id2],
tag=[md4, md5])
# We should be able to search by:
inst1_ret = {
'groupSet': None,
'instancesSet': [{'amiLaunchIndex': None,
'dnsName': '1.2.3.4',
'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
'imageId': 'ami-00000001',
'instanceId': 'i-00000001',
'instanceState': {'code': 16,
'name': 'running'},
'instanceType': u'm1.medium',
'ipAddress': '1.2.3.4',
'keyName': 'None (None, host1)',
'launchTime':
datetime.datetime(2012, 5, 1, 1, 1, 1),
'placement': {
'availabilityZone': 'nova'},
'privateDnsName': u'server-1111',
'privateIpAddress': '192.168.0.3',
'productCodesSet': None,
'publicDnsName': '1.2.3.4',
'rootDeviceName': '/dev/sda1',
'rootDeviceType': 'instance-store',
'tagSet': [{'key': u'foo',
'value': u'bar'},
{'key': u'baz',
'value': u'wibble'},
{'key': u'bax',
'value': u'wobble'}]}],
'ownerId': None,
'reservationId': u'a'}
inst2_ret = {
'groupSet': None,
'instancesSet': [{'amiLaunchIndex': None,
'dnsName': '1.2.3.4',
'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
'imageId': 'ami-00000001',
'instanceId': 'i-00000002',
'instanceState': {'code': 16,
'name': 'running'},
'instanceType': u'm1.medium',
'ipAddress': '1.2.3.4',
'keyName': u'None (None, host2)',
'launchTime':
datetime.datetime(2012, 5, 1, 1, 1, 2),
'placement': {
'availabilityZone': 'nova'},
'privateDnsName': u'server-1112',
'privateIpAddress': '192.168.0.3',
'productCodesSet': None,
'publicDnsName': '1.2.3.4',
'rootDeviceName': '/dev/sda1',
'rootDeviceType': 'instance-store',
'tagSet': [{'key': u'foo',
'value': u'bar'},
{'key': u'baz',
'value': u'quux'},
{'key': u'zog',
'value': u'bobble'}]}],
'ownerId': None,
'reservationId': u'b'}
# No filter
result = self.cloud.describe_instances(self.context)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Key search
# Both should have tags with key 'foo' and value 'bar'
filters = {'filter': [{'name': 'tag:foo',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Both should have tags with key 'foo'
filters = {'filter': [{'name': 'tag-key',
'value': ['foo']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Value search
# Only inst2 should have tags with key 'baz' and value 'quux'
filters = {'filter': [{'name': 'tag:baz',
'value': ['quux']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst2_ret]})
# Only inst2 should have tags with value 'quux'
filters = {'filter': [{'name': 'tag-value',
'value': ['quux']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst2_ret]})
# Multiple values
# Both should have tags with key 'baz' and values in the set
# ['quux', 'wibble']
filters = {'filter': [{'name': 'tag:baz',
'value': ['quux', 'wibble']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Both should have tags with key 'baz' or tags with value 'bar'
filters = {'filter': [{'name': 'tag-key',
'value': ['baz']},
{'name': 'tag-value',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# destroy the test instances
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
def test_describe_instances_sorting(self):
# Makes sure describe_instances works and is sorted as expected.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_instance_type_info(
{}, flavors.get_instance_type(1))
inst_base = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'system_metadata': sys_meta,
}
inst1_kwargs = {}
inst1_kwargs.update(inst_base)
inst1_kwargs['host'] = 'host1'
inst1_kwargs['hostname'] = 'server-1111'
inst1_kwargs['created_at'] = datetime.datetime(2012, 5, 1, 1, 1, 1)
inst1 = db.instance_create(self.context, inst1_kwargs)
inst2_kwargs = {}
inst2_kwargs.update(inst_base)
inst2_kwargs['host'] = 'host2'
inst2_kwargs['hostname'] = 'server-2222'
inst2_kwargs['created_at'] = datetime.datetime(2012, 2, 1, 1, 1, 1)
inst2 = db.instance_create(self.context, inst2_kwargs)
inst3_kwargs = {}
inst3_kwargs.update(inst_base)
inst3_kwargs['host'] = 'host3'
inst3_kwargs['hostname'] = 'server-3333'
inst3_kwargs['created_at'] = datetime.datetime(2012, 2, 5, 1, 1, 1)
inst3 = db.instance_create(self.context, inst3_kwargs)
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
comp2 = db.service_create(self.context, {'host': 'host2',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]['instancesSet']
self.assertEqual(result[0]['launchTime'], inst2_kwargs['created_at'])
self.assertEqual(result[1]['launchTime'], inst3_kwargs['created_at'])
self.assertEqual(result[2]['launchTime'], inst1_kwargs['created_at'])
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
db.instance_destroy(self.context, inst3['uuid'])
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
# Makes sure describe_instances for instanceState works.
def test_instance_state(expected_code, expected_name,
power_state_, vm_state_, values=None):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_instance_type_info(
{}, flavors.get_instance_type(1))
values = values or {}
values.update({'image_ref': image_uuid, 'instance_type_id': 1,
'power_state': power_state_, 'vm_state': vm_state_,
'system_metadata': sys_meta})
inst = db.instance_create(self.context, values)
instance_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
result = result['instancesSet'][0]['instanceState']
name = result['name']
code = result['code']
self.assertEqual(code, expected_code)
self.assertEqual(name, expected_name)
db.instance_destroy(self.context, inst['uuid'])
test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
power_state.RUNNING, vm_states.ACTIVE)
test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
power_state.NOSTATE, vm_states.STOPPED,
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
# Makes sure describe_instances w/ no ipv6 works.
self.flags(use_ipv6=False)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_instance_type_info(
{}, flavors.get_instance_type(1))
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
instance = result['instancesSet'][0]
instance_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
self.assertEqual(instance['instanceId'], instance_id)
self.assertEqual(instance['publicDnsName'], '1.2.3.4')
self.assertEqual(instance['ipAddress'], '1.2.3.4')
self.assertEqual(instance['dnsName'], '1.2.3.4')
self.assertEqual(instance['privateDnsName'], 'server-1234')
self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
self.assertNotIn('dnsNameV6', instance)
db.instance_destroy(self.context, inst1['uuid'])
db.service_destroy(self.context, comp1['id'])
def test_describe_instances_deleted(self):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_instance_type_info(
{}, flavors.get_instance_type(1))
args1 = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst1 = db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst2 = db.instance_create(self.context, args2)
db.instance_destroy(self.context, inst1['uuid'])
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 1)
result1 = result['reservationSet'][0]['instancesSet']
self.assertEqual(result1[0]['instanceId'],
ec2utils.id_to_ec2_inst_id(inst2['uuid']))
def test_describe_instances_with_image_deleted(self):
image_uuid = 'aebef54a-ed67-4d10-912f-14455edce176'
sys_meta = flavors.save_instance_type_info(
{}, flavors.get_instance_type(1))
args1 = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst1 = db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst2 = db.instance_create(self.context, args2)
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 2)
def test_describe_images(self):
describe_images = self.cloud.describe_images
def fake_detail(meh, context, **kwargs):
return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}}]
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
def fake_detail_none(self, context, **kwargs):
return []
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
result1 = result1['imagesSet'][0]
self.assertEqual(result1['imageId'], 'ami-00000001')
        # provide a valid image_id
result2 = describe_images(self.context, ['ami-00000001'])
self.assertEqual(1, len(result2['imagesSet']))
# provide more than 1 valid image_id
result3 = describe_images(self.context, ['ami-00000001',
'ami-00000002'])
self.assertEqual(2, len(result3['imagesSet']))
# provide a non-existing image_id
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
def assertDictListUnorderedMatch(self, L1, L2, key):
self.assertEqual(len(L1), len(L2))
for d1 in L1:
self.assertTrue(key in d1)
for d2 in L2:
self.assertTrue(key in d2)
if d1[key] == d2[key]:
self.assertThat(d1, matchers.DictMatches(d2))
def _setUpImageSet(self, create_volumes_and_snapshots=False):
mappings1 = [
{'device': '/dev/sda1', 'virtual': 'root'},
{'device': 'sdb0', 'virtual': 'ephemeral0'},
{'device': 'sdb1', 'virtual': 'ephemeral1'},
{'device': 'sdb2', 'virtual': 'ephemeral2'},
{'device': 'sdb3', 'virtual': 'ephemeral3'},
{'device': 'sdb4', 'virtual': 'ephemeral4'},
{'device': 'sdc0', 'virtual': 'swap'},
{'device': 'sdc1', 'virtual': 'swap'},
{'device': 'sdc2', 'virtual': 'swap'},
{'device': 'sdc3', 'virtual': 'swap'},
{'device': 'sdc4', 'virtual': 'swap'}]
block_device_mapping1 = [
{'device_name': '/dev/sdb1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
{'device_name': '/dev/sdb2',
'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
{'device_name': '/dev/sdc2',
'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
{'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
{'device_name': '/dev/sdc4', 'no_device': True}]
image1 = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available',
'mappings': mappings1,
'block_device_mapping': block_device_mapping1,
}
}
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7'}]
image2 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'type': 'machine',
'root_device_name': '/dev/sdb1',
'mappings': mappings2,
'block_device_mapping': block_device_mapping2}}
def fake_show(meh, context, image_id):
_images = [copy.deepcopy(image1), copy.deepcopy(image2)]
for i in _images:
if str(i['id']) == str(image_id):
return i
raise exception.ImageNotFound(image_id=image_id)
def fake_detail(meh, context, **kwargs):
return [copy.deepcopy(image1), copy.deepcopy(image2)]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
volumes = []
snapshots = []
if create_volumes_and_snapshots:
for bdm in block_device_mapping1:
if 'volume_id' in bdm:
vol = self._volume_create(bdm['volume_id'])
volumes.append(vol['id'])
if 'snapshot_id' in bdm:
snap = self._snapshot_create(bdm['snapshot_id'])
snapshots.append(snap['id'])
return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
self.assertEqual(1, len(result['imagesSet']))
result = result['imagesSet'][0]
self.assertTrue('rootDeviceType' in result)
self.assertEqual(result['rootDeviceType'], root_device_type)
self.assertTrue('rootDeviceName' in result)
self.assertEqual(result['rootDeviceName'], root_device_name)
self.assertTrue('blockDeviceMapping' in result)
return result
_expected_root_device_name1 = '/dev/sda1'
    # NOTE(yamahata): noDevice doesn't make sense when returning a mapping;
    #                 it makes sense only when the user is overriding an
    #                 existing mapping.
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
'snap-00000001'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
'vol-00000001'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
# {'deviceName': '/dev/sdb4', 'noDevice': True},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
'snap-00000002'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
'vol-00000002'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
# {'deviceName': '/dev/sdc4', 'noDevice': True}
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
'ebs': {'snapshotId': 'snap-00000003'}}]
# NOTE(yamahata):
# InstanceBlockDeviceMappingItemType
# rootDeviceType
# rootDeviceName
# blockDeviceMapping
# deviceName
# virtualName
# ebs
# snapshotId
# volumeSize
# deleteOnTermination
# noDevice
def test_describe_image_mapping(self):
# test for rootDeviceName and blockDeviceMapping.
describe_images = self.cloud.describe_images
self._setUpImageSet()
result = describe_images(self.context, ['ami-00000001'])
result = self._assertImageSet(result, 'instance-store',
self._expected_root_device_name1)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_images(self.context, ['ami-00000002'])
result = self._assertImageSet(result, 'ebs',
self._expected_root_device_name2)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
def fake_show(meh, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'is_public': True}
def fake_detail(self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
result = describe_image_attribute(self.context, 'ami-00000001',
'kernel')
self.assertEqual('aki-00000001', result['kernel']['value'])
result = describe_image_attribute(self.context, 'ami-00000001',
'ramdisk')
self.assertEqual('ari-00000001', result['ramdisk']['value'])
def test_describe_image_attribute_root_device_name(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name1)
result = describe_image_attribute(self.context, 'ami-00000002',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name2)
def test_describe_image_attribute_block_device_mapping(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_image_attribute(self.context, 'ami-00000002',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_modify_image_attribute(self):
modify_image_attribute = self.cloud.modify_image_attribute
fake_metadata = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'is_public': False}
def fake_show(meh, context, id):
return copy.deepcopy(fake_metadata)
def fake_detail(self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
def fake_update(meh, context, image_id, metadata, data=None):
self.assertEqual(metadata['properties']['kernel_id'],
fake_metadata['properties']['kernel_id'])
self.assertEqual(metadata['properties']['ramdisk_id'],
fake_metadata['properties']['ramdisk_id'])
self.assertTrue(metadata['is_public'])
image = copy.deepcopy(fake_metadata)
image.update(metadata)
return image
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
self.assertTrue(result['is_public'])
def test_register_image(self):
register_image = self.cloud.register_image
def fake_create(*args, **kwargs):
# NOTE(vish): We are mocking s3 so make sure we have converted
# to ids instead of uuids.
return {'id': 1,
'name': 'fake_name',
'container_format': 'ami',
'properties': {'kernel_id': 1,
'ramdisk_id': 1,
'type': 'machine'
},
'is_public': False
}
self.stubs.Set(s3.S3ImageService, 'create', fake_create)
image_location = 'fake_bucket/fake.img.manifest.xml'
result = register_image(self.context, image_location)
self.assertEqual(result['imageId'], 'ami-00000001')
def test_register_image_empty(self):
register_image = self.cloud.register_image
self.assertRaises(exception.EC2APIError, register_image, self.context,
image_location=None)
def test_register_image_name(self):
register_image = self.cloud.register_image
def fake_create(_self, context, metadata, data=None):
self.assertEqual(metadata['name'], self.expected_name)
metadata['id'] = 1
metadata['container_format'] = 'ami'
metadata['is_public'] = False
return metadata
self.stubs.Set(s3.S3ImageService, 'create', fake_create)
self.expected_name = 'fake_bucket/fake.img.manifest.xml'
result = register_image(self.context,
image_location=self.expected_name,
name=None)
self.expected_name = 'an image name'
result = register_image(self.context,
image_location='some_location',
name=self.expected_name)
def test_format_image(self):
image = {
'id': 1,
'container_format': 'ami',
'name': 'name',
'owner': 'someone',
'properties': {
'image_location': 'location',
'kernel_id': 1,
'ramdisk_id': 1,
'type': 'machine'},
'is_public': False}
expected = {'name': 'name',
'imageOwnerId': 'someone',
'isPublic': False,
'imageId': 'ami-00000001',
'imageState': None,
'rootDeviceType': 'instance-store',
'architecture': None,
'imageLocation': 'location',
'kernelId': 'aki-00000001',
'ramdiskId': 'ari-00000001',
'rootDeviceName': '/dev/sda1',
'imageType': 'machine',
'description': None}
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
image['properties']['image_location'] = None
expected['imageLocation'] = 'None (name)'
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
image['name'] = None
image['properties']['image_location'] = 'location'
expected['imageLocation'] = 'location'
expected['name'] = 'location'
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
def test_deregister_image(self):
deregister_image = self.cloud.deregister_image
def fake_delete(self, context, id):
return None
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
# valid image
result = deregister_image(self.context, 'ami-00000001')
self.assertTrue(result)
# invalid image
self.stubs.UnsetAll()
def fake_detail_empty(self, context, **kwargs):
return []
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001')
def test_deregister_image_wrong_container_type(self):
deregister_image = self.cloud.deregister_image
def fake_delete(self, context, id):
return None
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
self.assertRaises(exception.NotFound, deregister_image, self.context,
'aki-00000001')
def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def test_get_password_data(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=CONF.default_instance_type,
max_count=1)
self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
output = self.cloud.get_password_data(context=self.context,
instance_id=[instance_id])
self.assertEquals(output['passwordData'], 'fakepass')
rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=CONF.default_instance_type,
max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEquals(base64.b64decode(output['output']),
'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_key_generation(self):
result = self._create_key('test')
private_key = result['private_key']
expected = db.key_pair_get(self.context,
self.context.user_id,
'test')['public_key']
(fd, fname) = tempfile.mkstemp()
os.write(fd, private_key)
public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
os.unlink(fname)
# assert key fields are equal
self.assertEqual(''.join(public_key.split("\n")[2:-2]),
expected.split(" ")[1].strip())
def test_describe_key_pairs(self):
self._create_key('test1')
self._create_key('test2')
result = self.cloud.describe_key_pairs(self.context)
keys = result["keySet"]
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
def test_describe_bad_key_pairs(self):
self.assertRaises(exception.KeypairNotFound,
self.cloud.describe_key_pairs, self.context,
key_name=['DoesNotExist'])
def test_import_key_pair(self):
pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
        f = open(pubkey_path + '/dummy.pub', 'r')
        dummypub = f.readline().rstrip()
        f.close()
        f = open(pubkey_path + '/dummy.fingerprint', 'r')
        dummyfprint = f.readline().rstrip()
        f.close()
key_name = 'testimportkey'
public_key_material = base64.b64encode(dummypub)
result = self.cloud.import_key_pair(self.context,
key_name,
public_key_material)
self.assertEqual(result['keyName'], key_name)
self.assertEqual(result['keyFingerprint'], dummyfprint)
keydata = db.key_pair_get(self.context,
self.context.user_id,
key_name)
self.assertEqual(dummypub, keydata['public_key'])
self.assertEqual(dummyfprint, keydata['fingerprint'])
def test_import_key_pair_quota_limit(self):
self.flags(quota_key_pairs=0)
pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
        f = open(pubkey_path + '/dummy.pub', 'r')
        dummypub = f.readline().rstrip()
        f.close()
        f = open(pubkey_path + '/dummy.fingerprint', 'r')
        dummyfprint = f.readline().rstrip()
        f.close()
key_name = 'testimportkey'
public_key_material = base64.b64encode(dummypub)
self.assertRaises(exception.EC2APIError,
self.cloud.import_key_pair, self.context, key_name,
public_key_material)
def test_create_key_pair(self):
good_names = ('a', 'a' * 255, string.ascii_letters + ' -_')
bad_names = ('', 'a' * 256, '*', '/')
for key_name in good_names:
result = self.cloud.create_key_pair(self.context,
key_name)
self.assertEqual(result['keyName'], key_name)
for key_name in bad_names:
self.assertRaises(exception.InvalidKeypair,
self.cloud.create_key_pair,
self.context,
key_name)
def test_create_key_pair_quota_limit(self):
self.flags(quota_key_pairs=10)
for i in range(0, 10):
key_name = 'key_%i' % i
result = self.cloud.create_key_pair(self.context,
key_name)
self.assertEqual(result['keyName'], key_name)
        # the 11th key pair should exceed the quota and fail
self.assertRaises(exception.EC2APIError,
self.cloud.create_key_pair,
self.context,
'foo')
def test_delete_key_pair(self):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
self.stubs.Set(rpc, 'cast', rpc.call)
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['imageId'], 'ami-00000001')
self.assertEqual(instance['instanceId'], 'i-00000001')
self.assertEqual(instance['instanceState']['name'], 'running')
self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_availability_zone(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_instance_type,
'max_count': 1,
'placement': {'availability_zone': 'fake'},
}
run_instances = self.cloud.run_instances
def fake_show(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
self.stubs.Set(rpc, 'cast', rpc.call)
def fake_format(*args, **kwargs):
pass
self.stubs.Set(self.cloud, '_format_run_instances', fake_format)
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['availability_zone'], 'fake')
return ({'id': 'fake-instance'}, 'fake-res-id')
self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
# NOTE(vish) the assert for this call is in the fake_create method.
run_instances(self.context, **kwargs)
def test_run_instances_image_state_none(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_no_state(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}, 'container_format': 'ami'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
self.assertRaises(exception.EC2APIError, run_instances,
self.context, **kwargs)
def test_run_instances_image_state_invalid(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_decrypt(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
self.assertRaises(exception.EC2APIError, run_instances,
self.context, **kwargs)
def test_run_instances_image_status_active(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_stat_active(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine'}}
def fake_id_to_glance_id(context, id):
return 'cedef40a-ed67-4d10-800e-17455edce175'
self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
def _restart_compute_service(self, periodic_interval_max=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval_max:
self.compute = self.start_service(
'compute', periodic_interval_max=periodic_interval_max)
else:
self.compute = self.start_service('compute')
def test_stop_start_instance(self):
# Makes sure stop/start instance works.
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertTrue(result)
result = self.cloud.start_instances(self.context, [instance_id])
self.assertTrue(result)
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertTrue(result)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
def test_start_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertTrue(result)
result = self.cloud.start_instances(self.context, [instance_id])
self.assertTrue(result)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_stop_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertTrue(result)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_terminate_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_terminate_instances_invalid_instance_id(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
self.assertRaises(exception.InstanceNotFound,
self.cloud.terminate_instances,
self.context, ['i-2'])
self._restart_compute_service()
def test_terminate_instances_disable_terminate(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
ec2utils.ec2_id_to_id(instance_id))
instance = db.instance_update(self.context, internal_uuid,
{'disable_terminate': True})
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 16,
'name': 'running'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
instance = db.instance_update(self.context, internal_uuid,
{'disable_terminate': False})
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_terminate_instances_two_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1, }
inst1 = self._run_instance(**kwargs)
inst2 = self._run_instance(**kwargs)
result = self.cloud.stop_instances(self.context, [inst1])
self.assertTrue(result)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 48,
'name': 'terminated'}},
{'instanceId': 'i-00000002',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [inst1, inst2])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_reboot_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
result = self.cloud.reboot_instances(self.context, [instance_id])
self.assertTrue(result)
def _volume_create(self, volume_id=None):
kwargs = {'name': 'test-volume',
'description': 'test volume description',
'status': 'available',
'host': 'fake',
'size': 1,
'attach_status': 'detached'}
if volume_id:
kwargs['volume_id'] = volume_id
return self.volume_api.create_with_kwargs(self.context, **kwargs)
def _snapshot_create(self, snapshot_id=None):
kwargs = {'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4',
'status': "available",
'volume_size': 1}
if snapshot_id:
kwargs['snap_id'] = snapshot_id
return self.volume_api.create_snapshot_with_kwargs(self.context,
**kwargs)
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
return result['snapshotId']
def _do_test_create_image(self, no_reboot):
"""Make sure that CreateImage works."""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
def fake_show(meh, context, id):
bdm = [dict(snapshot_id=snapshots[0],
volume_size=1,
device_name='sda1',
delete_on_termination=False)]
props = dict(kernel_id='cedef40a-ed67-4d10-800e-17455edce175',
ramdisk_id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
root_device_name='/dev/sda1',
block_device_mapping=bdm)
return dict(id=id,
properties=props,
container_format='ami',
status='active',
is_public=True)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def fake_block_device_mapping_get_all_by_instance(context, inst_id):
return [dict(id=1,
snapshot_id=snapshots[0],
volume_id=volumes[0],
virtual_name=None,
volume_size=1,
device_name='sda1',
delete_on_termination=False,
no_device=None,
connection_info='{"foo":"bar"}')]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
virt_driver = {}
def fake_power_on(self, instance):
virt_driver['powered_on'] = True
self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
def fake_power_off(self, instance):
virt_driver['powered_off'] = True
self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
result = self.cloud.create_image(self.context, ec2_instance_id,
no_reboot=no_reboot)
ec2_ids = [result['imageId']]
created_image = self.cloud.describe_images(self.context,
ec2_ids)['imagesSet'][0]
self.assertTrue('blockDeviceMapping' in created_image)
bdm = created_image['blockDeviceMapping'][0]
self.assertEquals(bdm.get('deviceName'), 'sda1')
self.assertTrue('ebs' in bdm)
self.assertEquals(bdm['ebs'].get('snapshotId'),
ec2utils.id_to_ec2_snap_id(snapshots[0]))
self.assertEquals(created_image.get('kernelId'), 'aki-00000001')
self.assertEquals(created_image.get('ramdiskId'), 'ari-00000002')
self.assertEquals(created_image.get('rootDeviceType'), 'ebs')
self.assertNotEqual(virt_driver.get('powered_on'), no_reboot)
self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
self._restart_compute_service()
def test_create_image_no_reboot(self):
# Make sure that CreateImage works.
self._do_test_create_image(True)
def test_create_image_with_reboot(self):
# Make sure that CreateImage works.
self._do_test_create_image(False)
def test_create_image_instance_store(self):
"""
Ensure CreateImage fails as expected for an instance-store-backed
instance
"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
def fake_block_device_mapping_get_all_by_instance(context, inst_id):
return [dict(snapshot_id=snapshots[0],
volume_id=volumes[0],
virtual_name=None,
volume_size=1,
device_name='vda',
delete_on_termination=False,
no_device=None)]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
self.assertRaises(exception.InvalidParameterValue,
self.cloud.create_image,
self.context,
ec2_instance_id,
no_reboot=True)
@staticmethod
def _fake_bdm_get(ctxt, id):
return [{'volume_id': 87654321,
'snapshot_id': None,
'no_device': None,
'virtual_name': None,
'delete_on_termination': True,
'device_name': '/dev/sdh'},
{'volume_id': None,
'snapshot_id': 98765432,
'no_device': None,
'virtual_name': None,
'delete_on_termination': True,
'device_name': '/dev/sdi'},
{'volume_id': None,
'snapshot_id': None,
'no_device': True,
'virtual_name': None,
'delete_on_termination': None,
'device_name': None},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral0',
'delete_on_termination': None,
'device_name': '/dev/sdb'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral1',
'delete_on_termination': None,
'device_name': '/dev/sdd'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral2',
'delete_on_termination': None,
'device_name': '/dev/sd3'},
]
def test_describe_instance_attribute(self):
# Make sure that describe_instance_attribute works.
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
self._fake_bdm_get)
def fake_get(ctxt, instance_id):
inst_type = flavors.get_default_instance_type()
inst_type['name'] = 'fake_type'
sys_meta = flavors.save_instance_type_info({}, inst_type)
sys_meta = utils.dict_to_metadata(sys_meta)
return {
'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
'root_device_name': '/dev/sdh',
'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}],
'vm_state': vm_states.STOPPED,
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'user_data': 'fake-user data',
'shutdown_terminate': False,
'disable_terminate': False,
'system_metadata': sys_meta,
}
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
def fake_get_instance_uuid_by_ec2_id(ctxt, int_id):
if int_id == 305419896:
return 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
raise exception.InstanceNotFound(instance_id=int_id)
self.stubs.Set(db, 'get_instance_uuid_by_ec2_id',
fake_get_instance_uuid_by_ec2_id)
get_attribute = functools.partial(
self.cloud.describe_instance_attribute,
self.context, 'i-12345678')
bdm = get_attribute('blockDeviceMapping')
bdm['blockDeviceMapping'].sort()
expected_bdm = {'instance_id': 'i-12345678',
'rootDeviceType': 'ebs',
'blockDeviceMapping': [
{'deviceName': '/dev/sdh',
'ebs': {'status': 'attached',
'deleteOnTermination': True,
'volumeId': 'vol-05397fb1',
'attachTime': '13:56:24'}}]}
expected_bdm['blockDeviceMapping'].sort()
self.assertEqual(bdm, expected_bdm)
groupSet = get_attribute('groupSet')
groupSet['groupSet'].sort()
expected_groupSet = {'instance_id': 'i-12345678',
'groupSet': [{'groupId': 'fake0'},
{'groupId': 'fake1'}]}
expected_groupSet['groupSet'].sort()
self.assertEqual(groupSet, expected_groupSet)
self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
{'instance_id': 'i-12345678',
'instanceInitiatedShutdownBehavior': 'stop'})
self.assertEqual(get_attribute('disableApiTermination'),
{'instance_id': 'i-12345678',
'disableApiTermination': False})
self.assertEqual(get_attribute('instanceType'),
{'instance_id': 'i-12345678',
'instanceType': 'fake_type'})
self.assertEqual(get_attribute('kernel'),
{'instance_id': 'i-12345678',
'kernel': 'aki-00000001'})
self.assertEqual(get_attribute('ramdisk'),
{'instance_id': 'i-12345678',
'ramdisk': 'ari-00000002'})
self.assertEqual(get_attribute('rootDeviceName'),
{'instance_id': 'i-12345678',
'rootDeviceName': '/dev/sdh'})
# NOTE(yamahata): this isn't supported
# get_attribute('sourceDestCheck')
self.assertEqual(get_attribute('userData'),
{'instance_id': 'i-12345678',
'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
def test_instance_initiated_shutdown_behavior(self):
def test_dia_iisb(expected_result, **kwargs):
"""test describe_instance_attribute
attribute instance_initiated_shutdown_behavior"""
kwargs.update({'instance_type': CONF.default_instance_type,
'max_count': 1})
instance_id = self._run_instance(**kwargs)
result = self.cloud.describe_instance_attribute(self.context,
instance_id, 'instanceInitiatedShutdownBehavior')
self.assertEqual(result['instanceInitiatedShutdownBehavior'],
expected_result)
expected = {'instancesSet': [
{'instanceId': instance_id,
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context,
[instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
test_dia_iisb('stop', image_id='ami-1')
block_device_mapping = [{'device_name': '/dev/vdb',
'virtual_name': 'ephemeral0'}]
test_dia_iisb('stop', image_id='ami-2',
block_device_mapping=block_device_mapping)
def fake_show(self, context, id_):
LOG.debug("id_ %s", id_)
prop = {}
if id_ == 'ami-3':
pass
elif id_ == 'ami-4':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}]}
elif id_ == 'ami-5':
prop = {'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
elif id_ == 'ami-6':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}],
'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}
prop_base.update(prop)
return {
'id': id_,
'name': 'fake_name',
'properties': prop_base,
'container_format': 'ami',
'status': 'active'}
# NOTE(yamahata): create ami-3 ... ami-6
        # ami-1 and ami-2 are already created by setUp()
for i in range(3, 7):
db.api.s3_image_create(self.context, 'ami-%d' % i)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
test_dia_iisb('stop', image_id='ami-3')
test_dia_iisb('stop', image_id='ami-4')
test_dia_iisb('stop', image_id='ami-5')
test_dia_iisb('stop', image_id='ami-6')
def test_create_delete_tags(self):
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
# Create a test image
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
# Create some tags
md = {'key': 'foo', 'value': 'bar'}
md_result = {'foo': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id],
tag=[md])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md_result)
self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
# Delete them
self.cloud.delete_tags(self.context, resource_id=[ec2_id],
tag=[{'key': 'foo', 'value': 'bar'}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, {})
self.assertEqual(meta_changes, [{'foo': ['-']}])
def test_describe_tags(self):
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
# Create some test images
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
}
inst2_kwargs = {
'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'hostname': 'server-1112',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
inst2 = db.instance_create(self.context, inst2_kwargs)
ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
# Create some tags
# We get one overlapping pair, and each has a different key value pair
# inst1 : {'foo': 'bar', 'bax': 'wibble'}
        # inst2 : {'foo': 'bar', 'baz': 'quux'}
md = {'key': 'foo', 'value': 'bar'}
md_result = {'foo': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
tag=[md])
self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md_result)
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst2)
self.assertEqual(metadata, md_result)
md2 = {'key': 'baz', 'value': 'quux'}
md2_result = {'baz': 'quux'}
md2_result.update(md_result)
self.cloud.create_tags(self.context, resource_id=[ec2_id2],
tag=[md2])
self.assertEqual(meta_changes, [{'baz': ['+', 'quux']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst2)
self.assertEqual(metadata, md2_result)
md3 = {'key': 'bax', 'value': 'wibble'}
md3_result = {'bax': 'wibble'}
md3_result.update(md_result)
self.cloud.create_tags(self.context, resource_id=[ec2_id1],
tag=[md3])
self.assertEqual(meta_changes, [{'bax': ['+', 'wibble']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md3_result)
inst1_key_foo = {'key': u'foo', 'resource_id': 'i-00000001',
'resource_type': 'instance', 'value': u'bar'}
inst1_key_bax = {'key': u'bax', 'resource_id': 'i-00000001',
'resource_type': 'instance', 'value': u'wibble'}
inst2_key_foo = {'key': u'foo', 'resource_id': 'i-00000002',
'resource_type': 'instance', 'value': u'bar'}
inst2_key_baz = {'key': u'baz', 'resource_id': 'i-00000002',
'resource_type': 'instance', 'value': u'quux'}
# We should be able to search by:
# No filter
tags = self.cloud.describe_tags(self.context)['tagSet']
self.assertEqual(tags, [inst1_key_foo, inst2_key_foo,
inst2_key_baz, inst1_key_bax])
# Resource ID
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'resource_id',
'value': [ec2_id1]}])['tagSet']
self.assertEqual(tags, [inst1_key_foo, inst1_key_bax])
# Resource Type
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'resource_type',
'value': ['instance']}])['tagSet']
self.assertEqual(tags, [inst1_key_foo, inst2_key_foo,
inst2_key_baz, inst1_key_bax])
# Key, either bare or with wildcards
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['foo']}])['tagSet']
self.assertEqual(tags, [inst1_key_foo, inst2_key_foo])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz']}])['tagSet']
self.assertEqual(tags, [inst2_key_baz])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['ba?']}])['tagSet']
self.assertEqual(tags, [])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['b*']}])['tagSet']
self.assertEqual(tags, [])
# Value, either bare or with wildcards
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['bar']}])['tagSet']
self.assertEqual(tags, [inst1_key_foo, inst2_key_foo])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['wi*']}])['tagSet']
self.assertEqual(tags, [])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['quu?']}])['tagSet']
self.assertEqual(tags, [])
# Multiple values
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz', 'bax']}])['tagSet']
self.assertEqual(tags, [inst2_key_baz, inst1_key_bax])
# Multiple filters
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz']},
{'name': 'value',
'value': ['wibble']}])['tagSet']
self.assertEqual(tags, [inst2_key_baz, inst1_key_bax])
        # And we should fail when filtering on an unsupported resource type
self.assertRaises(exception.EC2APIError,
self.cloud.describe_tags,
self.context,
filter=[{'name': 'resource_type',
'value': ['instance', 'volume']}])
def test_resource_type_from_id(self):
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'i-12345'),
'instance')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'r-12345'),
'reservation')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'vol-12345'),
'volume')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'snap-12345'),
'snapshot')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'ami-12345'),
'image')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'ari-12345'),
'image')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'aki-12345'),
'image')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'x-12345'),
None)
class CloudTestCaseQuantumProxy(test.TestCase):
def setUp(self):
cfg.CONF.set_override('security_group_api', 'quantum')
self.cloud = cloud.CloudController()
self.original_client = quantumv2.get_client
quantumv2.get_client = test_quantum.get_client
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
super(CloudTestCaseQuantumProxy, self).setUp()
def tearDown(self):
quantumv2.get_client = self.original_client
test_quantum.get_client()._reset()
super(CloudTestCaseQuantumProxy, self).tearDown()
def test_describe_security_groups(self):
# Makes sure describe_security_groups works and filters results.
group_name = 'test'
description = 'test'
self.cloud.create_security_group(self.context, group_name,
description)
result = self.cloud.describe_security_groups(self.context)
# NOTE(vish): should have the default group as well
self.assertEqual(len(result['securityGroupInfo']), 2)
result = self.cloud.describe_security_groups(self.context,
group_name=[group_name])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(result['securityGroupInfo'][0]['groupName'],
group_name)
self.cloud.delete_security_group(self.context, group_name)
def test_describe_security_groups_by_id(self):
group_name = 'test'
description = 'test'
self.cloud.create_security_group(self.context, group_name,
description)
quantum = test_quantum.get_client()
# Get id from quantum since cloud.create_security_group
# does not expose it.
search_opts = {'name': group_name}
groups = quantum.list_security_groups(
**search_opts)['security_groups']
result = self.cloud.describe_security_groups(self.context,
group_id=[groups[0]['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
group_name)
self.cloud.delete_security_group(self.context, group_name)
def test_create_delete_security_group(self):
descript = 'test description'
create = self.cloud.create_security_group
result = create(self.context, 'testgrp', descript)
group_descript = result['securityGroupSet'][0]['groupDescription']
self.assertEqual(descript, group_descript)
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, 'testgrp'))
| {
"content_hash": "c4214deba40bcd71f615bad2b50d2803",
"timestamp": "",
"source": "github",
"line_count": 2698,
"max_line_length": 79,
"avg_line_length": 45.146404744255,
"alnum_prop": 0.522162472804893,
"repo_name": "sridevikoushik31/nova",
"id": "e58d260fbf7f9bffa199baf4f05770ae22dea1a8",
"size": "122644",
"binary": false,
"copies": "1",
"ref": "refs/heads/port_id_in_vif_on_devide",
"path": "nova/tests/api/ec2/test_cloud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9944606"
},
{
"name": "Ruby",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
} |
from django.db import migrations
def update_ram_usage(apps, schema_editor):
bytes_in_mb = 2 ** 20
app_label = 'waldur_slurm'
Allocation = apps.get_model(app_label, 'Allocation')
for allocation in Allocation.objects.all():
if allocation.ram_usage != 0:
allocation.ram_usage = allocation.ram_usage // bytes_in_mb
allocation.save(update_fields=['ram_usage'])
AllocationUsage = apps.get_model(app_label, 'AllocationUsage')
for allocation_usage in AllocationUsage.objects.all():
if allocation_usage.ram_usage != 0:
allocation_usage.ram_usage = allocation_usage.ram_usage // bytes_in_mb
allocation_usage.save(update_fields=['ram_usage'])
AllocationUserUsage = apps.get_model(app_label, 'AllocationUserUsage')
for allocation_user_usage in AllocationUserUsage.objects.all():
if allocation_user_usage.ram_usage != 0:
allocation_user_usage.ram_usage = (
allocation_user_usage.ram_usage // bytes_in_mb
)
allocation_user_usage.save(update_fields=['ram_usage'])
class Migration(migrations.Migration):
dependencies = [
('waldur_slurm', '0010_change_default_ram_limit'),
]
operations = [migrations.RunPython(update_ram_usage)]
| {
"content_hash": "0f5fc33857ffcf246d7236baccb3289b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 82,
"avg_line_length": 37.05714285714286,
"alnum_prop": 0.6576715497301465,
"repo_name": "opennode/nodeconductor-assembly-waldur",
"id": "4fd5a51347ec0cd5f055f694c971b4309898e742",
"size": "1297",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_slurm/migrations/0011_change_ram_usage_to_mb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1624"
},
{
"name": "Python",
"bytes": "412263"
},
{
"name": "Shell",
"bytes": "2031"
}
],
"symlink_target": ""
} |
import threading
import logging
import queue
import pymongo
import numpy as np
from time import sleep
class Annotator(threading.Thread):
    '''
    Handles manual annotations.
    Queries the database for uncertain statuses and presents them to the
    user for annotation.
    Arguments:
    ---------------
    data: dict of shared resources; must provide 'database' (pymongo
    collection), 'events' (with a 'train_model' threading.Event), 'queues'
    ('annotation_response' and 'messages') and 'socket'.
    train_threshold: int, number of annotations (for each class) before
    training starts.
    Methods:
    ---------------
    run
    '''
def __init__(self, data, train_threshold=1):
super(Annotator, self).__init__(name='Annotator')
self.database = data['database']
self.train = data['events']['train_model']
self.stoprequest = threading.Event()
        self.n_positive = 0
        self.n_negative = 0
self.train_threshold = train_threshold
self.annotation_response = data['queues']['annotation_response']
self.socket = data['socket']
self.annotated_text = {}
self.message_queue = data['queues']['messages']
self.n_trainer_triggered = 0
self.clf_performance = {
'true_positive': 0,
'true_negative': 0,
'false_positive': 0,
'false_negative': 0
}
self.first = True
def run(self):
logging.debug('Ready!')
while not self.stoprequest.isSet():
            # Roughly every third annotation (p=0.3) is an evaluation run
eval_run = np.random.choice([True, False], size=1, p=[0.3,0.7])[0]
# Look for work:
not_annotated = self.database.find({'manual_relevant': None,
'probability_relevant': {
'$ne': None
}})
# If no work, wait and try again
if not_annotated.count() == 0:
if self.first:
self.socket.emit('display_tweet', {'tweet_id': 'waiting'})
self.first = False
sleep(0.05)
continue
if not eval_run:
work = not_annotated.sort('annotation_priority',
pymongo.ASCENDING).limit(1)
else:
work = not_annotated.limit(1)
for status in work:
if self.annotation_response.full():
self.annotation_response.get()
id_ = str(status['id'])
guess = str(round(status['probability_relevant'], 3))
                logging.debug(f'Sending tweet for annotation. Id: {id_}, '
                              f'evaluation: {eval_run}')
self.socket.emit('display_tweet', {'tweet_id': id_,
'guess': guess,
'eval': str(eval_run)})
if eval_run:
p = round(status['probability_relevant'], 2)
                    self.message_queue.put('This is an evaluation Tweet. '
                                           'I guess it is relevant with '
f'probability {p}')
while not self.stoprequest.isSet():
try:
response = self.annotation_response.get(timeout=0.1)
logging.debug(f'Received response {response}')
break
except queue.Empty as e:
continue
if response == 'relevant':
out = True
self.n_positive += 1
elif response == 'irrelevant':
out = False
self.n_negative += 1
elif response == 'skip':
out = -1
elif response == 'refresh':
continue
else:
logging.debug(f'Invalid response: {response}')
continue
self.first = True
# Evaluate classifier
if self.n_trainer_triggered > 0 and eval_run:
guess = bool(round(status['probability_relevant'], 0))
self.clf_performance[self.evaluate_guess(guess, out)] += 1
# Update record in DB
logging.debug('updating DB')
msg = self.database.update(
{'_id': status['_id']},
{'$set': {'manual_relevant': out,
'probability_relevant': int(out),
'annotation_priority': None,
'clf_version': float('inf')}}
)
# Trigger trainer if necessary
logging.debug('triggering trainer')
threshold = (self.n_trainer_triggered+1) * self.train_threshold
if (self.n_positive > threshold): #and
#self.n_negative > threshold):
self.train.set()
self.n_trainer_triggered += 1
logging.debug('Stopped.')
def evaluate_guess(self, guess, annotation):
if guess and annotation:
return 'true_positive'
if not guess and not annotation:
return 'true_negative'
if not guess and annotation:
return 'false_negative'
if guess and not annotation:
return 'false_positive'
def join(self, timeout=None):
self.stoprequest.set()
super(Annotator, self).join(timeout)
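# --- Usage sketch (not part of the original module; names are assumptions) ---
# A minimal illustration of how an Annotator thread might be wired together,
# assuming a local MongoDB and the queue/event keys referenced in __init__.
def _example_annotator_setup():
    class _StubSocket:
        # stand-in for the socketio server the web front end would provide
        def emit(self, event, payload):
            print(event, payload)
    data = {
        'database': pymongo.MongoClient()['active_stream']['statuses'],
        'events': {'train_model': threading.Event()},
        'queues': {'annotation_response': queue.Queue(maxsize=1),
                   'messages': queue.Queue()},
        'socket': _StubSocket(),
    }
    annotator = Annotator(data, train_threshold=10)
    annotator.start()
    return annotator  # call .join() to request shutdown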
| {
"content_hash": "8f24bbc8deb5a179ca7da3389cde8070",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 80,
"avg_line_length": 37.51923076923077,
"alnum_prop": 0.4614727490175978,
"repo_name": "flinder/active_stream",
"id": "1d6f85cf2fa94afbd86d8a1ccb6f698b2734d7c7",
"size": "5853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "active_stream/annotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1070"
},
{
"name": "HTML",
"bytes": "9393"
},
{
"name": "JavaScript",
"bytes": "9351"
},
{
"name": "Python",
"bytes": "34336"
}
],
"symlink_target": ""
} |
"""ovirt-host-setup vmconsole_proxy plugin."""
from otopi import util
from . import core
@util.export
def createPlugins(context):
core.Plugin(context=context)
# vim: expandtab tabstop=4 shiftwidth=4
| {
"content_hash": "6e55bad9a32e860798a5a15dcf733072",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 46,
"avg_line_length": 14.066666666666666,
"alnum_prop": 0.7345971563981043,
"repo_name": "walteryang47/ovirt-engine",
"id": "861d4b36fd31722184e57e43e533908eb7d33a4e",
"size": "841",
"binary": false,
"copies": "7",
"ref": "refs/heads/eayunos-4.2",
"path": "packaging/setup/plugins/ovirt-engine-common/vmconsole_proxy_helper/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68312"
},
{
"name": "HTML",
"bytes": "16218"
},
{
"name": "Java",
"bytes": "35067647"
},
{
"name": "JavaScript",
"bytes": "69948"
},
{
"name": "Makefile",
"bytes": "24723"
},
{
"name": "PLSQL",
"bytes": "533"
},
{
"name": "PLpgSQL",
"bytes": "796728"
},
{
"name": "Python",
"bytes": "970860"
},
{
"name": "Roff",
"bytes": "10764"
},
{
"name": "Shell",
"bytes": "163853"
},
{
"name": "XSLT",
"bytes": "54683"
}
],
"symlink_target": ""
} |
"""
Enable basic can over a PCAN USB device.
"""
import logging
logger = logging.getLogger(__name__)
from can.interfaces.PCANBasic import *
from can.bus import BusABC
from can.message import Message
boottimeEpoch = 0
try:
import uptime
import datetime
boottimeEpoch = (uptime.boottime() - datetime.datetime.utcfromtimestamp(0)).total_seconds()
except:
boottimeEpoch = 0
# Set up logging
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger('can.pcan')
class PcanBus(BusABC):
def __init__(self, channel, *args, **kwargs):
"""A PCAN USB interface to CAN.
:param str channel:
The can interface name. An example would be PCAN_USBBUS1
"""
if channel == '':
raise TypeError("Must specify a PCAN channel.")
else:
self.channel_info = channel
baudrate = PCAN_BAUD_500K
hwtype = PCAN_TYPE_ISA
ioport = 0x02A0
interrupt = 11
self.m_objPCANBasic = PCANBasic()
self.m_PcanHandle = globals()[channel]
result = self.m_objPCANBasic.Initialize(self.m_PcanHandle, baudrate, hwtype, ioport, interrupt)
if result != PCAN_ERROR_OK:
raise Exception(self.GetFormattedError(result))
super(PcanBus, self).__init__(*args, **kwargs)
def GetFormattedError(self, error):
# Gets the text using the GetErrorText API function
# If the function success, the translated error is returned. If it fails,
# a text describing the current error is returned.
#
#return error
stsReturn = self.m_objPCANBasic.GetErrorText(error, 0)
if stsReturn[0] != PCAN_ERROR_OK:
return "An error occurred. Error-code's text ({0:X}h) couldn't be retrieved".format(error)
else:
return stsReturn[1]
def recv(self, timeout=None):
rx_msg = Message()
log.debug("Trying to read a msg")
result = self.m_objPCANBasic.Read(self.m_PcanHandle)
if result[0] == PCAN_ERROR_QRCVEMPTY or result[0] == PCAN_ERROR_BUSLIGHT or result[0] == PCAN_ERROR_BUSHEAVY:
return None
elif result[0] != PCAN_ERROR_OK:
raise Exception(self.GetFormattedError(result[0]))
theMsg = result[1]
itsTimeStamp = result[2]
log.debug("I've got a message")
arbitration_id = theMsg.ID
bIsRTR = (theMsg.MSGTYPE & PCAN_MESSAGE_RTR.value) == PCAN_MESSAGE_RTR.value
bIsExt = (theMsg.MSGTYPE & PCAN_MESSAGE_EXTENDED.value) == PCAN_MESSAGE_EXTENDED.value
# Flags: EXT, RTR, ERR
#flags = (PYCAN_RTRFLG if bIsRTR else 0) | (PYCAN_STDFLG if not bIsExt else 0)
if bIsExt:
#rx_msg.id_type = ID_TYPE_EXTENDED
log.debug("CAN: Extended")
else:
#rx_msg.id_type = ID_TYPE_STANDARD
log.debug("CAN: Standard")
rx_msg.arbitration_id = arbitration_id
rx_msg.id_type = bIsExt
rx_msg.is_remote_frame = bIsRTR
rx_msg.dlc = theMsg.LEN
#rx_msg.flags = flags
rx_msg.data = theMsg.DATA
rx_msg.timestamp = boottimeEpoch + ((itsTimeStamp.micros + (1000 * itsTimeStamp.millis)) / (1000.0 * 1000.0))
return rx_msg
def send(self, msg):
if msg.id_type:
msgType = PCAN_MESSAGE_EXTENDED
else:
msgType = PCAN_MESSAGE_STANDARD
# create a TPCANMsg message structure
CANMsg = TPCANMsg()
# configure the message. ID, Length of data, message type and data
CANMsg.ID = msg.arbitration_id
CANMsg.LEN = len(msg.data)
CANMsg.MSGTYPE = msgType
# if a remote frame will be sent, data bytes are not important.
if msg.is_remote_frame:
CANMsg.MSGTYPE = msgType | PCAN_MESSAGE_RTR
else:
# copy data
for i in range(CANMsg.LEN):
CANMsg.DATA[i] = msg.data[i]
log.debug("Data: {}".format(msg.data))
log.debug("type: {}".format(type(msg.data)))
result = self.m_objPCANBasic.Write(self.m_PcanHandle, CANMsg)
if result != PCAN_ERROR_OK:
logging.error("Error sending frame :-/ " + self.GetFormattedError(result))
| {
"content_hash": "1f803a1b0530a09e04ac603e930beb12",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 117,
"avg_line_length": 31.45925925925926,
"alnum_prop": 0.607487638332941,
"repo_name": "BateauNautilus/DriveSimulator",
"id": "cc2f55e9fe968cc2298dd754a9287e324e8972a9",
"size": "4247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/pythoncan/can/interfaces/pcan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176014"
}
],
"symlink_target": ""
} |
"""Block Storage Replication Control."""
| {
"content_hash": "4c0cabdaa01618501e2b0fdbf17761b2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 40,
"avg_line_length": 41,
"alnum_prop": 0.7317073170731707,
"repo_name": "skraghu/softlayer-python",
"id": "ac0a44b7eba09e3fa55ddbbca605eb1b5cdab437",
"size": "41",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "SoftLayer/CLI/block/replication/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Python",
"bytes": "1039495"
}
],
"symlink_target": ""
} |
"""
Subsequent Address Family Identifier (SAFI)
http://www.iana.org/assignments/safi-namespace/safi-namespace.xhtml
"""
UNICAST = 1
MULTICAST = 2
MPLS_LABEL = 4 # RFC 3107
MPLS_VPN = 128 # RFC 4364
ROUTE_TARGET_CONSTRAINTS = 132 # RFC 4684
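# Hedged usage note (not from the original module): these values are plain
# integers matching the IANA registry, so callers typically compare a parsed
# SAFI field against them, e.g.
#   if parsed_safi == MPLS_VPN:   # parsed_safi is a hypothetical variable
#       ...handle a VPN route...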
| {
"content_hash": "52b1cd1743346017ce3132fe9a29d678",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 67,
"avg_line_length": 24.5,
"alnum_prop": 0.726530612244898,
"repo_name": "zangree/ryu",
"id": "17ca138cb0b6ea97be0cb2a2f37dd89b19084f7f",
"size": "934",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ryu/lib/packet/safi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26231"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "872503"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "5243536"
},
{
"name": "Shell",
"bytes": "14253"
}
],
"symlink_target": ""
} |
import os
import sys
import datetime
import json
import psycopg2
from psycopg2.extras import Json
from bottle import get, post, run, request
from bottle import jinja2_template as template
# Connection credentials
DB_HOST = os.environ.get('DB_HOST')
if not DB_HOST:
print >> sys.stderr, 'Missing environment variable DB_HOST'
exit(1)
DB_NAME = os.environ.get('DB_NAME')
if not DB_NAME:
print >> sys.stderr, 'Missing environment variable DB_NAME'
exit(1)
# make sure we have AWS credentials and a S3 Bucket
DB_USER = os.environ.get('DB_USER')
if not DB_USER:
print >> sys.stderr, 'Missing environment variable DB_USER'
exit(1)
DB_PASSWORD = os.environ.get('DB_PASSWORD')
if not DB_PASSWORD:
print >> sys.stderr, 'Missing environment variable DB_PASSWORD'
exit(1)
TABLE_NAME = os.environ.get('TABLE_NAME')
if not TABLE_NAME:
print >> sys.stderr, 'Missing environment variable TABLE_NAME'
exit(1)
def write_to_db(data):
today = datetime.datetime.now()
# establish connection to RDS
    conn = psycopg2.connect("host=%s dbname=%s user=%s password=%s" % (DB_HOST, DB_NAME, DB_USER, DB_PASSWORD))
    cur = conn.cursor()
    # NOTE: the table name cannot be passed as a bound parameter, so it is
    # interpolated into the statement; the JSON payload stays parameterized.
    cur.execute("insert into %s (jsondata) values (%%s)" % TABLE_NAME, [Json(data)])
conn.commit()
cur.close()
conn.close()
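# Hedged note (assumption, not from the original repo): write_to_db expects the
# table named by TABLE_NAME to already exist with a JSON column, e.g.
#   CREATE TABLE mandrill_events (id serial PRIMARY KEY, jsondata jsonb);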
@post('/inbound_mail')
def inbound_mail():
post_data = request.POST
event_list = json.loads(post_data.get('mandrill_events'))
for data in event_list:
write_to_db(data)
return 'OK'
@get('/setup')
def setup():
url = request.url.replace('/setup', '/inbound_mail')
return template('This is your hook url, copy it:<h3>{{url}}</h3>', url=url)
run(host='0.0.0.0', port=int(os.environ.get('PORT', 8010)))
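# Hedged local test sketch (assumption): Mandrill's inbound webhook posts a
# form field named mandrill_events holding a JSON list, which can be simulated
# locally with:
#   curl -X POST http://localhost:8010/inbound_mail \
#        --data-urlencode 'mandrill_events=[{"event": "inbound", "msg": {}}]'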
| {
"content_hash": "10512b21d59251cbe9f743d1e6e98e3f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 110,
"avg_line_length": 23.60810810810811,
"alnum_prop": 0.6720091585575272,
"repo_name": "joshuakarjala/mandrill-to-rds",
"id": "54192df76ad45a4e872ca83183fb4332b0604790",
"size": "1747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1747"
}
],
"symlink_target": ""
} |
import sys
import os
from re import sub
import smtplib
from email.mime.text import MIMEText
from getpass import getpass
class KaleInterp:
def __init__(self):
        self.for_bool = False # tells file_reader whether it should write loop bodies out to a temp file
        self.while_bool = False # same as for_bool but for while loops
        # self.keywords maps each kale keyword to its handler method
self.keywords = {'write:' : self.write, 'var:' : self.variable,
'if:' : self.if_call, 'input:' : self.input,
'math:' : self.math, 'for:' : self.for_loop,
'while:' : self.while_loop}
self.kale_variables = {} # holds the variables from the kale program
open_file = open(sys.argv[1], encoding='utf-8')
# all variable must be declared above this method call
self.file_reader(open_file)
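    # Hedged illustration (reconstructed from the handlers below; the exact
    # grammar is an assumption, not taken from any original docs). A small
    # program this interpreter appears to accept:
    #   var: int: _x = 3
    #   write: the value is _x
    #   if: _x > 2 -> write: that is big
    #   for: 2
    #   write: looping
    #   END: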
def file_reader(self, kale_file):
for line in kale_file:
split_line = line.split() # turns the line into an array for iter
if not self.for_bool and not self.while_bool: # if this is satisfied, a standard call is made i.e. no loop
self.read_key_words(split_line)
elif self.for_bool: # this is where looping info begins
self.write_loop_files('END:', '.tmp.txt', split_line, line)
elif self.while_bool:
self.write_loop_files('END_LOOP:', '.tmp_while.txt', split_line, line)
kale_file.close()
def read_key_words(self, split_line):
for key in self.keywords: # iterate through self.keywords
# try statement is to accomodate blank lines in the kale file
try:
if split_line[0] == key: # compare first word to keys
self.keywords[key](split_line) # make appropriate method call
except IndexError:
continue
def write(self, current_line):
buffer_string = '' # declare variable
for index in range(len(current_line)):
try:
                # build the output string word by word; the if branch below
                # substitutes variable values into the printed string
if current_line[index + 1][0] == '_':
buffer_string += str(self.kale_variables[current_line[index + 1]])
else:
buffer_string += current_line[index + 1] + ' '
except IndexError:
break
print(buffer_string)
def variable(self, current_line):
        # resolve the value by type, then store the variable in the dictionary
if current_line[1] == 'bool:':
var_obj = self.bool_obj(current_line)
elif current_line[1] == 'int:':
var_obj = self.int_obj(current_line)
else:
var_obj = self.str_obj(current_line)
self.kale_variables[current_line[2]] = var_obj
# determines and returns the proper python type for each variable
def bool_obj(self, current_line):
if current_line[4] == 'True':
return True
else:
return False
# determines and returns the proper python type for each variable
def int_obj(self, current_line):
try:
return int(current_line[4])
except TypeError:
math_statement = current_line[1:]
return self.math(math_statement)
except ValueError:
if current_line[4][0] == '_':
for var in self.kale_variables:
if var == current_line[4]:
return self.kale_variables[var]
# determines and returns the proper python type for each variable
# gets all of the string
def str_obj(self, current_line):
var_buffer = ''
for line_index in range(len(current_line)):
try:
var_buffer += current_line[line_index + 4] + ' '
except IndexError:
break
return var_buffer
def if_call(self, whole_line):
conditional_statement = [] # this is the conditional to evaluate
result_statement = [] # this holds what the statement does
for x in range(len(whole_line)):
if whole_line[x + 1] == '->':
result_index = x + 2 # this is where the product begins
break
conditional_statement.append(whole_line[x + 1])
        # collect the rest of the line as the result statement
while result_index < len(whole_line):
result_statement.append(whole_line[result_index])
result_index += 1
# evaluates the statement and acts on it
if self.operation_eval(conditional_statement, True):
self.read_key_words(result_statement)
# the 'apostrophe' argument is because the method is multi use
def operation_eval(self, operation, apostrophe):
# evaluates the operational value and returns a simplified True or False
eval_buffer = ''
for item in operation:
if item[0] == '_':
for var_name in self.kale_variables:
if item == var_name:
eval_buffer += ' ' + str(self.insert_apostrophe(self.kale_variables[var_name], apostrophe))
break
else:
eval_buffer += ' ' + self.insert_apostrophe(item, apostrophe)
return eval(eval_buffer)
def input(self, split_line):
innit_counter = 5 # this is the index at which the prompt starts
prompt = '' # the variable to hold the prompt
while innit_counter < len(split_line):
prompt += split_line[innit_counter] + ' '
innit_counter += 1
tmp = input(prompt + '\n')
new_variable_line = [split_line[1], split_line[2], split_line[3],
split_line[4], tmp]
self.variable(new_variable_line)
def math(self, split_line):
innit_counter = 5
operation = []
while innit_counter < len(split_line):
operation.append(split_line[innit_counter])
innit_counter += 1
resolved_var = [split_line[1], split_line[2], split_line[3],
split_line[4], self.operation_eval(operation, False)]
self.variable(resolved_var)
    # the apostrophe argument exists because not every caller needs its
    # values wrapped in quotes (math statements, for example, do not)
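    # Illustrative behaviour (hypothetical values): with apostrophe=True,
    # '42' stays '42' (numeric), '==' stays '==' (operator/keyword), and
    # 'hello' becomes "'hello'"; with apostrophe=False every word passes
    # through unchanged.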
def insert_apostrophe(self, word, apostrophe):
if apostrophe:
try:
int(word)
return word
except ValueError:
python_key_word = [True, False, 'not', 'and', 'or', '==', '<', '>', '<=', '>=', '!=', 'True', 'False']
if word not in python_key_word:
return '\'' + word.strip() + '\''
# this is in case the operator is '=='
else:
return word
else:
return word
def for_loop(self, split_line):
open('.tmp.txt', 'w').close()
self.for_bool = True # sets flag so next lines get written to .tmp.txt
self.for_init_line = split_line
def read_loop_file(self, split_line):
count = int(split_line[1]) # the range for the kale loop
        for _ in range(count):  # controls how many times the loop happens
            with open('.tmp.txt', encoding='utf-8') as loop_file:
                self.file_reader(loop_file)
        os.remove('.tmp.txt')
def read_while_file(self, split_line):
con_statement = []
for item in split_line:
if item != 'while:' and item != '->': # writes and evals conditional
con_statement.append(item)
while True:
# this if serves to break the loop if condition becomes false
# in the kale file
if self.operation_eval(con_statement, True) is True:
pass
else:
break
            # read and execute the tmp file
            with open('.tmp_while.txt', encoding='utf-8') as while_loop_file:
                self.file_reader(while_loop_file)
        os.remove('.tmp_while.txt')  # remove the tmp file
def while_loop(self, split_line):
open('.tmp_while.txt', 'w').close()
self.while_bool = True # flag to trigger files to be written
self.while_init_line = split_line
def write_loop_files(self, end_word, file_name, split_line, line):
if split_line[0] == end_word: # breaks out of loop and resets for_bool
if end_word == 'END:':
self.for_bool = False
self.read_loop_file(self.for_init_line)
elif end_word == 'END_LOOP:':
self.while_bool = False
self.read_while_file(self.while_init_line)
else: # this writes instructions for the loop into a separate file which will be deleted
with open(file_name, 'a', encoding='utf-8') as loop_file:
loop_file.write(line)
class Cleanup:
def __init__(self):
self.clean()
def clean(self):
try:
os.remove('.tmp_while.txt')
print('clean...')
except FileNotFoundError:
print('clean...')
try:
os.remove('.tmp.txt')
print('clean...')
except FileNotFoundError:
print('clean...')
# class Refactor is the first addition that is ultimately designed to
# turn the munch interpreter into a suite of system management tools
class Refactor:
def __init__(self):
try:
original_file_name = sys.argv[2]
regex_pattern = sys.argv[3]
new_phrase = sys.argv[4]
except IndexError:
print('Error: must pass arguments \nfile_name old_phrase new_phrase')
sys.exit()
with open(original_file_name + '.backup', 'w') as backup: # creates the backup file
# below, we begin to write out the backup file
with open(original_file_name, 'r') as original:
for line in original:
backup.write(line)
# below rewrites the original file from backup.txt
# but using the sub method to replace given word
        with open(original_file_name + '.backup', 'r') as backup:  # 'r' opens the backup read-only
with open(original_file_name, 'w') as rewrite: # 'w' ensures that the file will be overwritten
for line in backup:
new_line = sub(regex_pattern, new_phrase, line)
rewrite.write(new_line)
class HelpPage():
def __init__(self):
try:
if sys.argv[1] == '-h':
print('\n')
except IndexError:
print('\nERROR: must pass arguments\n')
print('Execute kalefile\t\tmunch file_name.kale')
print('-c\t\t\t\tClean up residual files munch occasionally makes')
print('-e\t\t\t\tSend email from the terminal (use text editor): -e email_body_file')
print('-h\t\t\t\tDisplay this help page')
print('-r\t\t\t\tTo refactor a file: munch -r file_name old_phrase new_phrase')
# smtplib stands for Simple Mail Transfer Protocol Library
# MIMEText wraps the message string as a MIME text payload for the email
# gi.repository is the repository that holds the GUI stuff
class SendMail():
def __init__(self):
self.make_message()
self.usr_email = input('Sender email: ')
self.password = getpass('Password: ')
self.rec_email = input('Send to: ')
self.subject = input('Subject: ')
print(self.message)
if input('\n\nSend? y/n: ') == 'y':
self.send_email(self.message, self.subject, self.usr_email, self.rec_email, self.usr_email, self.password)
def make_message(self):
e_file = sys.argv[2]
self.message = ''
with open(e_file, 'r') as f:
for line in f:
self.message += line
def send_email(self, message_entry, subject, from_addr, to_addr, log_em, em_pass):
# Gtk.Entry.get_text(self.message_entry)
message = MIMEText(message_entry)
message['Subject'] = subject
message['From'] = from_addr
message['To'] = to_addr
# So the port number needs to be an integer, not a string value
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()
        # log in to the SMTP server before sending the message
s.login(log_em, em_pass)
s.send_message(message)
s.quit()
try:
if '.kale' in sys.argv[1]:
KaleInterp()
elif sys.argv[1] == '-c':
Cleanup()
elif sys.argv[1] == '-r':
Refactor()
elif sys.argv[1] == '-h':
HelpPage()
elif sys.argv[1] == '-e':
SendMail()
except IndexError:
HelpPage()
| {
"content_hash": "89fbcf31b01d4bfe4d289974addc70b7",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 119,
"avg_line_length": 39.39393939393939,
"alnum_prop": 0.5561538461538461,
"repo_name": "LordIronwheel/Munch-Kale",
"id": "782ec72c9ab93511ea58083e26fd1a0f9b58bba8",
"size": "13161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "munch.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13161"
}
],
"symlink_target": ""
} |
from django.db import models
class DownloadLocationEquivalent(models.Model):
old_location = models.CharField(max_length=512)
new_location = models.CharField(max_length=512)
class WhatTorrentMigrationStatus(models.Model):
STATUS_PROCESSING = 0
STATUS_DUPLICATE = 1
STATUS_SKIPPED = 2
STATUS_UPLOADED = 3
STATUS_COMPLETE = 4
STATUS_SKIPPED_PERMANENTLY = 5
STATUS_FAILED_VALIDATION = 6
STATUS_RESEEDED = 7
what_torrent_id = models.BigIntegerField(unique=True)
status = models.IntegerField()
pth_torrent_id = models.BigIntegerField(null=True)
class TorrentGroupMapping(models.Model):
what_group_id = models.BigIntegerField()
pth_group_id = models.BigIntegerField()
class Meta:
unique_together = (('what_group_id', 'pth_group_id'),)
| {
"content_hash": "2aa070e4d1054651dcd0ad94f1abbc75",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 62,
"avg_line_length": 27.862068965517242,
"alnum_prop": 0.7116336633663366,
"repo_name": "karamanolev/WhatManager2",
"id": "e6049b6d7376fa34fc4a2a1df6497c563185cc3d",
"size": "808",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wcd_pth_migration/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202636"
},
{
"name": "HTML",
"bytes": "139705"
},
{
"name": "JavaScript",
"bytes": "632927"
},
{
"name": "Python",
"bytes": "508360"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2012-2014, Austin Benson and David Gleich
All rights reserved.
This file is part of MRTSQR and is under the BSD 2-Clause License,
which can be found in the LICENSE file in the root directory, or at
http://opensource.org/licenses/BSD-2-Clause
"""
""" Generate TSQR test problems.
These problems use a constant R factor. R is just the upper triangle
of the all ones matrix.
The output of this script is a Hadoop distributed sequence file,
where each key is a random number, and each value is a row of
the matrix.
History
-------
:2011-01-26: Initial coding
:2011-01-27: Added maprows to let mappers handle more than ncols of data.
"""
__author__ = 'David F. Gleich'
import sys
import os
import re
import math
import random
import dumbo
import dumbo.util
import dumbo.lib
import numpy
import util
# create the global options structure
gopts = util.GlobalOptions()
def setstatus(msg):
print >>sys.stderr, "Status:", msg
dumbo.util.setstatus(msg)
def first_mapper(data):
""" This mapper doesn't take any input, and generates the R factor. """
hostname = os.uname()[1]
print >>sys.stderr, hostname, "is a mapper"
# suck up all the data so Hadoop doesn't complain
for key,val in data:
pass
n = gopts.getintkey('ncols')
m = int(os.getenv('nrows'))
k = int(os.getenv('maprows'))/n
s = float(m)/float(n)
setstatus(
"generating %i-by-%i R matrix with scale factor %i/%i=%s"%(
n, n, m, n, s))
R = numpy.triu(numpy.ones((n,n)))/math.sqrt(s)
for i in xrange(k):
setstatus(
'step %i/%i: generating local %i-by-%i Q matrix'%(i+1,k,n,n))
Q = numpy.linalg.qr(numpy.random.randn(n,n))[0] # just the Q factor
setstatus('step %i/%i: multiplying local matrix'%(i+1,k))
A = Q.dot(R)
setstatus('step %i/%i: outputting %i rows'%(i+1,k,A.shape[0]))
for row in A:
key = random.randint(0, 4000000000)
yield key, util.array2list(row)
def localQoutput(rows):
setstatus('converting to numpy array')
A = numpy.array(rows)
localm = A.shape[0]
setstatus('generating local Q of size %i-by-%i'%(localm,localm))
Q = numpy.linalg.qr(numpy.random.randn(localm,localm))[0] # just the Q factor
setstatus(
'multiplying %i-by-%i A by %i-by-%i Q'%(localm,A.shape[1],localm,localm))
A = Q.dot(A)
setstatus('outputting')
for row in A:
yield util.array2list(row)
def second_mapper(data):
n = gopts.getintkey('ncols')
m = int(os.getenv('nrows'))
maxlocal = int(os.getenv('maxlocal'))
totalrows = 0
totalouts = 0
rows = []
setstatus('acquiring data with ncols=%i'%(n))
for key,value in data:
assert(len(value) == n)
rows.append(value)
totalrows += 1
if len(rows) >= maxlocal:
dumbo.util.incrcounter('Program','rows acquired',len(rows))
totalouts += 1
for row in localQoutput(rows):
key = random.randint(0, 4000000000)
yield key, row
# reset rows, status
rows = []
setstatus('acquiring data with ncols=%i'%(n))
if len(rows) > 0:
for row in localQoutput(rows):
key = random.randint(0, 4000000000)
yield key, row
def starter(prog):
""" Start the program with a null input. """
# get options
# set the global opts
gopts.prog = prog
prog.addopt('memlimit','4g')
prog.addopt('file','util.py')
prog.addopt('libegg','numpy')
m = gopts.getintkey('nrows',None) # error with no key
n = gopts.getintkey('ncols',None) # error with no key
maprows = gopts.getintkey('maprows',2*n)
stages = gopts.getintkey('nstages',2)
maxlocal = gopts.getintkey('maxlocal',n)
    if maprows % n != 0:
maprows = (maprows/n)*n
gopts.setkey('maprows',maprows)
print "'maprows' adjusted to", maprows, "to ensure integer k in maprows=k*ncols"
    if m % maprows != 0:
m = ((m/maprows)+1)*maprows
gopts.setkey('nrows',m)
print "'nrows' changed to", m, "to ensure scalar integer k in nrows=k*maprows"
print "using", stages, "stages"
gopts.save_params()
prog.addopt('input','IGNORED')
prog.addopt('libjar','../java/build/jar/hadoop-lib.jar')
prog.addopt('inputformat','gov.sandia.dfgleic.NullInputFormat')
prog.addopt('jobconf','mapred.output.compress=true')
prog.addopt('jobconf','mapred.output.compress.codec=com.hadoop.compression.lzo.LzoCodec')
prog.addopt('jobconf','fs.local.block.size='+str(int(1024*1024*256)))
def runner(job):
# grab info from environment
m = gopts.getintkey('nrows')
maprows = gopts.getintkey('maprows')
k = m/maprows
stages = gopts.getintkey('nstages')
print >>sys.stderr, "using %i map tasks"%(k)
for i in xrange(stages):
if i==0:
opts = [('numreducetasks',str(k)),
('nummaptasks',str(k))]
job.additer(first_mapper,
"org.apache.hadoop.mapred.lib.IdentityReducer",
opts=opts)
else:
job.additer(second_mapper,"org.apache.hadoop.mapred.lib.IdentityReducer",
opts=[('numreducetasks',str(k))])
if __name__=='__main__':
    # hand control to dumbo, which wires together starter() and runner()
dumbo.main(runner, starter)
| {
"content_hash": "fa1f889f6cef1aa73a194a20581127c9",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 93,
"avg_line_length": 28.225,
"alnum_prop": 0.5886625332152348,
"repo_name": "arbenson/mrtsqr",
"id": "532b043a6f24ef0daa1bc231a9c4125f204f1319",
"size": "5645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dumbo/generate_test_problems.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2012"
},
{
"name": "C++",
"bytes": "76142"
},
{
"name": "Java",
"bytes": "21697"
},
{
"name": "Makefile",
"bytes": "1309"
},
{
"name": "Python",
"bytes": "292758"
},
{
"name": "Shell",
"bytes": "13614"
}
],
"symlink_target": ""
} |
from yubikey_totp_gui import main
| {
"content_hash": "c4db091387215b56120ee0ff385f8ffe",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.8235294117647058,
"repo_name": "ldrumm/yubikey-totp-gui",
"id": "b72aaf646703ca37d6bc1134929f7abdbce9eeb6",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "18069"
}
],
"symlink_target": ""
} |
from django.db.models import Q
from django.test import SimpleTestCase
from django_perf_rec.orm import patch_ORM_to_be_deterministic
class PatchORMToBeDeterministicTests(SimpleTestCase):
def test_call_it(self):
patch_ORM_to_be_deterministic()
def test_call_it_again(self):
patch_ORM_to_be_deterministic()
def test_q_connector(self):
q1 = Q(foo="bar") | Q(bar="foo")
_path, args, kwargs = q1.deconstruct()
q2 = Q(*args, **kwargs)
self.assertEqual(q1, q2)
| {
"content_hash": "ca836f7178e5cb863bede3c40b38c253",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 28.77777777777778,
"alnum_prop": 0.6640926640926641,
"repo_name": "YPlan/django-perf-rec",
"id": "efdda1cc40b802cb1697a51ca1e2d61fc39f4643",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_orm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54819"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from common import views
urlpatterns = [
url(r'^$', views.index, name='home'),
url(r'^disciplines/?$', views.disciplines_index, name='disciplines.index'),
url(r'^disciplines/new$',
views.DisciplineCreateView.as_view(), name='disciplines.new'),
url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<id>\d+)$',
views.discipline_detail, name='disciplines.detail'),
url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<pk>\d+)/edit$',
views.DisciplineUpdateView.as_view(), name='disciplines.edit'),
url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<pk>\d+)/delete$',
views.DisciplineDeleteView.as_view(), name='disciplines.delete'),
url(r'^performances/?$',
views.performances_index, name='performances.index'),
url(r'^performances/new$',
views.PerformanceCreateView.as_view(), name='performances.new'),
url(r'^performances/(?P<pk>\d+)$',
views.PerformanceDetailView.as_view(), name='performances.detail'),
url(r'^performances/(?P<pk>\d+)/edit$',
views.PerformanceUpdateView.as_view(), name='performances.edit'),
url(r'^performances/(?P<pk>\d+)/delete$',
views.PerformanceDeleteView.as_view(), name='performances.delete'),
]
| {
"content_hash": "3c8160af22ab4883a81dc56b8b492c18",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 44.392857142857146,
"alnum_prop": 0.6395816572807723,
"repo_name": "saechtner/turn-events",
"id": "14ad687a234ad8952cd6bf115409322cd38c7216",
"size": "1243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Turnauswertung-py3/common/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8553"
},
{
"name": "HTML",
"bytes": "130726"
},
{
"name": "JavaScript",
"bytes": "85561"
},
{
"name": "Python",
"bytes": "90506"
},
{
"name": "TeX",
"bytes": "15536"
}
],
"symlink_target": ""
} |
import azurelinuxagent.utils.shellutil as shellutil
from azurelinuxagent.distro.default.osutil import DefaultOSUtil
class AlpineOSUtil(DefaultOSUtil):
def __init__(self):
super(AlpineOSUtil, self).__init__()
self.agent_conf_file_path = '/etc/waagent.conf'
def is_dhcp_enabled(self):
return True
def get_dhcp_pid(self):
ret = shellutil.run_get_output("pidof udhcpc")
return ret[1] if ret[0] == 0 else None
def set_ssh_client_alive_interval(self):
# Alpine will handle this.
pass
def conf_sshd(self, disable_password):
# Alpine will handle this.
pass
| {
"content_hash": "10a9ebd13962112a22ca473dbb8f7800",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 63,
"avg_line_length": 29.363636363636363,
"alnum_prop": 0.651702786377709,
"repo_name": "nathanleclaire/WALinuxAgent",
"id": "e20c1a590b703d3f95c963b0688caa7bbcf9138e",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azurelinuxagent/distro/alpine/osutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "590688"
},
{
"name": "Shell",
"bytes": "5124"
}
],
"symlink_target": ""
} |
import os
import sys
import fcntl
import random
import socket
import logging
import threading
import collections
import multiprocessing
import synapse.exc as s_exc
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.dyndeps as s_dyndeps
import synapse.eventbus as s_eventbus
import synapse.lib.kv as s_kv
import synapse.lib.net as s_net
import synapse.lib.config as s_config
import synapse.lib.msgpack as s_msgpack
import synapse.lib.threads as s_threads
import synapse.lib.crypto.ecc as s_ecc
import synapse.lib.crypto.vault as s_vault
import synapse.lib.crypto.tinfoil as s_tinfoil
logger = logging.getLogger(__name__)
'''
Base classes for the synapse "cell" microservice architecture.
'''
CELL_PROTO_VERSION = (1, 0)
class SessBoss:
'''
Mixin base class for cell session managers.
'''
def __init__(self, auth, roots=()):
self._boss_auth = auth
self.roots = list(roots)
root = s_vault.Cert.load(auth[1].get('root'))
self.roots.append(root)
self._my_static_prv = s_ecc.PriKey.load(auth[1].get('ecdsa:prvkey'))
self.cert = s_vault.Cert.load(auth[1].get('cert'))
self.certbyts = self.cert.dump()
def valid(self, cert):
if not any([r.signed(cert) for r in self.roots]):
return False
tock = cert.tokn.get('expires')
if tock is None:
logger.warning('SessBoss: cert has no "expires" value')
return False
tick = s_common.now()
if tock < tick:
logger.warning('SessBoss: cert has expired')
return False
return True
class Cell(s_config.Configable, s_net.Link, SessBoss):
'''
A Cell is a micro-service in a neuron cluster.
Args:
dirn (str): Path to the directory backing the Cell.
conf (dict): Configuration data.
'''
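    # Illustrative construction (hypothetical path and conf values, not part
    # of the original source):
    #
    #     cell = Cell('/path/to/cell00', conf={'host': 'cell.example.com'})
    #     addr = cell.getCellAddr()   # (host, port) tuple for this cell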
_def_port = 0
def __init__(self, dirn, conf=None):
s_net.Link.__init__(self)
s_config.Configable.__init__(self)
self.dirn = dirn
s_common.gendir(dirn)
# config file in the dir first...
self.loadConfPath(self._path('config.json'))
if conf is not None:
self.setConfOpts(conf)
self.reqConfOpts()
self.plex = s_net.Plex()
self.kvstor = s_kv.KvStor(self._path('cell.lmdb'))
self.kvinfo = self.kvstor.getKvDict('cell:info')
# open our vault
self.vault = s_vault.Vault(self._path('vault.lmdb'))
self.root = self.vault.genRootCert()
# setup our certificate and private key
auth = self._genSelfAuth()
roots = self.vault.getRootCerts()
SessBoss.__init__(self, auth, roots)
self.cellinfo = {'ctor': '%s.%s' % (self.__class__.__module__, self.__class__.__name__)}
self.cellauth = auth
self.cellpool = None
self.celluser = CellUser(auth, roots=roots)
addr = self.getConfOpt('bind')
port = self.getConfOpt('port')
def onlink(link):
sess = CellSess(link, self)
link.onrx(sess.rx)
# fini cuts both ways
sess.onfini(link.fini)
link.onfini(sess.fini)
addr, port = self.plex.listen((addr, port), onlink)
host = self.getConfOpt('host')
self.celladdr = (host, port)
# add it to our neuron reg info...
self.cellinfo['addr'] = self.celladdr
# lock cell.lock
self.lockfd = s_common.genfile(self._path('cell.lock'))
try:
fcntl.lockf(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError as e:
logger.exception('Failed to obtain lock for [%s]', self.lockfd.name)
raise
self.onfini(self._onCellFini)
self.onfini(self.finiCell)
self.neuraddr = self.cellauth[1].get('neuron')
if self.neuraddr is not None:
self.cellpool = CellPool(auth, self.neuraddr, neurfunc=self._onNeurSess)
self.onfini(self.cellpool.fini)
# Give implementers the chance to hook into the cell
self.postCell()
logger.debug('Cell is done initializing')
def _onNeurSess(self, sess):
def retn(ok, retn):
if not ok:
logger.warning('%s cell:reg %r' % (self.__class__.__name__, ok))
# either way, try again soon...
if not sess.isfini:
s_glob.sched.insec(60, cellreg)
def cellreg():
if sess.isfini:
return
sess.callx(('cell:reg', self.cellinfo), retn)
cellreg()
def _genCellName(self, name):
return name
def _genSelfAuth(self):
path = self._path('cell.auth')
if os.path.isfile(path):
return s_msgpack.loadfile(path)
name = self._genCellName('root')
root = self.vault.genUserAuth(name)
s_msgpack.dumpfile(root, path)
path = self._path('user.auth')
name = self._genCellName('user')
user = self.vault.genUserAuth(name)
s_msgpack.dumpfile(user, path)
return root
def _onCellFini(self):
self.plex.fini()
self.kvstor.fini()
self.vault.fini()
self.lockfd.close()
def postCell(self):
'''
        Module implementers may override this method to initialize the cell
*after* the configuration data has been loaded.
Returns:
None
'''
pass
def finiCell(self):
'''
        Module implementers may override this method to automatically tear down
resources created during postCell().
'''
pass
def handlers(self):
'''
        Module implementers may override this method to provide the
``<mesg>:<func>`` mapping required for the Cell link layer.
Returns:
dict: Dictionary mapping endpoints to functions.
'''
return {
'cell:ping': self._onCellPing,
}
def genUserAuth(self, name):
'''
Generate an auth blob that is valid for this Cell.
Args:
name (str): Name of the user to generate the auth blob for.
Returns:
((str, dict)): A user auth tufo.
'''
return self.vault.genUserAuth(name)
def getCellAddr(self):
'''
Return a (host, port) address tuple for the Cell.
'''
return self.celladdr
def getCellAuth(self):
'''
Return the auth structure for this Cell.
Returns:
((str,dict)): Auth tufo for this Cell.
'''
return self.cellauth
def getRootCert(self):
'''
Get the root certificate for the cell.
Returns:
s_vault.Cert: The root Cert object for the cell.
'''
return self.root
def getCellDict(self, name):
'''
Get a KvDict with a given name.
Args:
name (str): Name of the KvDict.
Notes:
Module implementers may use the ``getCellDict()`` API to get
a KvDict object which acts like a Python dictionary, but will
persist data across process startup/shutdown. The keys and
values are msgpack encoded prior to storing them, allowing the
persistence of complex data structures.
Returns:
s_kv.KvDict: A persistent KvDict.
'''
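        # Illustrative usage from a hypothetical Cell subclass (not part of
        # the original source; the set()/get() method names are assumed from
        # the KvDict behaviour described above):
        #
        #     info = self.getCellDict('info')
        #     info.set('last:start', s_common.now())  # survives restarts
        #     tick = info.get('last:start')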
return self.kvstor.getKvDict('cell:dict:' + name)
def getCellSet(self, name):
'''
Get a KvList with a given name.
'''
return self.kvstor.getKvSet('cell:set:' + name)
def _onCellPing(self, chan, mesg):
data = mesg[1].get('data')
chan.txfini(data)
def _path(self, *paths):
'''
Join a path relative to the cell persistence directory.
'''
return os.path.join(self.dirn, *paths)
def getCellPath(self, *paths):
'''
Get a file path underneath the underlying Cell path.
Args:
*paths: Paths to join together.
Notes:
Does not protect against path traversal.
This does not make any required paths.
Returns:
str: Path under the cell
'''
return os.path.join(self.dirn, 'cell', *paths)
def getCellDir(self, *paths):
'''
Get (and make) a directory underneath the underlying Cell path.
Args:
*paths: Paths to join together
Notes:
Does not protect against path traversal.
Returns:
str: Path under the cell
'''
return s_common.gendir(self.dirn, 'cell', *paths)
def initConfDefs(self):
self.addConfDefs((
('ctor', {
'ex': 'synapse.cells.axon',
'doc': 'The path to the cell constructor'}),
('bind', {'defval': '0.0.0.0', 'req': 1,
'doc': 'The IP address to bind'}),
('host', {'defval': socket.gethostname(),
'ex': 'cell.vertex.link',
'doc': 'The host name used to connect to this cell. This should resolve over DNS. Defaults to the result of socket.gethostname().'}),
('port', {'defval': 0,
'doc': 'The TCP port the Cell binds to (defaults to dynamic)'}),
))
class Sess(s_net.Link):
'''
    Manages network session establishment and maintenance.
    We use NIST SP 800-56A r2 "C(2e, 2s, ECC DH)", a scheme where both parties have 2 key pairs: static and ephemeral.
    Sequence diagram (U: initiator, V: listener, Ec: public ephemeral initiator key, ec: private ephemeral initiator key):
    U -> V: Ec, initiator cert
    V -> U: Es, listener cert, encrypted message ("helo")
    The first encrypted message is sent so that a failure can be identified as quickly as possible.
'''
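    # Key-derivation sketch (illustrative; it mirrors _handSessMesg below):
    #
    #     km = ECDHE(my_ephem_prv, peer_ephem_pub, my_static_prv, peer_static_pub)
    #     to_initiator_symkey, to_listener_symkey = km[:32], km[32:]
    #
    # Each side then builds a CryptSeq with the rx/tx keys swapped, so traffic
    # in each direction uses its own symmetric key.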
def __init__(self, link, boss, lisn=False):
s_net.Link.__init__(self, link)
self.chain(link)
self._sess_boss = boss
self.is_lisn = lisn # True if we are the listener.
self._crypter = None # type: CryptSeq
self._my_ephem_prv = None # type: s_ecc.PriKey
self._tx_lock = threading.Lock()
def handlers(self):
return {
'helo': self._onMesgHelo,
'xmit': self._onMesgXmit,
'fail': self._onMesgFail
}
def _tx_real(self, mesg):
if self._crypter is None:
raise s_exc.NotReady(mesg='Crypter not set')
with self._tx_lock:
data = self._crypter.encrypt(mesg)
self.link.tx(('xmit', {'data': data}))
def _onMesgFail(self, link, mesg):
logger.error('Remote peer issued error: %r.', mesg)
self.txfini()
def _send_fail(self, exc):
self.link.tx(('fail', {'exception': repr(exc)}))
def _onMesgXmit(self, link, mesg):
if self._crypter is None:
logger.warning('xmit message before session establishment complete')
raise s_common.NotReady()
ciphertext = mesg[1].get('data')
try:
newm = self._crypter.decrypt(ciphertext)
except Exception as e:
self._send_fail(s_common.getexcfo(e))
logger.exception('decryption')
self.txfini()
return
try:
self.taskplex.rx(self, newm)
except Exception as e:
self._send_fail(s_common.getexcfo(e))
logger.exception('xmit taskplex error')
self.txfini()
@s_glob.inpool
def _initiateSession(self):
'''
        Start a new session as the initiator.
        Send our ephemeral public key and our certificate.
'''
if self.is_lisn:
raise Exception('Listen link cannot initiate a session')
self._my_ephem_prv = s_ecc.PriKey.generate()
self.link.tx(('helo', {'version': CELL_PROTO_VERSION,
'ephem_pub': self._my_ephem_prv.public().dump(),
'cert': self._sess_boss.certbyts}))
def _handSessMesg(self, mesg):
'''
Validate and set up the crypto from a helo message
'''
if self._crypter is not None:
raise s_exc.ProtoErr('Received two client helos')
if self.is_lisn:
self._my_ephem_prv = s_ecc.PriKey.generate()
version = mesg[1].get('version')
if version != CELL_PROTO_VERSION:
raise s_exc.ProtoErr('Found peer with missing or incompatible version')
peer_cert = s_vault.Cert.load(mesg[1].get('cert'))
peer_ephem_pub = s_ecc.PubKey.load(mesg[1].get('ephem_pub'))
if not self._sess_boss.valid(peer_cert):
clsn = self.__class__.__name__
raise s_exc.CryptoErr(mesg='%s got bad cert (%r)' % (clsn, peer_cert.iden(),))
peer_static_pub = s_ecc.PubKey.load(peer_cert.tokn.get('ecdsa:pubkey'))
km = s_ecc.doECDHE(self._my_ephem_prv, peer_ephem_pub,
self._sess_boss._my_static_prv, peer_static_pub, info=b'session')
to_initiator_symkey, to_listener_symkey = km[:32], km[32:]
if self.is_lisn:
self._crypter = s_tinfoil.CryptSeq(to_listener_symkey, to_initiator_symkey)
else:
self._crypter = s_tinfoil.CryptSeq(to_initiator_symkey, to_listener_symkey)
# Decrypt the test message
testmesg = mesg[1].get('testmesg')
self._crypter.decrypt(testmesg)
return peer_cert
@s_glob.inpool
def _onMesgHelo(self, link, mesg):
'''
        Handle receiving the session establishment message from the peer.
        Send back our ephemeral public key, our cert and, if we are the listener, an encrypted test message.
'''
try:
peer_cert = self._handSessMesg(mesg)
except Exception as e:
logger.exception('Exception encountered handling session message.')
self._send_fail(s_common.getexcfo(e))
self.txfini()
return
if self.is_lisn:
# This would be a good place to stick version or info stuff
testmesg = {}
with self._tx_lock:
self.link.tx(('helo', {'version': CELL_PROTO_VERSION,
'ephem_pub': self._my_ephem_prv.public().dump(),
'cert': self._sess_boss.certbyts,
'testmesg': self._crypter.encrypt(testmesg)}))
user = peer_cert.tokn.get('user')
self.setLinkProp('cell:peer', user)
self.fire('sess:txok')
self._my_ephem_prv = None
class UserSess(Sess):
'''
The session object for a CellUser.
'''
def __init__(self, chan, prox):
Sess.__init__(self, chan, prox, lisn=False)
self._sess_prox = prox
self._txok_evnt = threading.Event()
self.on('sess:txok', self._setTxOk)
self.taskplex = s_net.ChanPlex()
self.taskplex.setLinkProp('repr', 'UserSess.taskplex')
def _setTxOk(self, mesg):
self._txok_evnt.set()
def waittx(self, timeout=None):
self._txok_evnt.wait(timeout=timeout)
return self._txok_evnt.is_set()
def call(self, mesg, timeout=None):
'''
Call a Cell endpoint which returns a single value.
'''
with self.task(mesg, timeout=timeout) as chan:
return chan.next(timeout=timeout)
def callx(self, mesg, func):
if self.isfini:
return func(False, ('IsFini', {}))
chan = self.chan()
def rx(link, data):
chan.setLinkProp('callx:retn', True)
chan.fini()
func(*data) # ok, retn
chan.onrx(rx)
def fini():
if chan.getLinkProp('callx:retn') is not None:
return
func(False, ('LinkTimeOut', {}))
chan.onfini(fini)
chan.tx(mesg)
def task(self, mesg=None, timeout=None):
'''
Open a new channel within our session.
'''
chan = self.taskplex.open(self)
chan.setq()
if mesg is not None:
chan.tx(mesg)
return chan
def chan(self):
return self.taskplex.open(self)
class CellSess(Sess):
'''
The session object for the Cell.
'''
def __init__(self, chan, cell):
Sess.__init__(self, chan, cell, lisn=True)
self._sess_cell = cell
def onchan(chan):
chan.setLinkProp('cell:peer', self.getLinkProp('cell:peer'))
chan.onrx(self._sess_cell.rx)
self.taskplex = s_net.ChanPlex(onchan=onchan)
self.taskplex.setLinkProp('repr', 'CellSess.taskplex')
self.onfini(self.taskplex.fini)
class CellUser(SessBoss, s_eventbus.EventBus):
def __init__(self, auth, roots=()):
s_eventbus.EventBus.__init__(self)
SessBoss.__init__(self, auth, roots=roots)
def open(self, addr, timeout=None):
'''
        Synchronously opens the Cell at the remote addr and returns a UserSess Link.
Args:
addr ((str,int)): A (host, port) address tuple
timeout (int/float): Connection timeout in seconds.
Raises:
CellUserErr: Raised if a timeout or link negotiation fails. May have
additional data in the ``excfo`` field.
Returns:
UserSess: The connected Link.
'''
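        # Illustrative usage (hypothetical address, not part of the original
        # source):
        #
        #     user = CellUser(auth)
        #     sess = user.open(('127.0.0.1', 33333), timeout=30)
        #     resp = sess.call(('cell:ping', {'data': 'hi'}), timeout=30)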
with s_threads.RetnWait() as retn:
def onlink(ok, link):
if not ok:
erno = link
errs = os.strerror(erno)
return retn.errx(OSError(erno, errs))
sess = UserSess(link, self)
sess._initiateSession()
retn.retn(sess)
s_glob.plex.connect(tuple(addr), onlink)
isok, sess = retn.wait(timeout=timeout)
if not isok:
raise s_common.CellUserErr(mesg='retnwait timed out or failed', excfo=sess)
if not sess.waittx(timeout=timeout):
raise s_common.CellUserErr(mesg='waittx timed out or failed')
return sess
def getCellSess(self, addr, func):
'''
A non-blocking way to form a session to a remote Cell.
Args:
addr (tuple): A address, port tuple.
func: A callback function which takes a (ok, retn) args
Returns:
None
'''
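        # Illustrative callback (hypothetical; on success retn is a connected
        # UserSess, on failure it carries error information):
        #
        #     def ondone(ok, retn):
        #         if ok:
        #             retn.call(('cell:ping', {'data': 'hi'}))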
def onsock(ok, retn):
if not ok:
return func(False, retn)
link = retn
sess = UserSess(link, self)
def txok(x):
sess.setLinkProp('sess:txok', True)
func(True, sess)
def fini():
# if we dont have a peer, we were not successful
if sess.getLinkProp('cell:peer') is not None:
return
func(False, ('IsFini', {}))
sess.on('sess:txok', txok)
sess.onfini(fini)
sess._initiateSession()
s_glob.plex.connect(tuple(addr), onsock)
class CellPool(s_eventbus.EventBus):
'''
A CellPool maintains sessions with a neuron and cells.
'''
def __init__(self, auth, neuraddr, neurfunc=None):
s_eventbus.EventBus.__init__(self)
self.neur = None
self.neuraddr = neuraddr
self.neurfunc = neurfunc
self.auth = auth
self.user = CellUser(auth)
self.names = collections.deque() # used for round robin..
self.ctors = {}
self.cells = s_eventbus.BusRef()
self.neurok = threading.Event()
self._fireNeurLink()
self.onfini(self.cells.fini)
def neurwait(self, timeout=None):
'''
Wait for the neuron connection to be ready.
Returns:
bool: True on ready, False on timeout.
'''
return self.neurok.wait(timeout=timeout)
def items(self):
return self.cells.items()
def _fireNeurLink(self):
if self.isfini:
return
def fini():
if not self.isfini:
self._fireNeurLink()
def onsess(ok, sess):
if not ok:
if self.isfini:
return
s_glob.sched.insec(2, self._fireNeurLink)
return
sess.onfini(fini)
self.neur = sess
self.neurok.set()
if self.neurfunc:
self.neurfunc(sess)
self.user.getCellSess(self.neuraddr, onsess)
def add(self, name, func=None):
'''
Add a named cell to the pool.
Func will be called back with each new Sess formed.
'''
self.names.append(name)
def retry():
if not self.isfini:
s_glob.sched.insec(2, connect)
def onsess(ok, retn):
if self.isfini:
return
if not ok:
self.fire('cell:disc', name=name)
logger.warning('CellPool.add(%s) onsess error: %r' % (name, retn))
return retry()
sess = retn
sess.onfini(connect)
self.cells.put(name, sess)
if func is not None:
try:
func(sess)
except Exception as e:
logger.exception('CellPool.add(%s) callback failed' % (name,))
self.fire('cell:add', name=name, sess=sess)
def onlook(ok, retn):
if self.isfini:
return
if not ok:
logger.warning('CellPool.add(%s) onlook error: %r' % (name, retn))
return retry()
if retn is None:
logger.warning('CellPool.add(%s) onlook retn none.' % (name,))
return retry()
addr = retn.get('addr')
self.user.getCellSess(addr, onsess)
def connect():
if self.isfini:
return
self.lookup(name, onlook)
connect()
def get(self, name):
return self.cells.get(name)
def lookup(self, name, func):
if self.neur is None:
return func(False, ('NotReady', {}))
mesg = ('cell:get', {'name': name})
self.neur.callx(mesg, func)
def any(self):
items = self.cells.items()
if not items:
return False, ('NotReady', {})
return True, random.choice(items)
def divide(dirn, conf=None):
'''
Create an instance of a Cell in a subprocess.
Args:
dirn (str): Path to the directory backing the Cell.
conf (dict): Configuration data.
Returns:
multiprocessing.Process: The Process object which was created to run the Cell
'''
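    # Illustrative usage (hypothetical directory and conf, not part of the
    # original source):
    #
    #     proc = divide('/path/to/cell00', conf={'port': 0})
    #     ...
    #     proc.terminate()
    #     proc.join()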
ctx = multiprocessing.get_context('spawn')
proc = ctx.Process(target=main, args=(dirn, conf))
proc.start()
return proc
def getCellCtor(dirn, conf=None):
'''
Find the ctor option for a Cell and resolve the function.
Args:
        dirn (str): The path to the Cell directory. This may contain the
ctor in the ``config.json`` file.
conf (dict): Configuration dictionary for the cell. This may contain
the ctor in the ``ctor`` key.
Returns:
((str, function)): The python path to the ctor function and the resolved function.
Raises:
ReqConfOpt: If the ctor cannot be resolved from the cell path or conf
NoSuchCtor: If the ctor function cannot be resolved.
'''
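    # Illustrative resolution order (hypothetical values): a conf of
    # {'ctor': 'synapse.cells.axon'} takes precedence over any 'ctor' found
    # in <dirn>/config.json, and the returned func is the callable that
    # dotted path resolves to.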
ctor = None
if conf is not None:
ctor = conf.get('ctor')
path = s_common.genpath(dirn, 'config.json')
if ctor is None and os.path.isfile(path):
subconf = s_common.jsload(path)
ctor = subconf.get('ctor')
if ctor is None:
raise s_common.ReqConfOpt(mesg='Missing ctor, cannot divide',
name='ctor')
func = s_dyndeps.getDynLocal(ctor)
if func is None:
raise s_common.NoSuchCtor(mesg='Cannot resolve ctor', name=ctor)
return ctor, func
def main(dirn, conf=None):
'''
Initialize and execute the main loop for a Cell.
Args:
dirn (str): Directory backing the Cell data.
conf (dict): Configuration dictionary.
Notes:
        This ends up calling ``main()`` on the Cell, and does not return
        anything. It calls sys.exit() at the end of its processing.
'''
try:
# Configure logging since we may have come in via
# multiprocessing.Process as part of a Daemon config.
s_common.setlogging(logger,
os.getenv('SYN_TEST_LOG_LEVEL', 'WARNING'))
dirn = s_common.genpath(dirn)
ctor, func = getCellCtor(dirn, conf=conf)
cell = func(dirn, conf)
addr = cell.getCellAddr()
logger.warning('cell divided: %s (%s) addr: %r' % (ctor, dirn, addr))
cell.main()
sys.exit(0)
except Exception as e:
logger.exception('main: %s (%s)' % (dirn, e))
sys.exit(1)
if __name__ == '__main__':
main(sys.argv[1])
| {
"content_hash": "b81947c1b8cbad5f0d269f478ced20c9",
"timestamp": "",
"source": "github",
"line_count": 897,
"max_line_length": 149,
"avg_line_length": 28.0479375696767,
"alnum_prop": 0.5553082395961684,
"repo_name": "vivisect/synapse",
"id": "d5df79cc137b2efa3d2fc66303c75f18f33feac9",
"size": "25159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/lib/cell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716598"
}
],
"symlink_target": ""
} |
alice_name = "Alice"
alice_age = 20
alice_is_drinking = True
bob_name = "Bob"
bob_age = 12
bob_is_drinking = False
charles_name = "Charles"
charles_age = 22
charles_is_drinking = True
| {
"content_hash": "ad67bf785aa4e4a41433be25c2254947",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 26,
"avg_line_length": 16.90909090909091,
"alnum_prop": 0.6989247311827957,
"repo_name": "marwahaha/python-fundamentals",
"id": "d6f8b2349fac1b83bf3856806a8299e9ad8b02a6",
"size": "279",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "challenges/04-Functions/D_your_own_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18292"
},
{
"name": "Ruby",
"bytes": "3485"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from oauth2_provider.settings import oauth2_settings
from django.db import models, migrations
import oauth2_provider.validators
import oauth2_provider.generators
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
migrations.swappable_dependency(oauth2_settings.APPLICATION_MODEL),
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('client_id', models.CharField(default=oauth2_provider.generators.generate_client_id, unique=True, max_length=100, db_index=True)),
('redirect_uris', models.TextField(help_text='Allowed URIs list, space separated', blank=True, validators=[oauth2_provider.validators.validate_uris])),
('client_type', models.CharField(max_length=32, choices=[('confidential', 'Confidential'), ('public', 'Public')])),
('authorization_grant_type', models.CharField(max_length=32, choices=[('authorization-code', 'Authorization code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials')])),
('client_secret', models.CharField(default=oauth2_provider.generators.generate_client_secret, max_length=255, db_index=True, blank=True)),
('name', models.CharField(max_length=255, blank=True)),
('skip_authorization', models.BooleanField(default=False)),
('user', models.ForeignKey(related_name='oauth2_provider_application', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AccessToken',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('token', models.CharField(max_length=255, db_index=True)),
('expires', models.DateTimeField()),
('scope', models.TextField(blank=True)),
('application', models.ForeignKey(to=oauth2_settings.APPLICATION_MODEL)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='Grant',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('code', models.CharField(max_length=255, db_index=True)),
('expires', models.DateTimeField()),
('redirect_uri', models.CharField(max_length=255)),
('scope', models.TextField(blank=True)),
('application', models.ForeignKey(to=oauth2_settings.APPLICATION_MODEL)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='RefreshToken',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('token', models.CharField(max_length=255, db_index=True)),
('access_token', models.OneToOneField(related_name='refresh_token', to='oauth2_provider.AccessToken')),
('application', models.ForeignKey(to=oauth2_settings.APPLICATION_MODEL)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| {
"content_hash": "7b298202b77ebecc24b1a84673d2eb10",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 253,
"avg_line_length": 54.6764705882353,
"alnum_prop": 0.6097364174287251,
"repo_name": "ramcn/demo3",
"id": "7b0b40a36c3d00e4f3d1c07278de7bb33e097f2d",
"size": "3742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python3.4/site-packages/oauth2_provider/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "330662"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Groff",
"bytes": "7"
},
{
"name": "HTML",
"bytes": "252755"
},
{
"name": "JavaScript",
"bytes": "136464"
},
{
"name": "Python",
"bytes": "11000226"
},
{
"name": "Shell",
"bytes": "3753"
}
],
"symlink_target": ""
} |
"""Tests for the Windows recycler parsers."""
import unittest
from plaso.lib import definitions
from plaso.parsers import recycler
from tests.parsers import test_lib
class WinRecycleBinParserTest(test_lib.ParserTestCase):
"""Tests for the Windows Recycle Bin parser."""
def testParseVista(self):
"""Tests the Parse function on a Windows Vista RecycleBin file."""
parser = recycler.WinRecycleBinParser()
storage_writer = self._ParseFile(['$II3DF3L.zip'], parser)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'date_time': '2012-03-12 20:49:58.6330000',
'data_type': 'windows:metadata:deleted_item',
'file_size': 724919,
'original_filename': (
'C:\\Users\\nfury\\Documents\\Alloy Research\\StarFury.zip')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testParseWindows10(self):
"""Tests the Parse function on a Windows 10 RecycleBin file."""
parser = recycler.WinRecycleBinParser()
storage_writer = self._ParseFile(['$I103S5F.jpg'], parser)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'date_time': '2016-06-29 21:37:45.6180000',
'data_type': 'windows:metadata:deleted_item',
'file_size': 222255,
'original_filename': (
'C:\\Users\\random\\Downloads\\bunnies.jpg')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
class WinRecyclerInfo2ParserTest(test_lib.ParserTestCase):
"""Tests for the Windows Recycler INFO2 parser."""
def testParse(self):
"""Tests the Parse function on a Windows Recycler INFO2 file."""
parser = recycler.WinRecyclerInfo2Parser()
storage_writer = self._ParseFile(['INFO2'], parser)
self.assertEqual(storage_writer.number_of_events, 4)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'date_time': '2004-08-25 16:18:25.2370000',
'data_type': 'windows:metadata:deleted_item',
'drive_number': 2,
'original_filename': (
'C:\\Documents and Settings\\Mr. Evil\\Desktop\\lalsetup250.exe'),
'record_index': 1,
'timestamp_desc': definitions.TIME_DESCRIPTION_DELETED}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "ca9d78a209b7f7a2f5f69e7052d3ebcd",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 78,
"avg_line_length": 35.36585365853659,
"alnum_prop": 0.6896551724137931,
"repo_name": "kiddinn/plaso",
"id": "c1f1b179260373c7700dfd7dd72d1e0034be6bf6",
"size": "2947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parsers/recycler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "PowerShell",
"bytes": "9560"
},
{
"name": "Python",
"bytes": "4878625"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "26453"
}
],
"symlink_target": ""
} |
"""Tests if the version numbers are consistent in Python and compiled code."""
from asap3 import _asap, __version__
from asap3 import __file__ as mainfile
versionerror = """
OOPS - BAD ASAP INSTALLATION: INCONSISTENT VERSION NUMBERS
Version number of Python code: %s
Version number of compiled code: %s
Perhaps some modules are loaded from the wrong place.
Python main module: %s
Compiled module: %s
Or maybe you just forgot to compile the code after an upgrade.
"""
def check_version(verbose = False):
"Check if the version numbers are consistent in Python and compiled code."
try:
compiled = _asap.get_short_version().strip("'")
except AttributeError:
compiled = "unknown (probably 3.0.0)"
if compiled != __version__:
compiledfile = _asap.__file__
print versionerror % (__version__, compiled, mainfile, compiledfile)
raise RuntimeError, "Inconsistent Asap version numbers (see message above)"
| {
"content_hash": "0f57c777f26884578e83d3ba99ed8c2b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 83,
"avg_line_length": 27.8,
"alnum_prop": 0.6937307297019527,
"repo_name": "auag92/n2dm",
"id": "0004d6fed4e108d654959c3b6688c9bcb6116a89",
"size": "973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Asap-3.8.4/Python/asap3/Internal/checkversion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4529"
},
{
"name": "C++",
"bytes": "1472384"
},
{
"name": "CSS",
"bytes": "5059"
},
{
"name": "Jupyter Notebook",
"bytes": "7328"
},
{
"name": "Makefile",
"bytes": "86067"
},
{
"name": "Matlab",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1232765"
},
{
"name": "Shell",
"bytes": "13226"
},
{
"name": "Smarty",
"bytes": "4212"
},
{
"name": "TeX",
"bytes": "5561"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
from scipy.sparse import csr_matrix
from numpy.testing import assert_array_equal
from sklearn._config import config_context, get_config
from sklearn.utils._set_output import _wrap_in_pandas_container
from sklearn.utils._set_output import _safe_set_output
from sklearn.utils._set_output import _SetOutputMixin
from sklearn.utils._set_output import _get_output_config
def test__wrap_in_pandas_container_dense():
"""Check _wrap_in_pandas_container for dense data."""
pd = pytest.importorskip("pandas")
X = np.asarray([[1, 0, 3], [0, 0, 1]])
columns = np.asarray(["f0", "f1", "f2"], dtype=object)
index = np.asarray([0, 1])
dense_named = _wrap_in_pandas_container(X, columns=lambda: columns, index=index)
assert isinstance(dense_named, pd.DataFrame)
assert_array_equal(dense_named.columns, columns)
assert_array_equal(dense_named.index, index)
def test__wrap_in_pandas_container_dense_update_columns_and_index():
"""Check that _wrap_in_pandas_container overrides columns and index."""
pd = pytest.importorskip("pandas")
X_df = pd.DataFrame([[1, 0, 3], [0, 0, 1]], columns=["a", "b", "c"])
new_columns = np.asarray(["f0", "f1", "f2"], dtype=object)
new_index = [10, 12]
new_df = _wrap_in_pandas_container(X_df, columns=new_columns, index=new_index)
assert_array_equal(new_df.columns, new_columns)
assert_array_equal(new_df.index, new_index)
def test__wrap_in_pandas_container_error_validation():
"""Check errors in _wrap_in_pandas_container."""
X = np.asarray([[1, 0, 3], [0, 0, 1]])
X_csr = csr_matrix(X)
match = "Pandas output does not support sparse data"
with pytest.raises(ValueError, match=match):
_wrap_in_pandas_container(X_csr, columns=["a", "b", "c"])
class EstimatorWithoutSetOutputAndWithoutTransform:
pass
class EstimatorNoSetOutputWithTransform:
def transform(self, X, y=None):
return X # pragma: no cover
class EstimatorWithSetOutput(_SetOutputMixin):
def fit(self, X, y=None):
self.n_features_in_ = X.shape[1]
return self
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object)
def test__safe_set_output():
"""Check _safe_set_output works as expected."""
# Estimator without transform will not raise when setting set_output for transform.
est = EstimatorWithoutSetOutputAndWithoutTransform()
_safe_set_output(est, transform="pandas")
# Estimator with transform but without set_output will raise
est = EstimatorNoSetOutputWithTransform()
with pytest.raises(ValueError, match="Unable to configure output"):
_safe_set_output(est, transform="pandas")
est = EstimatorWithSetOutput().fit(np.asarray([[1, 2, 3]]))
_safe_set_output(est, transform="pandas")
config = _get_output_config("transform", est)
assert config["dense"] == "pandas"
_safe_set_output(est, transform="default")
config = _get_output_config("transform", est)
assert config["dense"] == "default"
# transform is None is a no-op, so the config remains "default"
_safe_set_output(est, transform=None)
config = _get_output_config("transform", est)
assert config["dense"] == "default"
class EstimatorNoSetOutputWithTransformNoFeatureNamesOut(_SetOutputMixin):
def transform(self, X, y=None):
return X # pragma: no cover
def test_set_output_mixin():
"""Estimator without get_feature_names_out does not define `set_output`."""
est = EstimatorNoSetOutputWithTransformNoFeatureNamesOut()
assert not hasattr(est, "set_output")
def test__safe_set_output_error():
"""Check transform with invalid config."""
X = np.asarray([[1, 0, 3], [0, 0, 1]])
est = EstimatorWithSetOutput()
_safe_set_output(est, transform="bad")
msg = "output config must be 'default'"
with pytest.raises(ValueError, match=msg):
est.transform(X)
def test_set_output_method():
"""Check that the output is pandas."""
pd = pytest.importorskip("pandas")
X = np.asarray([[1, 0, 3], [0, 0, 1]])
est = EstimatorWithSetOutput().fit(X)
# transform=None is a no-op
est2 = est.set_output(transform=None)
assert est2 is est
X_trans_np = est2.transform(X)
assert isinstance(X_trans_np, np.ndarray)
est.set_output(transform="pandas")
X_trans_pd = est.transform(X)
assert isinstance(X_trans_pd, pd.DataFrame)
def test_set_output_method_error():
"""Check transform fails with invalid transform."""
X = np.asarray([[1, 0, 3], [0, 0, 1]])
est = EstimatorWithSetOutput().fit(X)
est.set_output(transform="bad")
msg = "output config must be 'default'"
with pytest.raises(ValueError, match=msg):
est.transform(X)
def test__get_output_config():
"""Check _get_output_config works as expected."""
# Without a configuration set, the global config is used
global_config = get_config()["transform_output"]
config = _get_output_config("transform")
assert config["dense"] == global_config
with config_context(transform_output="pandas"):
# with estimator=None, the global config is used
config = _get_output_config("transform")
assert config["dense"] == "pandas"
est = EstimatorNoSetOutputWithTransform()
config = _get_output_config("transform", est)
assert config["dense"] == "pandas"
est = EstimatorWithSetOutput()
        # If estimator has no config, use global config
config = _get_output_config("transform", est)
assert config["dense"] == "pandas"
# If estimator has a config, use local config
est.set_output(transform="default")
config = _get_output_config("transform", est)
assert config["dense"] == "default"
est.set_output(transform="pandas")
config = _get_output_config("transform", est)
assert config["dense"] == "pandas"
class EstimatorWithSetOutputNoAutoWrap(_SetOutputMixin, auto_wrap_output_keys=None):
def transform(self, X, y=None):
return X
def test_get_output_auto_wrap_false():
"""Check that auto_wrap_output_keys=None does not wrap."""
est = EstimatorWithSetOutputNoAutoWrap()
assert not hasattr(est, "set_output")
X = np.asarray([[1, 0, 3], [0, 0, 1]])
assert X is est.transform(X)
def test_auto_wrap_output_keys_errors_with_incorrect_input():
msg = "auto_wrap_output_keys must be None or a tuple of keys."
with pytest.raises(ValueError, match=msg):
class BadEstimator(_SetOutputMixin, auto_wrap_output_keys="bad_parameter"):
pass
class AnotherMixin:
def __init_subclass__(cls, custom_parameter, **kwargs):
super().__init_subclass__(**kwargs)
cls.custom_parameter = custom_parameter
def test_set_output_mixin_custom_mixin():
"""Check that multiple init_subclasses passes parameters up."""
class BothMixinEstimator(_SetOutputMixin, AnotherMixin, custom_parameter=123):
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return input_features
est = BothMixinEstimator()
assert est.custom_parameter == 123
assert hasattr(est, "set_output")
def test__wrap_in_pandas_container_column_errors():
"""If a callable `columns` errors, it has the same semantics as columns=None."""
pd = pytest.importorskip("pandas")
def get_columns():
raise ValueError("No feature names defined")
X_df = pd.DataFrame({"feat1": [1, 2, 3], "feat2": [3, 4, 5]})
X_wrapped = _wrap_in_pandas_container(X_df, columns=get_columns)
assert_array_equal(X_wrapped.columns, X_df.columns)
X_np = np.asarray([[1, 3], [2, 4], [3, 5]])
X_wrapped = _wrap_in_pandas_container(X_np, columns=get_columns)
assert_array_equal(X_wrapped.columns, range(X_np.shape[1]))
| {
"content_hash": "2a300ee022d79046d86eeecd1eb119f5",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 87,
"avg_line_length": 33.48535564853557,
"alnum_prop": 0.6658752967637136,
"repo_name": "scikit-learn/scikit-learn",
"id": "ae33b75f65c4c027c68e6774582cb8d3ca3a3502",
"size": "8003",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sklearn/utils/tests/test_set_output.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "669600"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10545498"
},
{
"name": "Shell",
"bytes": "41551"
}
],
"symlink_target": ""
} |
"""
Gauges Package contains
G{packagetree }
"""
| {
"content_hash": "6d93b10ca0df110cc8de0406774ef87d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 23,
"avg_line_length": 8.333333333333334,
"alnum_prop": 0.66,
"repo_name": "USGSDenverPychron/pychron",
"id": "091298fecefd691523b938dde0d40fdf731df0d2",
"size": "787",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/hardware/gauges/mks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
import itertools
import json
import unittest
import os
from hashlib import md5
import mock
import six.moves.cPickle as pickle
import tempfile
import time
import shutil
import re
import random
import struct
import collections
from eventlet import Timeout, sleep, spawn
from contextlib import closing, contextmanager
from gzip import GzipFile
from shutil import rmtree
from six.moves.urllib.parse import unquote
from swift.common import utils
from swift.common.exceptions import DiskFileError
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import dump_recon_cache
from swift.obj import diskfile, reconstructor as object_reconstructor
from swift.common import ring
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
POLICIES, EC_POLICY)
from swift.obj.reconstructor import REVERT
from test.unit import (patch_policies, debug_logger, mocked_http_conn,
FabricatedRing, make_timestamp_iter,
DEFAULT_TEST_EC_TYPE, encode_frag_archive_bodies,
quiet_eventlet_exceptions)
from test.unit.obj.common import write_diskfile
@contextmanager
def mock_ssync_sender(ssync_calls=None, response_callback=None, **kwargs):
def fake_ssync(daemon, node, job, suffixes):
if ssync_calls is not None:
ssync_calls.append(
{'node': node, 'job': job, 'suffixes': suffixes})
def fake_call():
if response_callback:
response = response_callback(node, job, suffixes)
else:
response = True, {}
return response
return fake_call
with mock.patch('swift.obj.reconstructor.ssync_sender', fake_ssync):
yield fake_ssync
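# Note on the fake above: the reconstructor instantiates
# ssync_sender(daemon, node, job, suffixes) and then *calls* the returned
# object, so fake_ssync hands back fake_call, whose return value mimics the
# real sender's (success, available_map)-style tuple.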
def make_ec_archive_bodies(policy, test_body):
segment_size = policy.ec_segment_size
# split up the body into buffers
chunks = [test_body[x:x + segment_size]
for x in range(0, len(test_body), segment_size)]
# encode the buffers into fragment payloads
fragment_payloads = []
for chunk in chunks:
fragments = \
policy.pyeclib_driver.encode(chunk) * policy.ec_duplication_factor
if not fragments:
break
fragment_payloads.append(fragments)
# join up the fragment payloads per node
ec_archive_bodies = [''.join(frags) for frags in zip(*fragment_payloads)]
return ec_archive_bodies
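# Rough usage sketch: with the 2+1 EC policy declared for these tests, each
# chunk encodes to ec_ndata + ec_nparity fragments (3 here), repeated by the
# policy's ec_duplication_factor, so ec_archive_bodies ends up with one frag
# archive per fragment index; ec_archive_bodies[i] is what a node holding
# fragment index i would store.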
def _create_test_rings(path, next_part_power=None):
testgz = os.path.join(path, 'object.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2],
[1, 2, 3],
[2, 3, 0]
]
intended_devs = [
{'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0',
'port': 6200},
{'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1',
'port': 6200},
{'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2',
'port': 6200},
{'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3',
'port': 6200}
]
intended_part_shift = 30
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift,
next_part_power),
f)
testgz = os.path.join(path, 'object-1.ring.gz')
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift,
next_part_power),
f)
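# In the test ring above, rows of replica2part2dev are replicas and columns
# are partitions: partition 0 maps to devices [0, 1, 2], partition 1 to
# [1, 2, 3] and partition 2 to [2, 3, 0], which the hard-coded job
# expectations further down rely on.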
def count_stats(logger, key, metric):
count = 0
for record in logger.log_dict[key]:
log_args, log_kwargs = record
m = log_args[0]
if re.match(metric, m):
count += 1
return count
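# count_stats tallies how many captured calls to the named logger method
# (e.g. 'update_stats') used a metric matching the given regex, reading the
# call records that the debug_logger test helper keeps in logger.log_dict.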
def get_header_frag_index(self, body):
metadata = self.policy.pyeclib_driver.get_metadata(body)
frag_index = struct.unpack('h', metadata[:2])[0]
return {
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
}
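# Note: get_header_frag_index assumes the pyeclib fragment metadata places the
# fragment index in the first two bytes of get_metadata() output (read as a
# native-order short) -- an implementation detail these EC tests depend on.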
@patch_policies([StoragePolicy(0, name='zero', is_default=True),
ECStoragePolicy(1, name='one',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1)])
class TestGlobalSetupObjectReconstructor(unittest.TestCase):
# Tests for reconstructor using real objects in test partition directories.
legacy_durable = False
def setUp(self):
self.testdir = tempfile.mkdtemp()
_create_test_rings(self.testdir)
POLICIES[0].object_ring = ring.Ring(self.testdir, ring_name='object')
POLICIES[1].object_ring = ring.Ring(self.testdir, ring_name='object-1')
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = ''
self.devices = os.path.join(self.testdir, 'node')
os.makedirs(self.devices)
os.mkdir(os.path.join(self.devices, 'sda1'))
self.objects = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(POLICIES[0]))
self.objects_1 = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(POLICIES[1]))
os.mkdir(self.objects)
os.mkdir(self.objects_1)
self.parts = {}
self.parts_1 = {}
self.part_nums = ['0', '1', '2']
for part in self.part_nums:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(self.parts[part])
self.parts_1[part] = os.path.join(self.objects_1, part)
os.mkdir(self.parts_1[part])
self.conf = dict(
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1')
self.logger = debug_logger('test-reconstructor')
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.policy = POLICIES[1]
# most of the reconstructor test methods require that there be
# real objects in place, not just part dirs, so we'll create them
# all here....
# part 0: 3C1/hash/xxx#1#d.data <-- job: sync_only - partners (FI 1)
# 061/hash/xxx#1#d.data <-- included in earlier job (FI 1)
# /xxx#2#d.data <-- job: sync_revert to index 2
# part 1: 3C1/hash/xxx#0#d.data <-- job: sync_only - partners (FI 0)
# /xxx#1#d.data <-- job: sync_revert to index 1
# 061/hash/xxx#1#d.data <-- included in earlier job (FI 1)
# part 2: 3C1/hash/xxx#2#d.data <-- job: sync_revert to index 2
# 061/hash/xxx#0#d.data <-- job: sync_revert to index 0
def _create_frag_archives(policy, obj_path, local_id, obj_set):
# we'll create 2 sets of objects in different suffix dirs
# so we cover all the scenarios we want (3 of them)
# 1) part dir with all FI's matching the local node index
# 2) part dir with one local and mix of others
# 3) part dir with no local FI and one or more others
def part_0(set):
if set == 0:
# just the local
return local_id
else:
# one local and all of another
if obj_num == 0:
return local_id
else:
return (local_id + 1) % 3
def part_1(set):
if set == 0:
# one local and all of another
if obj_num == 0:
return local_id
else:
return (local_id + 2) % 3
else:
# just the local node
return local_id
def part_2(set):
# this part is a handoff in our config (always)
                # so let's do a set with indices from different nodes
if set == 0:
return (local_id + 1) % 3
else:
return (local_id + 2) % 3
            # function dictionary for defining test scenarios based on set #
scenarios = {'0': part_0,
'1': part_1,
'2': part_2}
def _create_df(obj_num, part_num):
self._create_diskfile(
part=part_num, object_name='o' + str(obj_set),
policy=policy, frag_index=scenarios[part_num](obj_set),
timestamp=utils.Timestamp(t))
for part_num in self.part_nums:
# create 3 unique objects per part, each part
# will then have a unique mix of FIs for the
# possible scenarios
for obj_num in range(0, 3):
_create_df(obj_num, part_num)
ips = utils.whataremyips()
for policy in [p for p in POLICIES if p.policy_type == EC_POLICY]:
self.ec_policy = policy
self.ec_obj_ring = self.reconstructor.load_object_ring(
self.ec_policy)
data_dir = diskfile.get_data_dir(self.ec_policy)
for local_dev in [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
self.reconstructor.port]:
self.ec_local_dev = local_dev
dev_path = os.path.join(self.reconstructor.devices_dir,
self.ec_local_dev['device'])
self.ec_obj_path = os.path.join(dev_path, data_dir)
# create a bunch of FA's to test
t = 1421181937.70054 # time.time()
with mock.patch('swift.obj.diskfile.time') as mock_time:
# since (a) we are using a fixed time here to create
# frags which corresponds to all the hardcoded hashes and
# (b) the EC diskfile will delete its .data file right
# after creating if it has expired, use this horrible hack
# to prevent the reclaim happening
mock_time.time.return_value = 0.0
_create_frag_archives(self.ec_policy, self.ec_obj_path,
self.ec_local_dev['id'], 0)
_create_frag_archives(self.ec_policy, self.ec_obj_path,
self.ec_local_dev['id'], 1)
break
break
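        # note: the double break above means only the first EC policy and the
        # first matching local device are used to build these fixtures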
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def _create_diskfile(self, policy=None, part=0, object_name='o',
frag_index=0, timestamp=None, test_data=None):
policy = policy or self.policy
df_mgr = self.reconstructor._df_router[policy]
df = df_mgr.get_diskfile('sda1', part, 'a', 'c', object_name,
policy=policy)
timestamp = timestamp or utils.Timestamp.now()
test_data = test_data or 'test data'
write_diskfile(df, timestamp, data=test_data, frag_index=frag_index,
legacy_durable=self.legacy_durable)
return df
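    # note on _create_diskfile above: write_diskfile (test.unit.obj.common)
    # writes the .data file for the requested frag index; with
    # legacy_durable=True (see the Legacy subclass at the bottom) the durable
    # marker is expected to be a separate .durable file rather than the '#d'
    # data-file suffix shown in the setUp layout comments.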
def assert_expected_jobs(self, part_num, jobs):
for job in jobs:
del job['path']
del job['policy']
if 'local_index' in job:
del job['local_index']
job['suffixes'].sort()
expected = []
# part num 0
expected.append(
[{
'sync_to': [{
'index': 2,
'replication_port': 6200,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '127.0.0.2',
'device': 'sda1',
'id': 2,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061'],
'partition': 0,
'frag_index': 2,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1', 'port': 6200,
},
'hashes': {
'061': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}, {
'sync_to': [{
'index': 0,
'replication_port': 6200,
'zone': 0,
'ip': '127.0.0.0',
'region': 1,
'port': 6200,
'replication_ip': '127.0.0.0',
'device': 'sda1', 'id': 0,
}, {
'index': 2,
'replication_port': 6200,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '127.0.0.2',
'device': 'sda1',
'id': 2,
}],
'job_type': object_reconstructor.SYNC,
'sync_diskfile_builder': self.reconstructor.reconstruct_fa,
'suffixes': ['061', '3c1'],
'partition': 0,
'frag_index': 1,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6200,
},
'hashes':
{
'061': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229',
1: '0e6e8d48d801dc89fd31904ae3b31229'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}]
)
# part num 1
expected.append(
[{
'sync_to': [{
'index': 1,
'replication_port': 6200,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '127.0.0.2',
'device': 'sda1',
'id': 2,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061', '3c1'],
'partition': 1,
'frag_index': 1,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6200,
},
'hashes':
{
'061': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}, {
'sync_to': [{
'index': 2,
'replication_port': 6200,
'zone': 4,
'ip': '127.0.0.3',
'region': 1,
'port': 6200,
'replication_ip': '127.0.0.3',
'device': 'sda1', 'id': 3,
}, {
'index': 1,
'replication_port': 6200,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '127.0.0.2',
'device': 'sda1',
'id': 2,
}],
'job_type': object_reconstructor.SYNC,
'sync_diskfile_builder': self.reconstructor.reconstruct_fa,
'suffixes': ['3c1'],
'partition': 1,
'frag_index': 0,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6200,
},
'hashes': {
'061': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}]
)
# part num 2
expected.append(
[{
'sync_to': [{
'index': 0,
'replication_port': 6200,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '127.0.0.2',
'device': 'sda1', 'id': 2,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061'],
'partition': 2,
'frag_index': 0,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6200,
},
'hashes': {
'061': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229'
},
},
}, {
'sync_to': [{
'index': 2,
'replication_port': 6200,
'zone': 0,
'ip': '127.0.0.0',
'region': 1,
'port': 6200,
'replication_ip': '127.0.0.0',
'device': 'sda1',
'id': 0,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['3c1'],
'partition': 2,
'frag_index': 2,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6200
},
'hashes': {
'061': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229'
},
},
}]
)
def check_jobs(part_num):
try:
expected_jobs = expected[int(part_num)]
except (IndexError, ValueError):
self.fail('Unknown part number %r' % part_num)
expected_by_part_frag_index = dict(
((j['partition'], j['frag_index']), j) for j in expected_jobs)
for job in jobs:
job_key = (job['partition'], job['frag_index'])
if job_key in expected_by_part_frag_index:
for k, value in job.items():
expected_value = \
expected_by_part_frag_index[job_key][k]
try:
if isinstance(value, list):
value.sort()
expected_value.sort()
self.assertEqual(value, expected_value)
except AssertionError as e:
extra_info = \
'\n\n... for %r in part num %s job %r' % (
k, part_num, job_key)
raise AssertionError(str(e) + extra_info)
else:
self.fail(
'Unexpected job %r for part num %s - '
                        'expected jobs were %r' % (
job_key, part_num,
expected_by_part_frag_index.keys()))
for expected_job in expected_jobs:
if expected_job in jobs:
jobs.remove(expected_job)
self.assertFalse(jobs) # that should be all of them
check_jobs(part_num)
def _run_once(self, http_count, extra_devices, override_devices=None):
ring_devs = list(self.policy.object_ring.devs)
for device, parts in extra_devices.items():
device_path = os.path.join(self.devices, device)
os.mkdir(device_path)
for part in range(parts):
os.makedirs(os.path.join(device_path, 'objects-1', str(part)))
# we update the ring to make is_local happy
devs = [dict(d) for d in ring_devs]
for d in devs:
d['device'] = device
self.policy.object_ring.devs.extend(devs)
self.reconstructor.stats_interval = 0
self.process_job = lambda j: sleep(0)
with mocked_http_conn(*[200] * http_count, body=pickle.dumps({})):
with mock_ssync_sender():
self.reconstructor.run_once(devices=override_devices)
def test_run_once(self):
        # sda1: 3 partitions are already created in setUp
extra_devices = {
'sdb1': 4,
'sdc1': 1,
'sdd1': 0,
}
self._run_once(18, extra_devices)
stats_lines = set()
for line in self.logger.get_lines_for_level('info'):
if 'reconstructed in' not in line:
continue
stat_line = line.split('reconstructed', 1)[0].strip()
stats_lines.add(stat_line)
acceptable = set([
'3/8 (37.50%) partitions',
'5/8 (62.50%) partitions',
'8/8 (100.00%) partitions',
])
matched = stats_lines & acceptable
self.assertEqual(matched, acceptable,
'missing some expected acceptable:\n%s' % (
'\n'.join(sorted(acceptable - matched))))
self.assertEqual(self.reconstructor.reconstruction_part_count, 8)
self.assertEqual(self.reconstructor.part_count, 8)
def test_run_once_override_devices(self):
        # sda1: 3 partitions are already created in setUp
extra_devices = {
'sdb1': 4,
'sdc1': 1,
'sdd1': 0,
}
self._run_once(2, extra_devices, 'sdc1')
stats_lines = set()
for line in self.logger.get_lines_for_level('info'):
if 'reconstructed in' not in line:
continue
stat_line = line.split('reconstructed', 1)[0].strip()
stats_lines.add(stat_line)
acceptable = set([
'1/1 (100.00%) partitions',
])
matched = stats_lines & acceptable
self.assertEqual(matched, acceptable,
'missing some expected acceptable:\n%s' % (
'\n'.join(sorted(acceptable - matched))))
self.assertEqual(self.reconstructor.reconstruction_part_count, 1)
self.assertEqual(self.reconstructor.part_count, 1)
def test_get_response(self):
part = self.part_nums[0]
node = self.policy.object_ring.get_part_nodes(int(part))[0]
def do_test(stat_code):
with mocked_http_conn(stat_code):
resp = self.reconstructor._get_response(node, part,
path='nada',
headers={},
full_path='nada/nada')
return resp
resp = do_test(200)
self.assertEqual(resp.status, 200)
resp = do_test(400)
        # in the error case the return value will be None instead of a response
self.assertIsNone(resp)
# ... and log warnings for 400
for line in self.logger.get_lines_for_level('warning'):
self.assertIn('Invalid response 400', line)
self.logger._clear()
resp = do_test(Exception())
self.assertIsNone(resp)
# exception should result in error logs
for line in self.logger.get_lines_for_level('error'):
self.assertIn('Trying to GET', line)
self.logger._clear()
# Timeout also should result in error logs
resp = do_test(Timeout())
self.assertIsNone(resp)
for line in self.logger.get_lines_for_level('error'):
self.assertIn('Trying to GET', line)
            # sanity: Timeout has an extra message in the error log
self.assertIn('Timeout', line)
self.logger.clear()
# we should get a warning on 503 (sanity)
resp = do_test(503)
self.assertIsNone(resp)
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warnings))
self.assertIn('Invalid response 503', warnings[0])
self.logger.clear()
# ... but no messages should be emitted for 404
resp = do_test(404)
self.assertIsNone(resp)
for level, msgs in self.logger.lines_dict.items():
self.assertFalse(msgs)
def test_reconstructor_skips_bogus_partition_dirs(self):
# A directory in the wrong place shouldn't crash the reconstructor
self.reconstructor._reset_stats()
rmtree(self.objects_1)
os.mkdir(self.objects_1)
os.mkdir(os.path.join(self.objects_1, "burrito"))
jobs = []
for part_info in self.reconstructor.collect_parts():
jobs += self.reconstructor.build_reconstruction_jobs(part_info)
self.assertFalse(jobs)
def test_check_ring(self):
testring = tempfile.mkdtemp()
_create_test_rings(testring)
obj_ring = ring.Ring(testring, ring_name='object') # noqa
self.assertTrue(self.reconstructor.check_ring(obj_ring))
orig_check = self.reconstructor.next_check
self.reconstructor.next_check = orig_check - 30
self.assertTrue(self.reconstructor.check_ring(obj_ring))
self.reconstructor.next_check = orig_check
orig_ring_time = obj_ring._mtime
obj_ring._mtime = orig_ring_time - 30
self.assertTrue(self.reconstructor.check_ring(obj_ring))
self.reconstructor.next_check = orig_check - 30
self.assertFalse(self.reconstructor.check_ring(obj_ring))
rmtree(testring, ignore_errors=1)
def test_reconstruct_check_ring(self):
# test reconstruct logs info when check_ring is false and that
# there are no jobs built
with mock.patch('swift.obj.reconstructor.ObjectReconstructor.'
'check_ring', return_value=False):
self.reconstructor.reconstruct()
msgs = self.reconstructor.logger.get_lines_for_level('info')
self.assertIn('Ring change detected. Aborting'
' current reconstruction pass.', msgs[0])
self.assertEqual(self.reconstructor.reconstruction_count, 0)
def test_build_reconstruction_jobs(self):
self.reconstructor._reset_stats()
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertTrue(jobs[0]['job_type'] in
(object_reconstructor.SYNC,
object_reconstructor.REVERT))
self.assert_expected_jobs(part_info['partition'], jobs)
def test_handoffs_only(self):
self.reconstructor.handoffs_only = True
found_job_types = set()
def fake_process_job(job):
# increment failure counter
self.reconstructor.handoffs_remaining += 1
found_job_types.add(job['job_type'])
self.reconstructor.process_job = fake_process_job
_orig_build_jobs = self.reconstructor.build_reconstruction_jobs
built_jobs = []
def capture_jobs(part_info):
jobs = _orig_build_jobs(part_info)
built_jobs.append((part_info, jobs))
return jobs
with mock.patch.object(self.reconstructor, 'build_reconstruction_jobs',
capture_jobs):
self.reconstructor.reconstruct()
# only revert jobs
found = [(part_info['partition'], set(
j['job_type'] for j in jobs))
for part_info, jobs in built_jobs]
self.assertEqual([
# partition, job_types
(2, {'sync_revert'}),
], found)
self.assertEqual(found_job_types, {object_reconstructor.REVERT})
# but failures keep handoffs remaining
msgs = self.reconstructor.logger.get_lines_for_level('info')
self.assertIn('Next pass will continue to revert handoffs', msgs[-1])
self.logger._clear()
found_job_types = set()
def fake_process_job(job):
# success does not increment failure counter
found_job_types.add(job['job_type'])
self.reconstructor.process_job = fake_process_job
# only revert jobs ... but all handoffs cleared out successfully
self.reconstructor.reconstruct()
self.assertEqual(found_job_types, {object_reconstructor.REVERT})
# it's time to turn off handoffs_only
msgs = self.reconstructor.logger.get_lines_for_level('warning')
self.assertIn('You should disable handoffs_only', msgs[-1])
def test_get_partners(self):
# we're going to perform an exhaustive test of every possible
# combination of partitions and nodes in our custom test ring
# format: [dev_id in question, 'part_num',
# [part_nodes for the given part], left id, right id...]
expected_partners = sorted([
(0, '0', [0, 1, 2], 2, 1), (0, '2', [2, 3, 0], 3, 2),
(1, '0', [0, 1, 2], 0, 2), (1, '1', [1, 2, 3], 3, 2),
(2, '0', [0, 1, 2], 1, 0), (2, '1', [1, 2, 3], 1, 3),
(2, '2', [2, 3, 0], 0, 3), (3, '1', [1, 2, 3], 2, 1),
(3, '2', [2, 3, 0], 2, 0), (0, '0', [0, 1, 2], 2, 1),
(0, '2', [2, 3, 0], 3, 2), (1, '0', [0, 1, 2], 0, 2),
(1, '1', [1, 2, 3], 3, 2), (2, '0', [0, 1, 2], 1, 0),
(2, '1', [1, 2, 3], 1, 3), (2, '2', [2, 3, 0], 0, 3),
(3, '1', [1, 2, 3], 2, 1), (3, '2', [2, 3, 0], 2, 0),
])
got_partners = []
for pol in POLICIES:
obj_ring = pol.object_ring
for part_num in self.part_nums:
part_nodes = obj_ring.get_part_nodes(int(part_num))
primary_ids = [n['id'] for n in part_nodes]
for node in part_nodes:
partners = object_reconstructor._get_partners(
node['index'], part_nodes)
left = partners[0]['id']
right = partners[1]['id']
got_partners.append((
node['id'], part_num, primary_ids, left, right))
self.assertEqual(expected_partners, sorted(got_partners))
def test_collect_parts(self):
self.reconstructor._reset_stats()
parts = []
for part_info in self.reconstructor.collect_parts():
parts.append(part_info['partition'])
self.assertEqual(sorted(parts), [0, 1, 2])
def test_collect_parts_mkdirs_error(self):
def blowup_mkdirs(path):
raise OSError('Ow!')
self.reconstructor._reset_stats()
with mock.patch.object(object_reconstructor, 'mkdirs', blowup_mkdirs):
rmtree(self.objects_1, ignore_errors=1)
parts = []
for part_info in self.reconstructor.collect_parts():
parts.append(part_info['partition'])
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1,
'Expected only one error, got %r' % error_lines)
log_args, log_kwargs = self.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]), 'Ow!')
def test_removes_zbf(self):
# After running xfs_repair, a partition directory could become a
# zero-byte file. If this happens, the reconstructor should clean it
# up, log something, and move on to the next partition.
# Surprise! Partition dir 1 is actually a zero-byte file.
pol_1_part_1_path = os.path.join(self.objects_1, '1')
rmtree(pol_1_part_1_path)
with open(pol_1_part_1_path, 'w'):
pass
self.assertTrue(os.path.isfile(pol_1_part_1_path)) # sanity check
self.reconstructor.process_job = lambda j: None
self.reconstructor.reconstruct()
self.assertFalse(os.path.exists(pol_1_part_1_path))
warnings = self.reconstructor.logger.get_lines_for_level('warning')
self.assertEqual(2, len(warnings))
# first warning is due to get_hashes failing to take lock on non-dir
self.assertIn(pol_1_part_1_path + '/hashes.pkl', warnings[0])
self.assertIn('unable to read', warnings[0].lower())
self.assertIn(pol_1_part_1_path, warnings[1])
self.assertIn('not a directory', warnings[1].lower())
def test_ignores_status_file(self):
# Following fd86d5a, the auditor will leave status files on each device
# until an audit can complete. The reconstructor should ignore these
@contextmanager
def status_files(*auditor_types):
status_paths = [os.path.join(self.objects_1,
'auditor_status_%s.json' % typ)
for typ in auditor_types]
for status_path in status_paths:
self.assertFalse(os.path.exists(status_path)) # sanity check
with open(status_path, 'w'):
pass
self.assertTrue(os.path.isfile(status_path)) # sanity check
try:
yield status_paths
finally:
for status_path in status_paths:
try:
os.unlink(status_path)
except OSError as e:
if e.errno != 2:
raise
        # since collect_parts is a generator that yields directly into
        # build_jobs (which then spawns the work), it's safe to do the
        # remove_files here without making reconstructor startup slow
with status_files('ALL', 'ZBF') as status_paths:
self.reconstructor._reset_stats()
for part_info in self.reconstructor.collect_parts():
self.assertNotIn(part_info['part_path'], status_paths)
warnings = self.reconstructor.logger.get_lines_for_level('warning')
self.assertEqual(0, len(warnings))
for status_path in status_paths:
self.assertTrue(os.path.exists(status_path))
def _make_fake_ssync(self, ssync_calls, fail_jobs=None):
"""
Replace SsyncSender with a thin Fake.
        :param ssync_calls: an empty list used as a non-local accumulator; all
                            calls to ssync will be captured in it for assertion
                            by the caller.
:param fail_jobs: optional iter of dicts, any job passed into Fake that
matches a failure dict will return success == False.
"""
class _fake_ssync(object):
def __init__(self, daemon, node, job, suffixes, **kwargs):
# capture context and generate an available_map of objs
context = {}
context['node'] = node
context['job'] = job
context['suffixes'] = suffixes
self.suffixes = suffixes
self.daemon = daemon
self.job = job
hash_gen = self.daemon._df_router[job['policy']].yield_hashes(
self.job['device'], self.job['partition'],
self.job['policy'], self.suffixes,
frag_index=self.job.get('frag_index'))
self.available_map = {}
for hash_, timestamps in hash_gen:
self.available_map[hash_] = timestamps
context['available_map'] = self.available_map
ssync_calls.append(context)
self.success = True
for failure in (fail_jobs or []):
if all(job.get(k) == v for (k, v) in failure.items()):
self.success = False
break
context['success'] = self.success
def __call__(self, *args, **kwargs):
return self.success, self.available_map if self.success else {}
return _fake_ssync
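    # _make_fake_ssync is typically used the way the tests below do:
    #   ssync_calls = []
    #   with mock.patch('swift.obj.reconstructor.ssync_sender',
    #                   self._make_fake_ssync(ssync_calls)):
    #       self.reconstructor.reconstruct()
    #   # every ssync invocation is then available in ssync_calls for asserts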
def test_delete_reverted(self):
# verify reconstructor deletes reverted frag indexes after ssync'ing
def visit_obj_dirs(context):
for suff in context['suffixes']:
suff_dir = os.path.join(
context['job']['path'], suff)
for root, dirs, files in os.walk(suff_dir):
for d in dirs:
dirpath = os.path.join(root, d)
files = os.listdir(dirpath)
yield dirpath, files
n_files = n_files_after = 0
# run reconstructor with delete function mocked out to check calls
ssync_calls = []
delete_func =\
'swift.obj.reconstructor.ObjectReconstructor.delete_reverted_objs'
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
with mock.patch(delete_func) as mock_delete:
self.reconstructor.reconstruct()
expected_calls = []
for context in ssync_calls:
if context['job']['job_type'] == REVERT:
for dirpath, files in visit_obj_dirs(context):
# sanity check - expect some files to be in dir,
# may not be for the reverted frag index
self.assertTrue(files)
n_files += len(files)
expected_calls.append(mock.call(context['job'],
context['available_map'],
context['node']['index']))
mock_delete.assert_has_calls(expected_calls, any_order=True)
ssync_calls = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
self.reconstructor.reconstruct()
for context in ssync_calls:
if context['job']['job_type'] == REVERT:
data_file_tail = ('#%s.data'
% context['node']['index'])
for dirpath, files in visit_obj_dirs(context):
n_files_after += len(files)
for filename in files:
self.assertFalse(
filename.endswith(data_file_tail))
        # sanity check that some files were deleted
self.assertGreater(n_files, n_files_after)
def test_no_delete_failed_revert(self):
# test will only process revert jobs
self.reconstructor.handoffs_only = True
captured_ssync = []
# fail all jobs on part 2 on sda1
fail_jobs = [
{'device': 'sda1', 'partition': 2},
]
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(
captured_ssync, fail_jobs=fail_jobs)), \
mocked_http_conn() as request_log:
self.reconstructor.reconstruct()
self.assertFalse(request_log.unexpected_requests)
        # global setup has four revert jobs, but in handoffs_only mode only
        # the two on the handoff partition (2) are processed
self.assertEqual(len(captured_ssync), 2)
expected_ssync_calls = {
# device, part, frag_index: expected_occurrences
('sda1', 2, 2): 1,
('sda1', 2, 0): 1,
}
self.assertEqual(expected_ssync_calls, dict(collections.Counter(
(context['job']['device'],
context['job']['partition'],
context['job']['frag_index'])
for context in captured_ssync
)))
# failed jobs don't sync suffixes
self.assertFalse(
self.reconstructor.logger.get_lines_for_level('warning'))
self.assertFalse(
self.reconstructor.logger.get_lines_for_level('error'))
# handoffs remaining and part exists
self.assertEqual(2, self.reconstructor.handoffs_remaining)
self.assertTrue(os.path.exists(self.parts_1['2']))
# again with no failures
captured_ssync = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(captured_ssync)), \
mocked_http_conn(
200, 200, body=pickle.dumps({})) as request_log:
self.reconstructor.reconstruct()
self.assertFalse(request_log.unexpected_requests)
# same jobs
self.assertEqual(len(captured_ssync), 2)
# but this time we rehash at the end
expected_suffix_calls = []
for context in captured_ssync:
if not context['success']:
# only successful jobs generate suffix rehash calls
continue
job = context['job']
expected_suffix_calls.append(
(job['sync_to'][0]['replication_ip'], '/%s/%s/%s' % (
job['device'], job['partition'],
'-'.join(sorted(job['suffixes']))))
)
self.assertEqual(set(expected_suffix_calls),
set((r['ip'], r['path'])
for r in request_log.requests))
self.assertFalse(
self.reconstructor.logger.get_lines_for_level('error'))
# handoffs are cleaned up
self.assertEqual(0, self.reconstructor.handoffs_remaining)
warning_msgs = self.reconstructor.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_msgs))
self.assertIn('no handoffs remaining', warning_msgs[0])
# need one more pass to cleanup the part dir
self.assertTrue(os.path.exists(self.parts_1['2']))
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync([])), \
mocked_http_conn() as request_log:
self.reconstructor.reconstruct()
self.assertFalse(os.path.exists(self.parts_1['2']))
def test_get_part_jobs(self):
# yeah, this test code expects a specific setup
self.assertEqual(len(self.part_nums), 3)
# OK, at this point we should have 4 loaded parts with one
jobs = []
for partition in os.listdir(self.ec_obj_path):
part_path = os.path.join(self.ec_obj_path, partition)
jobs = self.reconstructor._get_part_jobs(
self.ec_local_dev, part_path, int(partition), self.ec_policy)
self.assert_expected_jobs(partition, jobs)
def assertStatCount(self, stat_method, stat_prefix, expected_count):
count = count_stats(self.logger, stat_method, stat_prefix)
msg = 'expected %s != %s for %s %s' % (
expected_count, count, stat_method, stat_prefix)
self.assertEqual(expected_count, count, msg)
def test_delete_partition(self):
# part 2 is predefined to have all revert jobs
part_path = os.path.join(self.objects_1, '2')
self.assertTrue(os.access(part_path, os.F_OK))
ssync_calls = []
status = [200] * 2
body = pickle.dumps({})
with mocked_http_conn(*status, body=body) as request_log:
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.reconstruct(override_partitions=[2])
        expected_replicate_calls = set([
('127.0.0.0', '/sda1/2/3c1'),
('127.0.0.2', '/sda1/2/061'),
])
found_calls = set((r['ip'], r['path'])
for r in request_log.requests)
        self.assertEqual(expected_replicate_calls, found_calls)
expected_ssync_calls = sorted([
('127.0.0.0', REVERT, 2, ['3c1']),
('127.0.0.2', REVERT, 2, ['061']),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['job_type'],
c['job']['partition'],
c['suffixes'],
) for c in ssync_calls))
expected_stats = {
('increment', 'partition.delete.count.'): 2,
('timing_since', 'partition.delete.timing'): 2,
}
for stat_key, expected in expected_stats.items():
stat_method, stat_prefix = stat_key
self.assertStatCount(stat_method, stat_prefix, expected)
# part 2 should be totally empty
hash_gen = self.reconstructor._df_router[self.policy].yield_hashes(
'sda1', '2', self.policy)
for path, hash_, ts in hash_gen:
self.fail('found %s with %s in %s' % (hash_, ts, path))
# but the partition directory and hashes pkl still exist
self.assertTrue(os.access(part_path, os.F_OK))
hashes_path = os.path.join(self.objects_1, '2', diskfile.HASH_FILE)
self.assertTrue(os.access(hashes_path, os.F_OK))
# ... but on next pass
ssync_calls = []
with mocked_http_conn() as request_log:
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.reconstruct(override_partitions=[2])
# reconstruct won't generate any replicate or ssync_calls
self.assertFalse(request_log.requests)
self.assertFalse(ssync_calls)
# and the partition will get removed!
self.assertFalse(os.access(part_path, os.F_OK))
def test_process_job_all_success(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger._clear()
node_count = len(job['sync_to'])
self.reconstructor.process_job(job)
if job['job_type'] == object_reconstructor.REVERT:
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
else:
self.assertStatCount('update_stats',
'suffix.hashes',
node_count)
self.assertEqual(node_count, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
self.assertEqual(node_count, count_stats(
self.logger, 'update_stats', 'suffix.syncs'))
self.assertNotIn('error', self.logger.all_log_lines())
self.assertEqual(
dict(collections.Counter(
(job['device'], job['partition'], job['frag_index'])
for job in found_jobs)),
{('sda1', 0, 1): 1,
('sda1', 0, 2): 1,
('sda1', 1, 0): 1,
('sda1', 1, 1): 1,
('sda1', 2, 0): 1,
('sda1', 2, 2): 1})
self.assertEqual(self.reconstructor.suffix_sync, 8)
self.assertEqual(self.reconstructor.suffix_count, 8)
self.assertEqual(self.reconstructor.reconstruction_count, 6)
def test_process_job_all_insufficient_storage(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with mocked_http_conn(*[507] * 8):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger._clear()
self.reconstructor.process_job(job)
for line in self.logger.get_lines_for_level('error'):
self.assertIn('responded as unmounted', line)
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.syncs'))
self.assertEqual(
dict(collections.Counter(
(job['device'], job['partition'], job['frag_index'])
for job in found_jobs)),
{('sda1', 0, 1): 1,
('sda1', 0, 2): 1,
('sda1', 1, 0): 1,
('sda1', 1, 1): 1,
('sda1', 2, 0): 1,
('sda1', 2, 2): 1})
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(self.reconstructor.reconstruction_count, 6)
def test_process_job_all_client_error(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with mocked_http_conn(*[400] * 8):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger._clear()
self.reconstructor.process_job(job)
for line in self.logger.get_lines_for_level('error'):
self.assertIn('Invalid response 400', line)
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.syncs'))
self.assertEqual(
dict(collections.Counter(
(job['device'], job['partition'], job['frag_index'])
for job in found_jobs)),
{('sda1', 0, 1): 1,
('sda1', 0, 2): 1,
('sda1', 1, 0): 1,
('sda1', 1, 1): 1,
('sda1', 2, 0): 1,
('sda1', 2, 2): 1})
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(self.reconstructor.reconstruction_count, 6)
def test_process_job_all_timeout(self):
self.reconstructor._reset_stats()
with mock_ssync_sender(), mocked_http_conn(*[Timeout()] * 8):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger._clear()
self.reconstructor.process_job(job)
for line in self.logger.get_lines_for_level('error'):
self.assertIn('Timeout (Nones)', line)
self.assertStatCount(
'update_stats', 'suffix.hashes', 0)
self.assertStatCount(
'update_stats', 'suffix.syncs', 0)
self.assertEqual(
dict(collections.Counter(
(job['device'], job['partition'], job['frag_index'])
for job in found_jobs)),
{('sda1', 0, 1): 1,
('sda1', 0, 2): 1,
('sda1', 1, 0): 1,
('sda1', 1, 1): 1,
('sda1', 2, 0): 1,
('sda1', 2, 2): 1})
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(self.reconstructor.reconstruction_count, 6)
def test_reconstructor_skipped_partpower_increase(self):
self.reconstructor._reset_stats()
_create_test_rings(self.testdir, 10)
# Enforce re-reading the EC ring
POLICIES[1].object_ring = ring.Ring(self.testdir, ring_name='object-1')
self.reconstructor.reconstruct()
self.assertEqual(0, self.reconstructor.reconstruction_count)
warnings = self.reconstructor.logger.get_lines_for_level('warning')
self.assertIn(
"next_part_power set in policy 'one'. Skipping", warnings)
class TestGlobalSetupObjectReconstructorLegacyDurable(
TestGlobalSetupObjectReconstructor):
# Tests for reconstructor using real objects in test partition directories.
legacy_durable = True
@patch_policies(with_ec_default=True)
class TestWorkerReconstructor(unittest.TestCase):
maxDiff = None
def setUp(self):
super(TestWorkerReconstructor, self).setUp()
self.logger = debug_logger()
self.testdir = tempfile.mkdtemp()
self.recon_cache_path = os.path.join(self.testdir, 'recon')
self.rcache = os.path.join(self.recon_cache_path, 'object.recon')
# dump_recon_cache expects recon_cache_path to exist
os.mkdir(self.recon_cache_path)
def tearDown(self):
super(TestWorkerReconstructor, self).tearDown()
shutil.rmtree(self.testdir)
def test_no_workers_by_default(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{}, logger=self.logger)
self.assertEqual(0, reconstructor.reconstructor_workers)
self.assertEqual(0, len(list(reconstructor.get_worker_args())))
def test_bad_value_workers(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '-1'}, logger=self.logger)
self.assertEqual(-1, reconstructor.reconstructor_workers)
self.assertEqual(0, len(list(reconstructor.get_worker_args())))
def test_workers_with_no_devices(self):
def do_test(num_workers):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': num_workers}, logger=self.logger)
self.assertEqual(num_workers, reconstructor.reconstructor_workers)
self.assertEqual(1, len(list(reconstructor.get_worker_args())))
self.assertEqual([
{'override_partitions': [], 'override_devices': []},
], list(reconstructor.get_worker_args()))
do_test(1)
do_test(10)
def test_workers_with_devices_and_no_valid_overrides(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
self.assertEqual(2, reconstructor.reconstructor_workers)
# N.B. sdz is not in local_devices so there are no devices to process
# but still expect a single worker process
worker_args = list(reconstructor.get_worker_args(
once=True, devices='sdz'))
self.assertEqual(1, len(worker_args))
self.assertEqual([{'override_partitions': [],
'override_devices': ['sdz']}],
worker_args)
# overrides are ignored in forever mode
worker_args = list(reconstructor.get_worker_args(
once=False, devices='sdz'))
self.assertEqual(2, len(worker_args))
self.assertEqual([
{'override_partitions': [], 'override_devices': ['sdb']},
{'override_partitions': [], 'override_devices': ['sdc']}
], worker_args)
def test_workers_with_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
self.assertEqual(2, reconstructor.reconstructor_workers)
self.assertEqual(2, len(list(reconstructor.get_worker_args())))
expected = [
{'override_partitions': [], 'override_devices': ['sdb']},
{'override_partitions': [], 'override_devices': ['sdc']},
]
worker_args = list(reconstructor.get_worker_args(once=False))
self.assertEqual(2, len(worker_args))
self.assertEqual(expected, worker_args)
worker_args = list(reconstructor.get_worker_args(once=True))
self.assertEqual(2, len(worker_args))
self.assertEqual(expected, worker_args)
def test_workers_with_devices_and_overrides(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
self.assertEqual(2, reconstructor.reconstructor_workers)
# check we don't get more workers than override devices...
# N.B. sdz is not in local_devices so should be ignored for the
# purposes of generating workers
worker_args = list(reconstructor.get_worker_args(
once=True, devices='sdb,sdz', partitions='99,333'))
self.assertEqual(1, len(worker_args))
self.assertEqual(
[{'override_partitions': [99, 333], 'override_devices': ['sdb']}],
worker_args)
# overrides are ignored in forever mode
worker_args = list(reconstructor.get_worker_args(
once=False, devices='sdb,sdz', partitions='99,333'))
self.assertEqual(2, len(worker_args))
self.assertEqual([
{'override_partitions': [], 'override_devices': ['sdb']},
{'override_partitions': [], 'override_devices': ['sdc']}
], worker_args)
def test_workers_with_lots_of_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: [
'sdb', 'sdc', 'sdd', 'sde', 'sdf']
self.assertEqual(2, reconstructor.reconstructor_workers)
self.assertEqual(2, len(list(reconstructor.get_worker_args())))
self.assertEqual([
{'override_partitions': [], 'override_devices': [
'sdb', 'sdd', 'sdf']},
{'override_partitions': [], 'override_devices': [
'sdc', 'sde']},
], list(reconstructor.get_worker_args()))
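        # the expected split above suggests devices are striped across workers
        # (device i going to worker i % num_workers), so each worker gets
        # every n-th local device rather than a contiguous slice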
def test_workers_with_lots_of_devices_and_overrides(self):
# check that override devices get distributed across workers
# in similar fashion to all devices
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: [
'sdb', 'sdc', 'sdd', 'sde', 'sdf']
self.assertEqual(2, reconstructor.reconstructor_workers)
worker_args = list(reconstructor.get_worker_args(
once=True, devices='sdb,sdd,sdf', partitions='99,333'))
self.assertEqual(1, len(worker_args))
# 5 devices in total, 2 workers -> up to 3 devices per worker so a
# single worker should handle the requested override devices
self.assertEqual([
{'override_partitions': [99, 333], 'override_devices': [
'sdb', 'sdd', 'sdf']},
], worker_args)
# with 4 override devices, expect 2 per worker
worker_args = list(reconstructor.get_worker_args(
once=True, devices='sdb,sdc,sdd,sdf', partitions='99,333'))
self.assertEqual(2, len(worker_args))
self.assertEqual([
{'override_partitions': [99, 333], 'override_devices': [
'sdb', 'sdd']},
{'override_partitions': [99, 333], 'override_devices': [
'sdc', 'sdf']},
], worker_args)
def test_workers_with_lots_of_workers(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '10'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
self.assertEqual(10, reconstructor.reconstructor_workers)
self.assertEqual(2, len(list(reconstructor.get_worker_args())))
self.assertEqual([
{'override_partitions': [], 'override_devices': ['sdb']},
{'override_partitions': [], 'override_devices': ['sdc']},
], list(reconstructor.get_worker_args()))
def test_workers_with_lots_of_workers_and_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '10'}, logger=self.logger)
reconstructor.get_local_devices = lambda: [
'sdb', 'sdc', 'sdd', 'sde', 'sdf']
self.assertEqual(10, reconstructor.reconstructor_workers)
self.assertEqual(5, len(list(reconstructor.get_worker_args())))
self.assertEqual([
{'override_partitions': [], 'override_devices': ['sdb']},
{'override_partitions': [], 'override_devices': ['sdc']},
{'override_partitions': [], 'override_devices': ['sdd']},
{'override_partitions': [], 'override_devices': ['sde']},
{'override_partitions': [], 'override_devices': ['sdf']},
], list(reconstructor.get_worker_args()))
def test_workers_with_some_workers_and_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{}, logger=self.logger)
reconstructor.get_local_devices = lambda: [
'd%s' % (i + 1) for i in range(21)]
# ... with many devices per worker, worker count is pretty granular
for i in range(1, 8):
reconstructor.reconstructor_workers = i
self.assertEqual(i, len(list(reconstructor.get_worker_args())))
# ... then it gets sorta stair step
for i in range(9, 10):
reconstructor.reconstructor_workers = i
self.assertEqual(7, len(list(reconstructor.get_worker_args())))
# 2-3 devices per worker
for args in reconstructor.get_worker_args():
self.assertIn(len(args['override_devices']), (2, 3))
for i in range(11, 20):
reconstructor.reconstructor_workers = i
self.assertEqual(11, len(list(reconstructor.get_worker_args())))
# 1, 2 devices per worker
for args in reconstructor.get_worker_args():
self.assertIn(len(args['override_devices']), (1, 2))
# this is debatable, but maybe I'll argue if you're going to have
# *some* workers with > 1 device, it's better to have fewer workers
# with devices spread out evenly than a couple outliers?
self.assertEqual([
{'override_partitions': [], 'override_devices': ['d1', 'd12']},
{'override_partitions': [], 'override_devices': ['d2', 'd13']},
{'override_partitions': [], 'override_devices': ['d3', 'd14']},
{'override_partitions': [], 'override_devices': ['d4', 'd15']},
{'override_partitions': [], 'override_devices': ['d5', 'd16']},
{'override_partitions': [], 'override_devices': ['d6', 'd17']},
{'override_partitions': [], 'override_devices': ['d7', 'd18']},
{'override_partitions': [], 'override_devices': ['d8', 'd19']},
{'override_partitions': [], 'override_devices': ['d9', 'd20']},
{'override_partitions': [], 'override_devices': ['d10', 'd21']},
{'override_partitions': [], 'override_devices': ['d11']},
], list(reconstructor.get_worker_args()))
# you can't get < than 1 device per worker
for i in range(21, 52):
reconstructor.reconstructor_workers = i
self.assertEqual(21, len(list(reconstructor.get_worker_args())))
for args in reconstructor.get_worker_args():
self.assertEqual(1, len(args['override_devices']))
def test_next_rcache_update_configured_with_stats_interval(self):
now = time.time()
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor = object_reconstructor.ObjectReconstructor(
{}, logger=self.logger)
self.assertEqual(now + 300, reconstructor._next_rcache_update)
reconstructor = object_reconstructor.ObjectReconstructor(
{'stats_interval': '30'}, logger=self.logger)
self.assertEqual(now + 30, reconstructor._next_rcache_update)
def test_is_healthy_rcache_update_waits_for_next_update(self):
now = time.time()
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
# file does not exist to start
self.assertFalse(os.path.exists(self.rcache))
self.assertTrue(reconstructor.is_healthy())
# ... and isn't created until _next_rcache_update
self.assertFalse(os.path.exists(self.rcache))
# ... but if we wait 5 mins (by default)
orig_next_update = reconstructor._next_rcache_update
with mock.patch('swift.obj.reconstructor.time.time',
return_value=now + 301):
self.assertTrue(reconstructor.is_healthy())
self.assertGreater(reconstructor._next_rcache_update, orig_next_update)
# ... it will be created
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
# and empty
self.assertEqual({}, data)
def test_is_healthy(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
self.assertTrue(reconstructor.is_healthy())
reconstructor.get_local_devices = lambda: {
'sdb%d' % p for p in reconstructor.policies}
self.assertFalse(reconstructor.is_healthy())
reconstructor.all_local_devices = {
'sdb%d' % p for p in reconstructor.policies}
self.assertTrue(reconstructor.is_healthy())
def test_is_healthy_detects_ring_change(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path,
'reconstructor_workers': 1,
# bind ip and port will not match any dev in first version of ring
'bind_ip': '10.0.0.20', 'bind_port': '1020'},
logger=self.logger)
p = random.choice(reconstructor.policies)
self.assertEqual(14, len(p.object_ring.devs)) # sanity check
worker_args = list(reconstructor.get_worker_args())
self.assertFalse(worker_args[0]['override_devices']) # no local devs
self.assertTrue(reconstructor.is_healthy())
# expand ring - now there are local devices
p.object_ring.set_replicas(28)
self.assertEqual(28, len(p.object_ring.devs)) # sanity check
self.assertFalse(reconstructor.is_healthy())
self.assertNotEqual(worker_args, list(reconstructor.get_worker_args()))
self.assertTrue(reconstructor.is_healthy())
def test_final_recon_dump(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
reconstructor.all_local_devices = ['sda', 'sdc']
total = 12.0
now = time.time()
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': now,
'object_reconstruction_time': total,
}, data)
total = 14.0
now += total * 60
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': now,
'object_reconstruction_time': total,
}, data)
# per_disk_stats with workers
reconstructor.reconstructor_workers = 1
old_total = total
total = 16.0
before = now
now += total * 60
with mock.patch('swift.obj.reconstructor.time.time',
return_value=now), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-1'):
reconstructor.final_recon_dump(total, override_devices=[
'sda', 'sdc'])
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': before,
'object_reconstruction_time': old_total,
'object_reconstruction_per_disk': {
'sda': {
'object_reconstruction_last': now,
'object_reconstruction_time': total,
'pid': 'pid-1',
},
'sdc': {
'object_reconstruction_last': now,
'object_reconstruction_time': total,
'pid': 'pid-1',
},
},
}, data)
# and without workers we clear it out
reconstructor.reconstructor_workers = 0
total = 18.0
now += total * 60
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': now,
'object_reconstruction_time': total,
}, data)
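        # the dumps above show the recon cache shape: a flat aggregate when
        # running without workers, plus an 'object_reconstruction_per_disk'
        # map keyed by device (including the worker pid) when workers are used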
def test_dump_recon_run_once_inline(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
reconstructor.reconstruct = mock.MagicMock()
now = time.time()
later = now + 300 # 5 mins
with mock.patch('swift.obj.reconstructor.time.time', side_effect=[
now, later, later]):
reconstructor.run_once()
# no override args passed to reconstruct
self.assertEqual([mock.call(
override_devices=[],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
# script mode with no override args, we expect recon dumps
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
}, data)
total = 10.0
later += total * 60
with mock.patch('swift.obj.reconstructor.time.time',
return_value=later):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': later,
'object_reconstruction_time': 10.0,
}, data)
def test_dump_recon_run_once_in_worker(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path,
'reconstructor_workers': 1},
logger=self.logger)
reconstructor.get_local_devices = lambda: {'sda'}
now = time.time()
later = now + 300 # 5 mins
def do_test(run_kwargs, expected_device):
# get the actual kwargs that would be passed to run_once in a
# worker
run_once_kwargs = list(
reconstructor.get_worker_args(once=True, **run_kwargs))[0]
reconstructor.reconstruct = mock.MagicMock()
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now, later, later]):
reconstructor.run_once(**run_once_kwargs)
self.assertEqual([mock.call(
override_devices=[expected_device],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
# no aggregate is written but perhaps it should be, in which
# case this assertion will need to change
'object_reconstruction_per_disk': {
expected_device: {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': mock.ANY
}
}
}, data)
# script mode with no CLI override args, we expect recon dumps
do_test({}, 'sda')
# script mode *with* CLI override devices, we expect recon dumps
os.unlink(self.rcache)
do_test(dict(devices='sda'), 'sda')
# if the override device is not in local devices we still get
# a recon dump, but it'll get cleaned up in the next aggregation
os.unlink(self.rcache)
do_test(dict(devices='sdz'), 'sdz')
# now disable workers and check that inline run_once updates rcache
# and clears out per disk stats
now = time.time()
later = now + 600 # 10 mins
reconstructor.reconstructor_workers = 0
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now, later, later]):
reconstructor.run_once()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': later,
'object_reconstruction_time': 10.0,
}, data)
def test_no_dump_recon_run_once(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
reconstructor.get_local_devices = lambda: {'sda', 'sdb', 'sdc'}
def do_test(run_once_kwargs, expected_devices, expected_partitions):
reconstructor.reconstruct = mock.MagicMock()
now = time.time()
later = now + 300 # 5 mins
with mock.patch('swift.obj.reconstructor.time.time', side_effect=[
now, later, later]):
reconstructor.run_once(**run_once_kwargs)
# override args passed to reconstruct
actual_calls = reconstructor.reconstruct.call_args_list
self.assertEqual({'override_devices', 'override_partitions'},
set(actual_calls[0][1]))
self.assertEqual(sorted(expected_devices),
sorted(actual_calls[0][1]['override_devices']))
self.assertEqual(sorted(expected_partitions),
sorted(actual_calls[0][1]['override_partitions']))
self.assertFalse(actual_calls[1:])
self.assertEqual(False, os.path.exists(self.rcache))
# inline mode with overrides never does recon dump
reconstructor.reconstructor_workers = 0
kwargs = {'devices': 'sda,sdb'}
do_test(kwargs, ['sda', 'sdb'], [])
# Have partition override, so no recon dump
kwargs = {'partitions': '1,2,3'}
do_test(kwargs, [], [1, 2, 3])
reconstructor.reconstructor_workers = 1
worker_kwargs = list(
reconstructor.get_worker_args(once=True, **kwargs))[0]
do_test(worker_kwargs, ['sda', 'sdb', 'sdc'], [1, 2, 3])
reconstructor.reconstructor_workers = 0
kwargs = {'devices': 'sda,sdb', 'partitions': '1,2,3'}
do_test(kwargs, ['sda', 'sdb'], [1, 2, 3])
reconstructor.reconstructor_workers = 1
worker_kwargs = list(
reconstructor.get_worker_args(once=True, **kwargs))[0]
do_test(worker_kwargs, ['sda', 'sdb'], [1, 2, 3])
# 'sdz' is not in local devices
reconstructor.reconstructor_workers = 0
kwargs = {'devices': 'sdz'}
do_test(kwargs, ['sdz'], [])
def test_run_forever_recon_aggregation(self):
class StopForever(Exception):
pass
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sda', 'sdb', 'sdc', 'sdd']
reconstructor.reconstruct = mock.MagicMock()
now = time.time()
later = now + 300 # 5 mins
worker_args = list(
# include 'devices' kwarg as a sanity check - it should be ignored
# in run_forever mode
reconstructor.get_worker_args(once=False, devices='sda'))
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now, later, later]), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-1'), \
mock.patch('swift.obj.reconstructor.sleep',
side_effect=[StopForever]), \
Timeout(.3), quiet_eventlet_exceptions(), \
self.assertRaises(StopForever):
gt = spawn(reconstructor.run_forever, **worker_args[0])
gt.wait()
# override args are passed to reconstruct
self.assertEqual([mock.call(
override_devices=['sda', 'sdc'],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
# forever mode with override args, we expect per-disk recon dumps
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'sda': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdc': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
}
}, data)
reconstructor.reconstruct.reset_mock()
# another worker would get *different* disks
before = now = later
later = now + 300 # 5 more minutes
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now, later, later]), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-2'), \
mock.patch('swift.obj.reconstructor.sleep',
side_effect=[StopForever]), \
Timeout(.3), quiet_eventlet_exceptions(), \
self.assertRaises(StopForever):
gt = spawn(reconstructor.run_forever, **worker_args[1])
gt.wait()
        # override args are passed to reconstruct
self.assertEqual([mock.call(
override_devices=['sdb', 'sdd'],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
# forever mode with override args, we expect per-disk recon dumps
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'sda': {
'object_reconstruction_last': before,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdb': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-2',
},
'sdc': {
'object_reconstruction_last': before,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdd': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-2',
},
}
}, data)
        # aggregation is done in the parent process even later
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': later,
'object_reconstruction_time': 10.0,
'object_reconstruction_per_disk': {
'sda': {
'object_reconstruction_last': before,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdb': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-2',
},
'sdc': {
'object_reconstruction_last': before,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdd': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-2',
},
}
}, data)
def test_recon_aggregation_waits_for_all_devices(self):
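        # aggregate_recon_update should not write the aggregate keys until
        # every device in all_local_devices has reported per-disk stats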
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.all_local_devices = set([
'd0', 'd1', 'd2', 'd3',
# unreported device definitely matters
'd4'])
start = time.time() - 1000
for i in range(4):
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start + (300 * i)), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-%s' % i):
reconstructor.final_recon_dump(
i, override_devices=['d%s' % i])
# sanity
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 0.0,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
# unreported device d4 prevents aggregation
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertNotIn('object_reconstruction_last', data)
self.assertNotIn('object_reconstruction_time', data)
self.assertEqual(set(['d0', 'd1', 'd2', 'd3']),
set(data['object_reconstruction_per_disk'].keys()))
# it's idempotent
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertNotIn('object_reconstruction_last', data)
self.assertNotIn('object_reconstruction_time', data)
self.assertEqual(set(['d0', 'd1', 'd2', 'd3']),
set(data['object_reconstruction_per_disk'].keys()))
# remove d4, we no longer wait on it for aggregation
reconstructor.all_local_devices = set(['d0', 'd1', 'd2', 'd3'])
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual(start + 900, data['object_reconstruction_last'])
self.assertEqual(15, data['object_reconstruction_time'])
self.assertEqual(set(['d0', 'd1', 'd2', 'd3']),
set(data['object_reconstruction_per_disk'].keys()))
def test_recon_aggregation_removes_devices(self):
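        # per-disk stats for devices that drop out of all_local_devices are
        # removed on aggregation, and the aggregates are recalculated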
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.all_local_devices = set(['d0', 'd1', 'd2', 'd3'])
start = time.time() - 1000
for i in range(4):
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start + (300 * i)), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-%s' % i):
reconstructor.final_recon_dump(
i, override_devices=['d%s' % i])
# sanity
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 0.0,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
reconstructor.all_local_devices = set(['d0', 'd1', 'd2', 'd3'])
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual(start + 900, data['object_reconstruction_last'])
self.assertEqual(15, data['object_reconstruction_time'])
self.assertEqual(set(['d0', 'd1', 'd2', 'd3']),
set(data['object_reconstruction_per_disk'].keys()))
# it's idempotent
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 15,
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 0.0,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
# if a device is removed from the ring
reconstructor.all_local_devices = set(['d1', 'd2', 'd3'])
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
        # ... its per-disk stats are removed (d0)
self.assertEqual({
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 11,
'object_reconstruction_per_disk': {
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
# which can affect the aggregates!
reconstructor.all_local_devices = set(['d1', 'd2'])
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 6,
'object_reconstruction_per_disk': {
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
}
}, data)
def test_recon_aggregation_races_with_final_recon_dump(self):
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.all_local_devices = set(['d0', 'd1'])
start = time.time() - 1000
# first worker dumps to recon cache
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-0'):
reconstructor.final_recon_dump(
1, override_devices=['d0'])
# sanity
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 1,
'pid': 'pid-0',
},
}
}, data)
# simulate a second worker concurrently dumping to recon cache while
        # parent is aggregating existing results; mock dump_recon_cache as a
# convenient way to interrupt parent aggregate_recon_update and 'pass
# control' to second worker
updated_data = [] # state of recon cache just after second worker dump
def simulate_other_process_final_recon_dump():
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start + 999), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-1'):
reconstructor.final_recon_dump(
1000, override_devices=['d1'])
with open(self.rcache) as f:
updated_data.append(json.load(f))
def fake_dump_recon_cache(*args, **kwargs):
# temporarily put back real dump_recon_cache
with mock.patch('swift.obj.reconstructor.dump_recon_cache',
dump_recon_cache):
simulate_other_process_final_recon_dump()
# and now proceed with parent dump_recon_cache
dump_recon_cache(*args, **kwargs)
reconstructor.dump_recon_cache = fake_dump_recon_cache
with mock.patch('swift.obj.reconstructor.dump_recon_cache',
fake_dump_recon_cache):
reconstructor.aggregate_recon_update()
self.assertEqual([{ # sanity check - second process did dump its data
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 1,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 999,
'object_reconstruction_time': 1000,
'pid': 'pid-1',
},
}
}], updated_data)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 1,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 999,
'object_reconstruction_time': 1000,
'pid': 'pid-1',
},
}
}, data)
# next aggregation will find d1 stats
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': start + 999,
'object_reconstruction_time': 1000,
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 1,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 999,
'object_reconstruction_time': 1000,
'pid': 'pid-1',
},
}
}, data)
@patch_policies(with_ec_default=True)
class BaseTestObjectReconstructor(unittest.TestCase):
def setUp(self):
self.policy = POLICIES.default
self.policy.object_ring._rtime = time.time() + 3600
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'devices')
self.local_dev = self.policy.object_ring.devs[0]
self.ip = self.local_dev['replication_ip']
self.port = self.local_dev['replication_port']
self.conf = {
'devices': self.devices,
'mount_check': False,
'bind_ip': self.ip,
'bind_port': self.port,
}
self.logger = debug_logger('object-reconstructor')
self._configure_reconstructor()
self.policy.object_ring.max_more_nodes = \
self.policy.object_ring.replicas
self.ts_iter = make_timestamp_iter()
self.fabricated_ring = FabricatedRing(replicas=14, devices=28)
def _configure_reconstructor(self, **kwargs):
self.conf.update(kwargs)
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.reconstructor._reset_stats()
# some tests bypass build_reconstruction_jobs and go to process_job
# directly, so you end up with a /0 when you try to show the
# percentage of complete jobs as ratio of the total job count
self.reconstructor.job_count = 1
# if we ever let a test through without properly patching the
        # REPLICATE and SSYNC calls - let's fail sort of fast-ish
self.reconstructor.lockup_timeout = 3
def tearDown(self):
self.reconstructor._reset_stats()
self.reconstructor.stats_line()
shutil.rmtree(self.testdir)
def ts(self):
return next(self.ts_iter)
class TestObjectReconstructor(BaseTestObjectReconstructor):
def test_handoffs_only_default(self):
        # sanity: neither option is added to the default conf
self.conf.pop('handoffs_first', None)
self.conf.pop('handoffs_only', None)
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
def test_handoffs_first_enables_handoffs_only(self):
self.conf['handoffs_first'] = "True"
self.conf.pop('handoffs_only', None) # sanity
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertTrue(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor '
'of handoffs_only. This option may be ignored in a '
'future release.',
'Handoff only mode is not intended for normal operation, '
'use handoffs_only with care.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_ignores_handoffs_first(self):
self.conf['handoffs_first'] = "True"
self.conf['handoffs_only'] = "False"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
'Ignored handoffs_first option in favor of handoffs_only.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_enabled(self):
self.conf.pop('handoffs_first', None) # sanity
self.conf['handoffs_only'] = "True"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertTrue(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'Handoff only mode is not intended for normal operation, '
'use handoffs_only with care.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_true_and_first_true(self):
self.conf['handoffs_first'] = "True"
self.conf['handoffs_only'] = "True"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertTrue(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
'Handoff only mode is not intended for normal operation, '
'use handoffs_only with care.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_false_and_first_false(self):
self.conf['handoffs_only'] = "False"
self.conf['handoffs_first'] = "False"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_none_and_first_false(self):
self.conf['handoffs_first'] = "False"
self.conf.pop('handoffs_only', None) # sanity
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_false_and_first_none(self):
self.conf.pop('handoffs_first', None) # sanity
self.conf['handoffs_only'] = "False"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
self.assertFalse(warnings)
def test_handoffs_only_true_and_first_false(self):
self.conf['handoffs_first'] = "False"
self.conf['handoffs_only'] = "True"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertTrue(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
'Handoff only mode is not intended for normal operation, '
'use handoffs_only with care.',
]
self.assertEqual(expected, warnings)
def test_two_ec_policies(self):
with patch_policies([
StoragePolicy(0, name='zero', is_deprecated=True),
ECStoragePolicy(1, name='one', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=3),
ECStoragePolicy(2, name='two',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2)],
fake_ring_args=[
{}, {'replicas': 7}, {'replicas': 10}]):
self._configure_reconstructor()
jobs = []
def process_job(job):
jobs.append(job)
self.reconstructor.process_job = process_job
os.makedirs(os.path.join(self.devices, 'sda', 'objects-1', '0'))
self.reconstructor.run_once()
self.assertEqual(1, len(jobs))
def test_collect_parts_skips_non_ec_policy_and_device(self):
stub_parts = (371, 78, 419, 834)
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for part in stub_parts:
utils.mkdirs(os.path.join(
self.devices, self.local_dev['device'],
datadir, str(part)))
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
self.assertEqual(found_parts, sorted(stub_parts))
for part_info in part_infos:
self.assertEqual(part_info['local_dev'], self.local_dev)
self.assertEqual(part_info['policy'], self.policy)
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
def test_collect_parts_skips_non_local_devs_servers_per_port(self):
self._configure_reconstructor(devices=self.devices, mount_check=False,
bind_ip=self.ip, bind_port=self.port,
servers_per_port=2)
device_parts = {
'sda': (374,),
'sdb': (179, 807), # w/one-serv-per-port, same IP alone is local
'sdc': (363, 468, 843),
'sdd': (912,), # "not local" via different IP
}
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
        # sda, sdb and sdc share our replication IP, so with servers_per_port
        # all three count as local; sdd is added below on a different IP
local_devs = ('sda', 'sdb', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port + 1 if dev == 'sdb' else self.port,
} for i, dev in enumerate(local_devs)]
stub_ring_devs.append({
'id': i + 1,
'device': 'sdd',
'replication_ip': '127.0.0.88', # not local via IP
'replication_port': self.port,
})
self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
expected_parts = sorted(itertools.chain(
*(device_parts[d] for d in local_devs)))
self.assertEqual(found_parts, expected_parts)
for part_info in part_infos:
self.assertEqual(part_info['policy'], self.policy)
self.assertIn(part_info['local_dev'], stub_ring_devs)
dev = part_info['local_dev']
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
    def test_collect_parts_multi_device_skips_non_local_devs(self):
device_parts = {
'sda': (374,),
'sdb': (179, 807), # "not local" via different port
'sdc': (363, 468, 843),
'sdd': (912,), # "not local" via different IP
}
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port,
} for i, dev in enumerate(local_devs)]
stub_ring_devs.append({
'id': i + 1,
'device': 'sdb',
'replication_ip': self.ip,
'replication_port': self.port + 1, # not local via port
})
stub_ring_devs.append({
'id': i + 2,
'device': 'sdd',
'replication_ip': '127.0.0.88', # not local via IP
'replication_port': self.port,
})
self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
expected_parts = sorted(itertools.chain(
*(device_parts[d] for d in local_devs)))
self.assertEqual(found_parts, expected_parts)
for part_info in part_infos:
self.assertEqual(part_info['policy'], self.policy)
self.assertIn(part_info['local_dev'], stub_ring_devs)
dev = part_info['local_dev']
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
def test_collect_parts_multi_device_skips_non_ring_devices(self):
device_parts = {
'sda': (374,),
'sdc': (363, 468, 843),
}
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port,
} for i, dev in enumerate(local_devs)]
self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
expected_parts = sorted(itertools.chain(
*(device_parts[d] for d in local_devs)))
self.assertEqual(found_parts, expected_parts)
for part_info in part_infos:
self.assertEqual(part_info['policy'], self.policy)
self.assertIn(part_info['local_dev'], stub_ring_devs)
dev = part_info['local_dev']
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
def test_collect_parts_mount_check(self):
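        # with mount_check disabled collect_parts never consults check_mount;
        # once it's enabled, unmounted devices are skipped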
# each device has one part in it
local_devs = ('sda', 'sdb')
for i, dev in enumerate(local_devs):
datadir = diskfile.get_data_dir(self.policy)
utils.mkdirs(os.path.join(
self.devices, dev, datadir, str(i)))
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for i, dev in enumerate(local_devs)]
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(2, len(part_infos)) # sanity
self.assertEqual(set(int(p['partition']) for p in part_infos),
set([0, 1]))
paths = []
def fake_check_mount(devices, device):
paths.append(os.path.join(devices, device))
return False
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs), \
mock.patch('swift.obj.diskfile.check_mount',
fake_check_mount):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(2, len(part_infos)) # sanity, same jobs
self.assertEqual(set(int(p['partition']) for p in part_infos),
set([0, 1]))
        # ... because check_mount was not called (mount_check is off)
self.assertEqual(paths, [])
# ... now with mount check
self._configure_reconstructor(mount_check=True)
self.assertTrue(self.reconstructor.mount_check)
for policy in POLICIES:
self.assertTrue(self.reconstructor._df_router[policy].mount_check)
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs), \
mock.patch('swift.obj.diskfile.check_mount',
fake_check_mount):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual([], part_infos) # sanity, no jobs
        # ... because fake_check_mount returned False for both paths
self.assertEqual(set(paths), set([
os.path.join(self.devices, dev) for dev in local_devs]))
def fake_check_mount(devices, device):
path = os.path.join(devices, device)
if path.endswith('sda'):
return True
else:
return False
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs), \
mock.patch('swift.obj.diskfile.check_mount',
fake_check_mount):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(1, len(part_infos)) # only sda picked up (part 0)
self.assertEqual(part_infos[0]['partition'], 0)
def test_collect_parts_cleans_tmp(self):
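        # collect_parts should clean each local device's tmp dir of anything
        # older than reclaim_age via unlink_older_than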
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for i, dev in enumerate(local_devs)]
for device in local_devs:
utils.mkdirs(os.path.join(self.devices, device))
fake_unlink = mock.MagicMock()
self._configure_reconstructor(reclaim_age=1000)
now = time.time()
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch('swift.obj.reconstructor.time.time',
return_value=now), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs), \
mock.patch('swift.obj.reconstructor.unlink_older_than',
fake_unlink):
self.assertEqual([], list(self.reconstructor.collect_parts()))
        # each local device has unlink_older_than called on it,
# with now - self.reclaim_age
tmpdir = diskfile.get_tmp_dir(self.policy)
expected = now - 1000
self.assertEqual(fake_unlink.mock_calls, [
mock.call(os.path.join(self.devices, dev, tmpdir), expected)
for dev in local_devs])
def test_collect_parts_creates_datadir(self):
# create just the device path
dev_path = os.path.join(self.devices, self.local_dev['device'])
utils.mkdirs(dev_path)
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]):
self.assertEqual([], list(self.reconstructor.collect_parts()))
datadir_path = os.path.join(dev_path,
diskfile.get_data_dir(self.policy))
self.assertTrue(os.path.exists(datadir_path))
def test_collect_parts_creates_datadir_error(self):
# create just the device path
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
utils.mkdirs(os.path.dirname(datadir_path))
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch('swift.obj.reconstructor.mkdirs',
side_effect=OSError('kaboom!')):
self.assertEqual([], list(self.reconstructor.collect_parts()))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1,
'Expected only one error, got %r' % error_lines)
line = error_lines[0]
self.assertIn('Unable to create', line)
self.assertIn(datadir_path, line)
def test_collect_parts_skips_invalid_paths(self):
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
utils.mkdirs(os.path.dirname(datadir_path))
with open(datadir_path, 'w') as f:
f.write('junk')
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]):
self.assertEqual([], list(self.reconstructor.collect_parts()))
self.assertTrue(os.path.exists(datadir_path))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1,
'Expected only one error, got %r' % error_lines)
line = error_lines[0]
self.assertIn('Unable to list partitions', line)
self.assertIn(datadir_path, line)
def test_reconstruct_removes_non_partition_files(self):
# create some junk next to partitions
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
num_parts = 3
for part in range(num_parts):
utils.mkdirs(os.path.join(datadir_path, str(part)))
# Add some clearly non-partition dentries
utils.mkdirs(os.path.join(datadir_path, 'not/a/partition'))
for junk_name in ('junk', '1234'):
junk_file = os.path.join(datadir_path, junk_name)
with open(junk_file, 'w') as f:
f.write('junk')
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch('swift.obj.reconstructor.'
'ObjectReconstructor.process_job'):
self.reconstructor.reconstruct()
# all the bad gets cleaned up
errors = []
for junk_name in ('junk', '1234', 'not'):
junk_file = os.path.join(datadir_path, junk_name)
if os.path.exists(junk_file):
errors.append('%s still exists!' % junk_file)
self.assertFalse(errors)
error_lines = self.logger.get_lines_for_level('warning')
self.assertIn('Unexpected entity in data dir: %r'
% os.path.join(datadir_path, 'not'), error_lines)
self.assertIn('Unexpected entity in data dir: %r'
% os.path.join(datadir_path, 'junk'), error_lines)
self.assertIn('Unexpected entity %r is not a directory'
% os.path.join(datadir_path, '1234'), error_lines)
self.assertEqual(self.reconstructor.reconstruction_part_count, 6)
def test_collect_parts_overrides(self):
# setup multiple devices, with multiple parts
device_parts = {
'sda': (374, 843),
'sdb': (179, 807),
'sdc': (363, 468, 843),
}
datadir = diskfile.get_data_dir(self.policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for i, dev in enumerate(local_devs)]
expected = (
({}, [
('sda', 374),
('sda', 843),
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sda', 'sdc']}, [
('sda', 374),
('sda', 843),
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sdc']}, [
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sda']}, [
('sda', 374),
('sda', 843),
]),
({'override_devices': ['sdx']}, []),
({'override_partitions': [374]}, [
('sda', 374),
]),
({'override_partitions': [843]}, [
('sda', 843),
('sdc', 843),
]),
({'override_partitions': [843], 'override_devices': ['sda']}, [
('sda', 843),
]),
)
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
for kwargs, expected_parts in expected:
part_infos = list(self.reconstructor.collect_parts(**kwargs))
expected_paths = set(
os.path.join(self.devices, dev, datadir, str(part))
for dev, part in expected_parts)
found_paths = set(p['part_path'] for p in part_infos)
msg = 'expected %r != %r for %r' % (
expected_paths, found_paths, kwargs)
self.assertEqual(expected_paths, found_paths, msg)
def test_build_jobs_creates_empty_hashes(self):
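        # building jobs for an empty partition dir creates the hashes file
        # and yields a single SYNC job with no suffixes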
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy), '0')
utils.mkdirs(part_path)
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': 0,
'part_path': part_path,
}
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], 0)
self.assertEqual(job['suffixes'], [])
self.assertEqual(len(job['sync_to']), 2)
self.assertEqual(job['partition'], 0)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], {})
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
self.assertTrue(os.path.exists(hashes_file))
suffixes = self.reconstructor._get_hashes(
self.local_dev['device'], 0, self.policy, do_listdir=True)
self.assertEqual(suffixes, {})
def test_build_jobs_no_hashes(self):
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy), '0')
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': 0,
'part_path': part_path,
}
stub_hashes = {}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], 0)
self.assertEqual(job['suffixes'], [])
self.assertEqual(len(job['sync_to']), 2)
self.assertEqual(job['partition'], 0)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], {})
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
def test_build_jobs_primary(self):
ring = self.policy.object_ring = self.fabricated_ring
# find a partition for which we're a primary
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
try:
frag_index = [n['id'] for n in part_nodes].index(
self.local_dev['id'])
except ValueError:
pass
else:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(job['suffixes'], stub_hashes.keys())
self.assertEqual(set([n['index'] for n in job['sync_to']]),
set([(frag_index + 1) % ring.replicas,
(frag_index - 1) % ring.replicas]))
self.assertEqual(job['partition'], partition)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], stub_hashes)
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
def test_build_jobs_handoff(self):
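        # a partition we don't belong to gets a single REVERT job whose
        # sync_to targets all share the same backend frag index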
ring = self.policy.object_ring = self.fabricated_ring
# find a partition for which we're a handoff
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
if self.local_dev['id'] not in [n['id'] for n in part_nodes]:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
# since this part doesn't belong on us it doesn't matter what
# frag_index we have
frag_index = random.randint(0, self.policy.ec_n_unique_fragments - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs), 'Expected only one job, got %r' % jobs)
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.REVERT)
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(sorted(job['suffixes']), sorted(stub_hashes.keys()))
self.assertEqual(
self.policy.ec_duplication_factor, len(job['sync_to']))
        # the sync_to nodes should be different from each other
node_ids = set([node['id'] for node in job['sync_to']])
self.assertEqual(len(node_ids),
self.policy.ec_duplication_factor)
        # but all the nodes have the same backend index to sync
node_indexes = set(
self.policy.get_backend_index(node['index'])
for node in job['sync_to'])
self.assertEqual(1, len(node_indexes))
self.assertEqual(job['sync_to'][0]['index'], frag_index)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['partition'], partition)
self.assertEqual(sorted(job['hashes']), sorted(stub_hashes))
self.assertEqual(job['local_dev'], self.local_dev)
def test_build_jobs_mixed(self):
ring = self.policy.object_ring = self.fabricated_ring
# find a partition for which we're a primary
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
try:
node_index = [n['id'] for n in part_nodes].index(
self.local_dev['id'])
except ValueError:
pass
else:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
frag_index = self.policy.get_backend_index(node_index)
other_frag_index = random.choice(
[f for f in range(self.policy.ec_n_unique_fragments)
if f != node_index])
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'456': {other_frag_index: 'hash', None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(2, len(jobs))
sync_jobs, revert_jobs = [], []
for job in jobs:
self.assertEqual(job['partition'], partition)
self.assertEqual(job['path'], part_path)
self.assertEqual(sorted(job['hashes']), sorted(stub_hashes))
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
{
object_reconstructor.SYNC: sync_jobs,
object_reconstructor.REVERT: revert_jobs,
}[job['job_type']].append(job)
self.assertEqual(1, len(sync_jobs))
job = sync_jobs[0]
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(sorted(job['suffixes']), sorted(['123', 'abc']))
self.assertEqual(len(job['sync_to']), 2)
self.assertEqual(set([n['index'] for n in job['sync_to']]),
set([(frag_index + 1) % ring.replicas,
(frag_index - 1) % ring.replicas]))
self.assertEqual(1, len(revert_jobs))
job = revert_jobs[0]
self.assertEqual(job['frag_index'], other_frag_index)
self.assertEqual(job['suffixes'], ['456'])
self.assertEqual(len(job['sync_to']),
self.policy.ec_duplication_factor)
self.assertEqual(job['sync_to'][0]['index'], other_frag_index)
def test_build_jobs_revert_only_tombstones(self):
ring = self.policy.object_ring = self.fabricated_ring
# find a partition for which we're a handoff
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
if self.local_dev['id'] not in [n['id'] for n in part_nodes]:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
# we have no fragment index to hint the jobs where they belong
stub_hashes = {
'123': {None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(len(jobs), 1, 'Expected only one job, got %r' % jobs)
job = jobs[0]
expected = {
'job_type': object_reconstructor.REVERT,
'frag_index': None,
'suffixes': stub_hashes.keys(),
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
self.assertEqual(ring.replica_count, len(part_nodes))
expected_samples = (
(self.policy.ec_n_unique_fragments *
self.policy.ec_duplication_factor) -
self.policy.ec_ndata + 1)
self.assertEqual(len(job['sync_to']), expected_samples)
for k, v in expected.items():
msg = 'expected %s != %s for %s' % (
v, job[k], k)
self.assertEqual(v, job[k], msg)
def test_get_suffix_delta(self):
# different
local_suff = {'123': {None: 'abc', 0: 'def'}}
remote_suff = {'456': {None: 'ghi', 0: 'jkl'}}
local_index = 0
remote_index = 0
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
# now the same
remote_suff = {'123': {None: 'abc', 0: 'def'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, [])
# now with a mis-matched None key (missing durable)
remote_suff = {'123': {None: 'ghi', 0: 'def'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
# now with bogus local index
local_suff = {'123': {None: 'abc', 99: 'def'}}
remote_suff = {'456': {None: 'ghi', 0: 'jkl'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
def test_process_job_primary_in_sync(self):
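        # when both neighbours' suffix hashes already match ours, process_job
        # only makes the suffix-hash REPLICATE requests and never starts ssync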
replicas = self.policy.object_ring.replicas
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [n for n in self.policy.object_ring.devs
if n != self.local_dev][:2]
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
left_hashes = {
'123': {left_index: 'hash', None: 'hash'},
'abc': {left_index: 'hash', None: 'hash'},
}
right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
right_hashes = {
'123': {right_index: 'hash', None: 'hash'},
'abc': {right_index: 'hash', None: 'hash'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, right_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
('10.0.0.1', '/sdb/0'),
('10.0.0.2', '/sdc/0'),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
self.assertFalse(ssync_calls)
def test_process_job_primary_not_in_sync(self):
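        # empty remote hashes mean both neighbours are out of sync, so both
        # suffixes are ssynced to each of them and rehashed afterwards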
replicas = self.policy.object_ring.replicas
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [n for n in self.policy.object_ring.devs
if n != self.local_dev][:2]
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
sync_to[0]['index'] = (frag_index - 1) % replicas
left_hashes = {}
sync_to[1]['index'] = (frag_index + 1) % replicas
right_hashes = {}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, left_hashes, right_hashes, right_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
('10.0.0.1', '/sdb/0'),
('10.0.0.1', '/sdb/0/123-abc'),
('10.0.0.2', '/sdc/0'),
('10.0.0.2', '/sdc/0/123-abc'),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
expected_ssync_calls = sorted([
('10.0.0.1', 0, set(['123', 'abc'])),
('10.0.0.2', 0, set(['123', 'abc'])),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['partition'],
set(c['suffixes']),
) for c in ssync_calls))
def test_process_job_sync_missing_durable(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [n for n in self.policy.object_ring.devs
if n != self.local_dev][:2]
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
# left hand side is in sync
left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
left_hashes = {
'123': {left_index: 'hash', None: 'hash'},
'abc': {left_index: 'hash', None: 'hash'},
}
# right hand side has fragment, but no durable (None key is whack)
right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
right_hashes = {
'123': {right_index: 'hash', None: 'hash'},
'abc': {right_index: 'hash', None: 'different-because-durable'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, right_hashes, right_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
('10.0.0.1', '/sdb/0'),
('10.0.0.2', '/sdc/0'),
('10.0.0.2', '/sdc/0/abc'),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
expected_ssync_calls = sorted([
('10.0.0.2', 0, ['abc']),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['partition'],
c['suffixes'],
) for c in ssync_calls))
def test_process_job_primary_some_in_sync(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [n for n in self.policy.object_ring.devs
if n != self.local_dev][:2]
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
left_hashes = {
'123': {left_index: 'hashX', None: 'hash'},
'abc': {left_index: 'hash', None: 'hash'},
}
right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
right_hashes = {
'123': {right_index: 'hash', None: 'hash'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, left_hashes, right_hashes, right_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
('10.0.0.1', '/sdb/0'),
('10.0.0.1', '/sdb/0/123'),
('10.0.0.2', '/sdc/0'),
('10.0.0.2', '/sdc/0/abc'),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
self.assertEqual(
dict(collections.Counter(
(c['node']['index'], tuple(c['suffixes']))
for c in ssync_calls)),
{(left_index, ('123', )): 1,
(right_index, ('abc', )): 1})
def test_process_job_primary_down(self):
partition = 0
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_nodes = self.policy.object_ring.get_part_nodes(partition)
sync_to = part_nodes[:2]
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'device': self.local_dev['device'],
'local_dev': self.local_dev,
}
non_local = {'called': 0}
def ssync_response_callback(*args):
# in this test, ssync fails on the first (primary sync_to) node
if non_local['called'] >= 1:
return True, {}
non_local['called'] += 1
return False, {}
expected_suffix_calls = set()
for node in part_nodes[:3]:
expected_suffix_calls.update([
(node['replication_ip'], '/%s/0' % node['device']),
(node['replication_ip'], '/%s/0/123-abc' % node['device']),
])
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*[200] * len(expected_suffix_calls),
body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
expected_ssync_calls = sorted([
('10.0.0.0', 0, set(['123', 'abc'])),
('10.0.0.1', 0, set(['123', 'abc'])),
('10.0.0.2', 0, set(['123', 'abc'])),
])
found_ssync_calls = sorted((
c['node']['ip'],
c['job']['partition'],
set(c['suffixes']),
) for c in ssync_calls)
self.assertEqual(expected_ssync_calls, found_ssync_calls)
def test_process_job_suffix_call_errors(self):
partition = 0
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_nodes = self.policy.object_ring.get_part_nodes(partition)
sync_to = part_nodes[:2]
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'device': self.local_dev['device'],
'local_dev': self.local_dev,
}
expected_suffix_calls = set((
node['replication_ip'], '/%s/0' % node['device']
) for node in part_nodes)
possible_errors = [404, 507, Timeout(), Exception('kaboom!')]
codes = [random.choice(possible_errors)
for r in expected_suffix_calls]
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes) as request_log:
self.reconstructor.process_job(job)
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
self.assertFalse(ssync_calls)
def test_process_job_handoff(self):
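        # a REVERT job ssyncs its suffixes to the single primary holding this
        # frag index and then asks it to rehash those suffixes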
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(200, body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
(sync_to[0]['ip'], '/%s/0/123-abc' % sync_to[0]['device']),
])
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
self.assertEqual(
sorted(collections.Counter(
(c['node']['ip'], c['node']['port'], c['node']['device'],
tuple(sorted(c['suffixes'])))
for c in ssync_calls).items()),
[((sync_to[0]['ip'], sync_to[0]['port'], sync_to[0]['device'],
('123', 'abc')), 1)])
def test_process_job_will_not_revert_to_handoff(self):
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
non_local = {'called': 0}
def ssync_response_callback(*args):
# in this test, ssync fails on the first (primary sync_to) node
if non_local['called'] >= 1:
return True, {}
non_local['called'] += 1
return False, {}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback), \
mocked_http_conn() as request_log:
self.reconstructor.process_job(job)
# failed ssync job should not generate a suffix rehash
self.assertEqual([], request_log.requests)
self.assertEqual(
sorted(collections.Counter(
(c['node']['ip'], c['node']['port'], c['node']['device'],
tuple(sorted(c['suffixes'])))
for c in ssync_calls).items()),
[((sync_to[0]['ip'], sync_to[0]['port'], sync_to[0]['device'],
('123', 'abc')), 1)])
def test_process_job_revert_is_handoff_fails(self):
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
handoff_nodes = list(self.policy.object_ring.get_more_nodes(partition))
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': handoff_nodes[-1],
}
def ssync_response_callback(*args):
# in this test ssync always fails, until we encounter ourselves in
            # the list of possible handoffs to sync to, so handoffs_remaining
# should increment
return False, {}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback), \
mocked_http_conn() as request_log:
self.reconstructor.process_job(job)
# failed ssync job should not generate a suffix rehash
self.assertEqual([], request_log.requests)
        # this is the ssync call to the primary (which fails) and nothing else!
self.assertEqual(
sorted(collections.Counter(
(c['node']['ip'], c['node']['port'], c['node']['device'],
tuple(sorted(c['suffixes'])))
for c in ssync_calls).items()),
[((sync_to[0]['ip'], sync_to[0]['port'], sync_to[0]['device'],
('123', 'abc')), 1)])
self.assertEqual(self.reconstructor.handoffs_remaining, 1)
def test_process_job_revert_cleanup(self):
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts = self.ts()
with df.create() as writer:
test_data = 'test data'
writer.write(test_data)
metadata = {
'X-Timestamp': ts.internal,
'Content-Length': len(test_data),
'Etag': md5(test_data).hexdigest(),
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
}
writer.put(metadata)
writer.commit(ts)
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': [suffix],
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
}
def ssync_response_callback(*args):
# success should not increment handoffs_remaining
return True, {ohash: {'ts_data': ts}}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
self.assertEqual([
(sync_to[0]['replication_ip'], '/%s/0/%s' % (
sync_to[0]['device'], suffix)),
], [
(r['ip'], r['path']) for r in request_log.requests
])
# hashpath is still there, but all files have been purged
files = os.listdir(df._datadir)
self.assertFalse(files)
# and more to the point, the next suffix recalc will clean it up
df_mgr = self.reconstructor._df_router[self.policy]
df_mgr.get_hashes(self.local_dev['device'], str(partition), [],
self.policy)
self.assertFalse(os.access(df._datadir, os.F_OK))
self.assertEqual(self.reconstructor.handoffs_remaining, 0)
def test_process_job_revert_cleanup_tombstone(self):
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts = self.ts()
df.delete(ts)
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': None,
'suffixes': [suffix],
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
}
def ssync_response_callback(*args):
return True, {ohash: {'ts_data': ts}}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
self.assertEqual([
(sync_to[0]['replication_ip'], '/%s/0/%s' % (
sync_to[0]['device'], suffix)),
], [
(r['ip'], r['path']) for r in request_log.requests
])
# hashpath is still there, but it's empty
self.assertEqual([], os.listdir(df._datadir))
def test_get_local_devices(self):
local_devs = self.reconstructor.get_local_devices()
self.assertEqual({'sda'}, local_devs)
@patch_policies(legacy_only=True)
def test_get_local_devices_with_no_ec_policy_env(self):
        # even when no EC policy is found on the server, it runs just as if
        # no EC device were found
self._configure_reconstructor()
self.assertEqual([], self.reconstructor.policies)
local_devs = self.reconstructor.get_local_devices()
self.assertEqual(set(), local_devs)
@patch_policies(legacy_only=True)
def test_reconstruct_with_no_ec_policy_env(self):
self._configure_reconstructor()
self.assertEqual([], self.reconstructor.policies)
collect_parts_results = []
_orig_collect_parts = self.reconstructor.collect_parts
def capture_collect_parts(**kwargs):
part_infos = _orig_collect_parts(**kwargs)
collect_parts_results.append(part_infos)
return part_infos
with mock.patch.object(self.reconstructor, 'collect_parts',
capture_collect_parts):
self.reconstructor.reconstruct()
# There is one call, and it returns an empty list
self.assertEqual([[]], collect_parts_results)
log_lines = self.logger.all_log_lines()
self.assertEqual(log_lines, {'info': [mock.ANY]})
line = log_lines['info'][0]
self.assertTrue(line.startswith('Nothing reconstructed '), line)
class TestReconstructFragmentArchive(BaseTestObjectReconstructor):
obj_path = '/a/c/o' # subclass overrides this
def setUp(self):
super(TestReconstructFragmentArchive, self).setUp()
self.obj_timestamp = self.ts()
self.obj_metadata = {
'name': self.obj_path,
'Content-Length': '0',
'ETag': 'etag',
'X-Timestamp': self.obj_timestamp.normal
}
def test_reconstruct_fa_no_errors(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
# make a hook point at
# swift.obj.reconstructor.ObjectReconstructor._get_response
called_headers = []
orig_func = object_reconstructor.ObjectReconstructor._get_response
def _get_response_hook(self, node, part, path, headers, policy):
called_headers.append(headers)
return orig_func(self, node, part, path, headers, policy)
codes, body_iter, headers = zip(*responses)
get_response_path = \
'swift.obj.reconstructor.ObjectReconstructor._get_response'
with mock.patch(get_response_path, _get_response_hook):
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self.obj_metadata)
self.assertEqual(0, df.content_length)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
self.assertEqual(len(part_nodes) - 1, len(called_headers),
'Expected %d calls, got %r' % (len(part_nodes) - 1,
called_headers))
for called_header in called_headers:
called_header = HeaderKeyDict(called_header)
self.assertIn('Content-Length', called_header)
self.assertEqual(called_header['Content-Length'], '0')
self.assertIn('User-Agent', called_header)
user_agent = called_header['User-Agent']
self.assertTrue(user_agent.startswith('obj-reconstructor'))
self.assertIn('X-Backend-Storage-Policy-Index', called_header)
self.assertEqual(called_header['X-Backend-Storage-Policy-Index'],
self.policy)
self.assertIn('X-Backend-Fragment-Preferences', called_header)
self.assertEqual(
[{'timestamp': self.obj_timestamp.normal, 'exclude': []}],
json.loads(called_header['X-Backend-Fragment-Preferences']))
# no error and warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_errors_works(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[4]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(4)
base_responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
base_responses.append((200, body, headers))
# since we're already missing a fragment a +2 scheme can only support
# one additional failure at a time
for error in (Timeout(), 404, Exception('kaboom!')):
            # take a fresh copy so each iteration injects a single error
            responses = list(base_responses)
error_index = random.randint(0, len(responses) - 1)
responses[error_index] = (error, '', '')
codes, body_iter, headers_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers_iter):
df = self.reconstructor.reconstruct_fa(
job, node, dict(self.obj_metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
def test_reconstruct_fa_error_with_invalid_header(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[4]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(4)
base_responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
base_responses.append((200, body, headers))
responses = base_responses
# force the test to exercise the handling of this bad response by
# sticking it in near the front
error_index = random.randint(0, self.policy.ec_ndata - 1)
status, body, headers = responses[error_index]
# one esoteric failure is a literal string 'None' in place of the
# X-Object-Sysmeta-EC-Frag-Index
stub_node_job = {'some_keys': 'foo', 'but_not': 'frag_index'}
headers['X-Object-Sysmeta-Ec-Frag-Index'] = str(
stub_node_job.get('frag_index'))
# oops!
self.assertEqual('None',
headers.get('X-Object-Sysmeta-Ec-Frag-Index'))
responses[error_index] = status, body, headers
codes, body_iter, headers_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers_iter):
df = self.reconstructor.reconstruct_fa(
job, node, dict(self.obj_metadata))
fixed_body = ''.join(df.reader())
# ... this bad response should be ignored like any other failure
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
def test_reconstruct_parity_fa_with_data_node_failure(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[-4]
# make up some data (trim some amount to make it unaligned with
# segment size)
test_data = ('rebuild' * self.policy.ec_segment_size)[:-454]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
# the scheme is 10+4, so this gets a parity node
broken_body = ec_archive_bodies.pop(-4)
responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
for error in (Timeout(), 404, Exception('kaboom!')):
# grab a data node index
error_index = random.randint(0, self.policy.ec_ndata - 1)
responses[error_index] = (error, '', '')
codes, body_iter, headers_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers_iter):
df = self.reconstructor.reconstruct_fa(
job, node, dict(self.obj_metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
def test_reconstruct_fa_exceptions_fails(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
policy = self.policy
possible_errors = [Timeout(), Exception('kaboom!')]
codes = [random.choice(possible_errors) for i in
range(policy.object_ring.replicas - 1)]
with mocked_http_conn(*codes):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, self.obj_metadata)
error_lines = self.logger.get_lines_for_level('error')
        # one error per failed replica request, plus one more error log to
        # report not enough responses to reconstruct.
self.assertEqual(policy.object_ring.replicas, len(error_lines))
for line in error_lines[:-1]:
self.assertIn("Trying to GET", line)
self.assertIn(
'Unable to get enough responses (%s error responses)'
% (policy.object_ring.replicas - 1),
error_lines[-1],
"Unexpected error line found: %s" % error_lines[-1])
# no warning
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_all_404s_fails(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
policy = self.policy
codes = [404 for i in range(policy.object_ring.replicas - 1)]
with mocked_http_conn(*codes):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, self.obj_metadata)
error_lines = self.logger.get_lines_for_level('error')
# only 1 log to report not enough responses
self.assertEqual(1, len(error_lines))
self.assertIn(
'Unable to get enough responses (%s error responses)'
% (policy.object_ring.replicas - 1),
error_lines[0],
"Unexpected error line found: %s" % error_lines[0])
# no warning
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_with_mixed_old_etag(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
# bad response
broken_body = ec_archive_bodies.pop(1)
ts = make_timestamp_iter()
bad_headers = get_header_frag_index(self, broken_body)
bad_headers.update({
'X-Object-Sysmeta-Ec-Etag': 'some garbage',
'X-Backend-Timestamp': next(ts).internal,
})
# good responses
responses = list()
t1 = next(ts).internal
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': t1})
responses.append((200, body, headers))
# include the one older frag with different etag in first responses
error_index = random.randint(0, self.policy.ec_ndata - 1)
error_headers = get_header_frag_index(self,
(responses[error_index])[1])
error_headers.update(bad_headers)
bad_response = (200, '', bad_headers)
responses[error_index] = bad_response
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self.obj_metadata)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# no error and warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_with_mixed_new_etag(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
ts = make_timestamp_iter()
# good responses
responses = list()
t0 = next(ts).internal
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': t0})
responses.append((200, body, headers))
# sanity check before negative test
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, dict(self.obj_metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# one newer etag won't spoil the bunch
new_index = random.randint(0, self.policy.ec_ndata - 1)
new_headers = get_header_frag_index(self, (responses[new_index])[1])
new_headers.update({'X-Object-Sysmeta-Ec-Etag': 'some garbage',
'X-Backend-Timestamp': next(ts).internal})
new_response = (200, '', new_headers)
responses[new_index] = new_response
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, dict(self.obj_metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# no error and warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_with_mixed_etag_with_same_timestamp(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
# good responses
responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
# sanity check before negative test
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, dict(self.obj_metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
        # a response at the same timestamp but with a different etag won't
        # spoil the bunch
        # N.B. (FIXME) if we choose the first response as garbage, the
        # reconstruction fails because all other *correct* frags will be
        # assumed to be garbage. To avoid that spurious failure, restrict
        # randint to [1, self.policy.ec_ndata - 1] so the first response
        # always has the correct etag to reconstruct from
new_index = random.randint(1, self.policy.ec_ndata - 1)
new_headers = get_header_frag_index(self, (responses[new_index])[1])
new_headers.update({'X-Object-Sysmeta-Ec-Etag': 'some garbage'})
new_response = (200, '', new_headers)
responses[new_index] = new_response
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, dict(self.obj_metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# expect an error log but no warnings
error_log_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_log_lines))
self.assertIn(
'Mixed Etag (some garbage, %s) for 10.0.0.1:1001/sdb/0%s '
'policy#%s frag#1' %
(etag, self.obj_path.decode('utf8'), int(self.policy)),
error_log_lines[0])
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_with_mixed_not_enough_etags_fail(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
ec_archive_dict = dict()
ts = make_timestamp_iter()
# create 3 different ec bodies
for i in range(3):
body = test_data[i:]
archive_bodies = encode_frag_archive_bodies(self.policy, body)
# pop the index to the destination node
archive_bodies.pop(1)
ec_archive_dict[
(md5(body).hexdigest(), next(ts).internal)] = archive_bodies
responses = list()
        # fill out the response list with bodies from the 3 different etags
for etag, ts in itertools.cycle(ec_archive_dict):
body = ec_archive_dict[(etag, ts)].pop(0)
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': ts})
responses.append((200, body, headers))
if len(responses) >= (self.policy.object_ring.replicas - 1):
break
        # sanity: there are 3 different etags and no single etag has enough
        # (>= ec_ndata) bodies to reconstruct from
etag_count = collections.Counter(
[in_resp_headers['X-Object-Sysmeta-Ec-Etag']
for _, _, in_resp_headers in responses])
self.assertEqual(3, len(etag_count))
for etag, count in etag_count.items():
self.assertLess(count, self.policy.ec_ndata)
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, self.obj_metadata)
error_lines = self.logger.get_lines_for_level('error')
# 1 error log per etag to report not enough responses
self.assertEqual(3, len(error_lines))
for error_line in error_lines:
for expected_etag, ts in ec_archive_dict:
if expected_etag in error_line:
break
else:
self.fail(
"no expected etag %s found: %s" %
(list(ec_archive_dict), error_line))
# remove the found etag which should not be found in the
# following error lines
del ec_archive_dict[(expected_etag, ts)]
expected = 'Unable to get enough responses (%s/10) to ' \
'reconstruct 10.0.0.1:1001/sdb/0%s policy#0 ' \
'frag#1 with ETag' % \
(etag_count[expected_etag],
self.obj_path.decode('utf8'))
self.assertIn(
expected, error_line,
"Unexpected error line found: Expected: %s Got: %s"
% (expected, error_line))
# no warning
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_finds_itself_does_not_fail(self):
# verify that reconstruction of a missing frag can cope with finding
# that missing frag in the responses it gets from other nodes while
# attempting to rebuild the missing frag
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
broken_node = random.randint(0, self.policy.ec_ndata - 1)
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
# instead of popping the broken body, we'll just leave it in the list
# of responses and take away something else.
broken_body = ec_archive_bodies[broken_node]
ec_archive_bodies = ec_archive_bodies[:-1]
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
return headers
responses = [(200, body, make_header(body))
for body in ec_archive_bodies]
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, part_nodes[broken_node], self.obj_metadata)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# no error, no warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
# the found own frag will be reported in the debug message
debug_log_lines = self.logger.get_lines_for_level('debug')
# redundant frag found once in first ec_ndata responses
self.assertIn(
'Found existing frag #%s at' % broken_node,
debug_log_lines[0])
        # N.B. in the future we could skip these checks entirely, because
        # sending the found copy rather than reconstructing will certainly
        # save resources. Another reason we avoid feeding the destination
        # index fragment into the reconstruct function is that doing so
        # causes a bunch of warning logs from liberasurecode[1].
        # 1: https://github.com/openstack/liberasurecode/blob/
        #    master/src/erasurecode.c#L870
log_prefix = 'Reconstruct frag #%s with frag indexes' % broken_node
self.assertIn(log_prefix, debug_log_lines[1])
self.assertFalse(debug_log_lines[2:])
got_frag_index_list = json.loads(
debug_log_lines[1][len(log_prefix):])
self.assertNotIn(broken_node, got_frag_index_list)
def test_reconstruct_fa_finds_duplicate_does_not_fail(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
# add some duplicates
num_duplicates = self.policy.ec_nparity - 1
ec_archive_bodies = (ec_archive_bodies[:num_duplicates] +
ec_archive_bodies)[:-num_duplicates]
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
return headers
responses = [(200, body, make_header(body))
for body in ec_archive_bodies]
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self.obj_metadata)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# no error and warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
debug_log_lines = self.logger.get_lines_for_level('debug')
self.assertEqual(1, len(debug_log_lines))
expected_prefix = 'Reconstruct frag #1 with frag indexes'
self.assertIn(expected_prefix, debug_log_lines[0])
got_frag_index_list = json.loads(
debug_log_lines[0][len(expected_prefix):])
self.assertNotIn(1, got_frag_index_list)
def test_reconstruct_fa_missing_headers(self):
        # These are negative tests asserting what happens when expected
        # headers are missing from the responses used to gather fragments
        # for reconstruction
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update(
{'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': self.obj_timestamp.internal})
return headers
def test_missing_header(missing_header, warning_extra):
self.logger._clear()
responses = [(200, body, make_header(body))
for body in ec_archive_bodies]
# To drop the header from the response[0], set None as the value
# explicitly instead of deleting the key because if no key exists
# in the dict, fake_http_connect will insert some key/value pairs
# automatically (e.g. X-Backend-Timestamp)
responses[0][2].update({missing_header: None})
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers) as mock_conn:
df = self.reconstructor.reconstruct_fa(
job, node, self.obj_metadata)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# no errors
self.assertFalse(self.logger.get_lines_for_level('error'))
# ...but warning for the missing header
warning_log_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_log_lines))
path = unquote(
'%(ip)s:%(port)d%(path)s' % mock_conn.requests[0]
).encode('latin1').decode('utf8')
expected_warning = 'Invalid resp from %s policy#0%s' % (
path, warning_extra)
self.assertIn(expected_warning, warning_log_lines)
test_missing_header(
'X-Object-Sysmeta-Ec-Frag-Index',
' (invalid X-Object-Sysmeta-Ec-Frag-Index: None)')
test_missing_header(
'X-Object-Sysmeta-Ec-Etag',
', frag index 0 (missing Etag)')
test_missing_header(
'X-Backend-Timestamp',
', frag index 0 (missing X-Backend-Timestamp)')
def test_reconstruct_fa_invalid_frag_index_headers(self):
        # These are negative tests asserting what happens when the expected
        # EC frag index header has an invalid value in the responses used to
        # gather fragments for reconstruction
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
return headers
def test_invalid_ec_frag_index_header(invalid_frag_index):
self.logger._clear()
responses = [(200, body, make_header(body))
for body in ec_archive_bodies]
responses[0][2].update({
'X-Object-Sysmeta-Ec-Frag-Index': invalid_frag_index})
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers) as mock_conn:
df = self.reconstructor.reconstruct_fa(
job, node, self.obj_metadata)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# no errors
self.assertFalse(self.logger.get_lines_for_level('error'))
# ...but warning for the invalid header
warning_log_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_log_lines))
path = unquote(
'%(ip)s:%(port)d%(path)s' % mock_conn.requests[0]
).encode('latin1').decode('utf8')
expected_warning = (
'Invalid resp from %s policy#0 '
'(invalid X-Object-Sysmeta-Ec-Frag-Index: %r)'
% (path, invalid_frag_index))
self.assertIn(expected_warning, warning_log_lines)
for value in ('None', 'invalid'):
test_invalid_ec_frag_index_header(value)
@patch_policies(with_ec_default=True)
class TestReconstructFragmentArchiveUTF8(TestReconstructFragmentArchive):
# repeat superclass tests with an object path that contains non-ascii chars
obj_path = '/a/c/o\xc3\xa8'
@patch_policies([ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
ec_segment_size=4096,
ec_duplication_factor=2)],
fake_ring_args=[{'replicas': 28}])
class TestObjectReconstructorECDuplicationFactor(TestObjectReconstructor):
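    # repeat the TestObjectReconstructor suite under an EC policy with
    # ec_duplication_factor=2 (10+4 unique fragments spread over a
    # 28-replica ring), plus the duplicate-fragment rebuild checks below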
def setUp(self):
super(TestObjectReconstructorECDuplicationFactor, self).setUp()
self.fabricated_ring = FabricatedRing(replicas=28, devices=56)
def _test_reconstruct_with_duplicate_frags_no_errors(self, index):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[index]
metadata = {
'name': '/a/c/o',
'Content-Length': 0,
'ETag': 'etag',
'X-Timestamp': '1234567890.12345',
}
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(index)
responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
# make a hook point at
# swift.obj.reconstructor.ObjectReconstructor._get_response
called_headers = []
orig_func = object_reconstructor.ObjectReconstructor._get_response
def _get_response_hook(self, node, part, path, headers, policy):
called_headers.append(headers)
return orig_func(self, node, part, path, headers, policy)
# need parity + 1 node failures to reach duplicated fragments
failed_start_at = (
self.policy.ec_n_unique_fragments - self.policy.ec_nparity - 1)
# set Timeout for node #9, #10, #11, #12, #13
for i in range(self.policy.ec_nparity + 1):
responses[failed_start_at + i] = (Timeout(), '', '')
codes, body_iter, headers = zip(*responses)
get_response_path = \
'swift.obj.reconstructor.ObjectReconstructor._get_response'
with mock.patch(get_response_path, _get_response_hook):
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, metadata)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
for called_header in called_headers:
called_header = HeaderKeyDict(called_header)
self.assertIn('Content-Length', called_header)
self.assertEqual(called_header['Content-Length'], '0')
self.assertIn('User-Agent', called_header)
user_agent = called_header['User-Agent']
self.assertTrue(user_agent.startswith('obj-reconstructor'))
def test_reconstruct_with_duplicate_frags_no_errors(self):
        # any fragment can be broken
for index in range(28):
self._test_reconstruct_with_duplicate_frags_no_errors(index)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "272db24545e2af45288b506237252565",
"timestamp": "",
"source": "github",
"line_count": 4686,
"max_line_length": 79,
"avg_line_length": 43.275501493811355,
"alnum_prop": 0.5292742703006573,
"repo_name": "clayg/swift",
"id": "942703f11e19439af6043236849efe1d43245e12",
"size": "203383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/obj/test_reconstructor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "248"
},
{
"name": "PHP",
"bytes": "377"
},
{
"name": "Python",
"bytes": "8555598"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
} |
import abc
import logging
class Logger(abc.ABC):
@abc.abstractmethod
def error(self, msg):
pass
@abc.abstractmethod
def warning(self, msg):
pass
@abc.abstractmethod
def info(self, msg):
pass
class PythonOnlyLogger(Logger):
def __init__(self, skip_logging_configuration=False):
self.logger = logging.getLogger("hail")
self.logger.setLevel(logging.INFO)
if not skip_logging_configuration:
logging.basicConfig()
def error(self, msg):
self.logger.error(msg)
def warning(self, msg):
self.logger.warning(msg)
def info(self, msg):
self.logger.info(msg)
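# minimal usage sketch (illustrative only, not part of the original module):
#
#     log = PythonOnlyLogger()
#     log.info("pipeline started")
#     log.warning("falling back to defaults")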
| {
"content_hash": "3b1db79de87f556e319d39e790794b18",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 57,
"avg_line_length": 20.575757575757574,
"alnum_prop": 0.6215022091310751,
"repo_name": "danking/hail",
"id": "26301b714a48355a8f728fd612d40f5eccb3b986",
"size": "679",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "hail/python/hail/hail_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "289"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CSS",
"bytes": "29124"
},
{
"name": "Dockerfile",
"bytes": "13073"
},
{
"name": "Emacs Lisp",
"bytes": "252"
},
{
"name": "HTML",
"bytes": "151709"
},
{
"name": "Java",
"bytes": "32302"
},
{
"name": "JavaScript",
"bytes": "3309"
},
{
"name": "Jupyter Notebook",
"bytes": "162395"
},
{
"name": "Makefile",
"bytes": "73914"
},
{
"name": "Python",
"bytes": "4149266"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "9075"
},
{
"name": "Scala",
"bytes": "4426573"
},
{
"name": "Shell",
"bytes": "49103"
},
{
"name": "TeX",
"bytes": "7125"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.insert(0, parent)
import pydmmt
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Dynamic Meta Models Tools in Python'
copyright = u'2016, Emanuele Mason'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pydmmt.__version__
# The full version, including alpha/beta/rc tags.
release = pydmmt.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydmmtdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pydmmt.tex', u'Dynamic Meta Models Tools in Python Documentation',
u'Emanuele Mason', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pydmmt', u'Dynamic Meta Models Tools in Python Documentation',
[u'Emanuele Mason'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pydmmt', u'Dynamic Meta Models Tools in Python Documentation',
u'Emanuele Mason', 'pydmmt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "f936dc3e6680b73f25148efa22697b20",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 80,
"avg_line_length": 32.29045643153527,
"alnum_prop": 0.7047031611410949,
"repo_name": "Lordmzn/pydmmt",
"id": "e874735f88060179499c852922be658a874db446",
"size": "8203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1290"
},
{
"name": "Python",
"bytes": "39185"
}
],
"symlink_target": ""
} |
from contextlib import ContextDecorator, contextmanager
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
)
class TransactionManagementError(ProgrammingError):
"""Transaction management is used improperly."""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""Get the autocommit status of the connection."""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""Set the autocommit status of the connection."""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""Commit a transaction."""
get_connection(using).commit()
def rollback(using=None):
"""Roll back a transaction."""
get_connection(using).rollback()
def savepoint(using=None):
"""
Create a savepoint (if supported and required by the backend) inside the
current transaction. Return an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Roll back the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commit the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""Get the "needs rollback" flag -- for *advanced use* only."""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, trigger a rollback when exiting the innermost
enclosing atomic block that has `savepoint=True` (that's the default). Use
this to force a rollback without raising an exception.
When `rollback` is `False`, prevent such a rollback. Use this only after
rolling back to a known-good state! Otherwise, you break the atomic block
and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
@contextmanager
def mark_for_rollback_on_error(using=None):
"""
Internal low-level utility to mark a transaction as "needs rollback" when
an exception is raised while not enforcing the enclosed block to be in a
transaction. This is needed by Model.save() and friends to avoid starting a
transaction when in autocommit mode and a single query is executed.
It's equivalent to:
connection = get_connection(using)
if connection.get_autocommit():
yield
else:
with transaction.atomic(using=using, savepoint=False):
yield
but it uses low-level utilities to avoid performance overhead.
"""
try:
yield
except Exception:
connection = get_connection(using)
if connection.in_atomic_block:
connection.needs_rollback = True
raise
def on_commit(func, using=None):
"""
Register `func` to be called when the current transaction is committed.
If the current transaction is rolled back, `func` will not be called.
"""
get_connection(using).on_commit(func)
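# Minimal usage sketch for on_commit (illustrative only; the model instance
# and callback below are hypothetical):
#
#     from django.db import transaction
#
#     def notify_search_index():
#         ...  # runs only if the surrounding transaction commits
#
#     with transaction.atomic():
#         document.save()
#         transaction.on_commit(notify_search_index)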
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
"""
Guarantee the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
This is a private API.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# sqlite3 in Python < 3.6 doesn't handle transactions and
# savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
connection.set_autocommit(False, force_begin_transaction_with_broken_autocommit=True)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
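# Minimal usage sketch for atomic (illustrative only; the model instances
# below are hypothetical). Nested blocks become savepoints, so an inner
# failure can be caught without aborting the outer transaction:
#
#     from django.db import IntegrityError, transaction
#
#     @transaction.atomic
#     def create_order(data):
#         ...
#
#     with transaction.atomic():            # outer transaction
#         order.save()
#         try:
#             with transaction.atomic():    # inner savepoint
#                 audit_entry.save()
#         except IntegrityError:
#             pass  # outer transaction remains usable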
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
| {
"content_hash": "e0623dac525947eb6d476af04c40ba63",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 97,
"avg_line_length": 38.170347003154575,
"alnum_prop": 0.5946280991735537,
"repo_name": "frankvdp/django",
"id": "901d8b62e7a5152fd55754df42cd2f18ad71c154",
"size": "12100",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "django/db/transaction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52098"
},
{
"name": "HTML",
"bytes": "174031"
},
{
"name": "JavaScript",
"bytes": "249623"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11310936"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
try:
# Try using ez_setup to install setuptools if not already installed.
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
# Ignore import error and assume Python 3 which already has setuptools.
pass
from setuptools import setup, find_packages
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'Adafruit_CharLCD',
version = '1.1.1',
author = 'Tony DiCola',
author_email = '[email protected]',
description = 'Library to drive character LCD display and plate.',
license = 'MIT',
classifiers = classifiers,
url = 'https://github.com/adafruit/Adafruit_Python_CharLCD/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.4.0'],
install_requires = ['Adafruit-GPIO>=0.4.0'],
packages = find_packages())
| {
"content_hash": "1d46a2507d1d143373f21ec7802009d0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 118,
"avg_line_length": 44,
"alnum_prop": 0.5833333333333334,
"repo_name": "adafruit/Adafruit_Python_CharLCD",
"id": "5c82f4856b69ef7866090d84e1baea184f1ad0cd",
"size": "1320",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32501"
}
],
"symlink_target": ""
} |
learned_opt = "@HyperV2()"
sample_task_family = "phase_two_distribution"
init_params = "..."
# pyformat: disable
gin_params = {
"HyperV2.param_inits": 256,
"HyperV2.lstm_hidden_size": 512,
"HyperV2.use_bugged_loss_features": False,
"run_train.lopt": learned_opt,
"run_train.outer_learner_fn": "@GradientLearner",
"run_train.num_estimators": 8, # high number for more sync training.
"run_train.run_num_estimators_per_gradient": 1, # send gradients up every step!
"run_train.trainer_batch_size": 512,
"gradient_worker_compute.extra_metrics": False,
# Add gradient accumulation!
"run_train.staleness": 50, # at max 5 steps in the past.
"run_train.stochastic_resample_frequency": 1000,
"run_train.summary_every_n": 25,
"run_train.num_steps": 100_000,
"periodically_save_checkpoint.time_interval": 60,
"GradientLearner.init_theta_from_path": init_params,
"GradientLearner.reset_outer_iteration": True,
"GradientLearner.theta_opt": "@GradientClipOptimizer()",
"GradientClipOptimizer.opt": "@GradientAccumulator()",
"GradientAccumulator.opt": "@Adam()",
"Adam.learning_rate": 3e-4,
"GradientAccumulator.num_average": 10,
"GradientLearner.meta_init": learned_opt,
"build_gradient_estimators.sample_task_family_fn": f"@{sample_task_family}",
"build_gradient_estimators.gradient_estimator_fn": "@FullESOrPMAP",
"LogUniformLengthSchedule.min_length": 200,
"LogUniformLengthSchedule.max_length": 20000,
"VectorizedLOptTruncatedStep.trunc_sched": "@NeverEndingTruncationSchedule()",
"PMAPFullES.truncation_schedule": "@LogUniformLengthSchedule()",
"FullES.truncation_schedule": "@LogUniformLengthSchedule()",
"FullES.loss_type": "last_recompute",
"FullES.recompute_samples": 100,
"FullES.sign_delta_loss_scalar": 1.0,
"VectorizedLOptTruncatedStep.random_initial_iteration_offset": 0,
"VectorizedLOptTruncatedStep.num_tasks": 8,
}
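# Illustrative sketch (not part of the original config): dicts like
# `gin_params` above are typically flattened into gin binding strings of the
# form "configurable.param = value" before being handed to gin. The helper
# below is an assumed example of that flattening, not an API of
# learned_optimization or gin itself.
def _bindings_from_dict(params):
  """Render a {key: value} dict as a list of gin-style binding strings."""
  lines = []
  for key, value in params.items():
    # Quote plain strings; leave gin references ("@..." / "%...") unquoted.
    if isinstance(value, str) and not value.startswith(("@", "%")):
      lines.append(f'{key} = "{value}"')
    else:
      lines.append(f"{key} = {value}")
  return lines
# Example: _bindings_from_dict({"Adam.learning_rate": 3e-4})
# yields ['Adam.learning_rate = 0.0003'].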
gin_import = [
"learned_optimization.tasks.quadratics",
"learned_optimization.tasks.fixed.*",
"learned_optimization.research.hyper_lopt.tasks.*",
"learned_optimization.research.hyper_lopt.hyper_v2",
"learned_optimization.learned_optimizers.*",
"learned_optimization.optimizers.*",
"learned_optimization.outer_trainers.*",
"learned_optimization.research.hyper_lopt.gradient_estimators",
]
# Configure the evaluation jobs.
# This starts up 3 evaluation jobs, each with its own pool of eval workers
# (sized by eval_chief_config.num_workers).
eval_param_list = [
{
"run_evaluation_chief.evaluation_set": "@eval_sample_task_family()",
"eval_sample_task_family.n_tasks": 2,
"eval_sample_task_family.seeds": 20,
"eval_sample_task_family.sample_task_family_name": sample_task_family,
"eval_sample_task_family.steps": 10000,
"eval_chief_config.num_workers": 20,
"eval_chief_config.chief_name": "chief_single_task10k",
"eval_chief_config.learned_opt": learned_opt,
},
{
"run_evaluation_chief.evaluation_set": "@eval_sample_task_family()",
"eval_sample_task_family.n_tasks": 2,
"eval_sample_task_family.seeds": 20,
"eval_sample_task_family.sample_task_family_name": sample_task_family,
"eval_sample_task_family.steps": 1000,
"eval_chief_config.num_workers": 20,
"eval_chief_config.chief_name": "chief_single_task1k",
"eval_chief_config.learned_opt": learned_opt,
},
{
"run_evaluation_chief.evaluation_set": "@eval_small_time_fixed()",
"eval_chief_config.num_workers": 50,
"eval_chief_config.chief_name": "eval_small_fixed",
"eval_chief_config.learned_opt": learned_opt,
"write_results_thread_main.values_to_metrics_fns": (
"metrics_fn_for_speedup_normalized",
"metrics_fn_for_each_task",
"metrics_fn_for_aggregate_normalized_losses",
"metrics_fn_for_aggregate_unnormalized_losses",
"metrics_fn_for_time",
"metrics_fn_for_checkpoint"
),
"multi_task_training_curves.n_eval_batches_vec": 5,
"multi_task_training_curves.n_eval_batches": 2,
"multi_task_training_curves.last_eval_batches": 40,
"multi_task_training_curves.eval_every": 200,
}
]
| {
"content_hash": "d556e3d257ab0309217ecd5c00d15a23",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 83,
"avg_line_length": 35.39495798319328,
"alnum_prop": 0.6783000949667616,
"repo_name": "google/learned_optimization",
"id": "aad5017f71c53f55db4dc6a90d46ca5969e67185",
"size": "4803",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "learned_optimization/research/general_lopt/configs/large_scale_phase2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "177493"
},
{
"name": "Python",
"bytes": "1290675"
}
],
"symlink_target": ""
} |
"""Tests for tree_format."""
| {
"content_hash": "2a1e556def24b6d32b3bb9db64c1504e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.6206896551724138,
"repo_name": "jml/tree-format",
"id": "7c31dadf7820645266f3cc78f865179e6777575c",
"size": "29",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tree_format/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14433"
}
],
"symlink_target": ""
} |
import tools.pluginmanager as pm
from DelegateBase import DelegateBase
from HandlerBase import HandlerBase
from Exceptions import *
from tools.cert_utils import *
from tools.chapi_log import *
from MethodContext import *
ma_logger = logging.getLogger('mav1')
# RPC handler for Member Authority (MA) API calls
class MAv1Handler(HandlerBase):
def __init__(self):
super(MAv1Handler, self).__init__(ma_logger)
def get_version(self, options={}):
"""Return version of MA API including object model
This call is unprotected: no checking of credentials
"""
with MethodContext(self, MA_LOG_PREFIX, 'get_version',
{}, [], options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.get_version(options, mc._session)
return mc._result
# Generic V2 service methods
def create(self, type, credentials, options):
if type == "MEMBER":
msg = "method create not supported for MEMBER"
result = self._errorReturn(CHAPIv1ArgumentError(msg))
elif type == "KEY":
result = self.create_key(credentials, options)
else:
msg = "Invalid type: %s" % (type)
result = self._errorReturn(CHAPIv1ArgumentError(msg))
return result
def update(self, type, urn, credentials, options):
if type == "MEMBER":
result = self.update_member_info(urn, credentials, options)
elif type == "KEY":
result = self.update_key(urn, credentials, options)
else:
msg = "Invalid type: %s" % (type)
result = self._errorReturn(CHAPIv1ArgumentError(msg))
return result
def delete(self, type, urn, credentials, options):
if type == "MEMBER":
msg = "method delete not supported for MEMBER"
result = self._errorReturn(CHAPIv1ArgumentError(msg))
elif type == "KEY":
result = self.delete_key(urn, credentials, options)
else:
msg = "Invalid type: %s" % (type)
result = self._errorReturn(CHAPIv1ArgumentError(msg))
return result
def lookup(self, type, credentials, options):
if not isinstance(options, dict):
msg = "Options argument must be dictionary"
return self._errorReturn(CHAPIv1ArgumentError(msg))
if type == "MEMBER":
result = self.lookup_allowed_member_info(credentials, options)
elif type == "KEY":
# In v1 we return a dictionary (indexed by member URN)
# of a list of dictionaries, one for each key of that user
            # In v2 we return a dictionary (indexed by KEY_ID)
# with a dictionary for that key
# Make sure we get the KEY_ID back
            if 'filter' in options and 'KEY_ID' not in options['filter']:
options['filter'].append('KEY_ID')
result = \
self.lookup_keys(credentials, options)
if result['code'] == NO_ERROR:
v2_result = {}
# chapi_info("LOOKUP", "RESULT = %s" % result)
for member_urn, key_infos in result['value'].items():
for key_info in key_infos:
# chapi_info("LOOKUP", "MURN = %s KEY_INFO = %s" % \
# (member_urn, key_info))
if 'KEY_MEMBER' not in key_info:
key_info['KEY_MEMBER'] = member_urn
key_id = key_info['KEY_ID']
v2_result[key_id] = key_info
result = self._successReturn(v2_result)
else:
msg = "Invalid type: %s" % (type)
result = self._errorReturn(CHAPIv1ArgumentError(msg))
return result
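    # Illustrative note (added; not in the original source): the reshaping in
    # the KEY branch above converts the v1 lookup_keys result, keyed by member
    # URN, into the v2 form keyed by KEY_ID. Roughly (URNs and ids made up):
    #   v1: {"urn:...+user+alice": [{"KEY_ID": "k1", ...},
    #                               {"KEY_ID": "k2", ...}]}
    #   v2: {"k1": {"KEY_ID": "k1", "KEY_MEMBER": "urn:...+user+alice", ...},
    #        "k2": {"KEY_ID": "k2", "KEY_MEMBER": "urn:...+user+alice", ...}}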
# MEMBER service methods
def lookup_allowed_member_info(self, credentials, options):
with MethodContext(self, MA_LOG_PREFIX, 'lookup_allowed_member_info',
{}, credentials, options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.lookup_allowed_member_info(mc._client_cert,
credentials,
options,
mc._session)
return mc._result
def lookup_public_member_info(self, credentials, options):
"""Return public information about members specified in options
filter and query fields
This call is unprotected: no checking of credentials
"""
with MethodContext(self, MA_LOG_PREFIX, 'lookup_public_member_info',
{}, credentials, options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.lookup_public_member_info(mc._client_cert,
credentials,
options,
mc._session)
return mc._result
def lookup_private_member_info(self, credentials, options):
"""Return private information about members specified in options
filter and query fields
This call is protected
Authorized by client cert and credentials
"""
with MethodContext(self, MA_LOG_PREFIX, 'lookup_private_member_info',
{}, credentials, options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.lookup_private_member_info(mc._client_cert,
credentials,
options,
mc._session)
return mc._result
def lookup_identifying_member_info(self, credentials, options):
"""Return identifying information about members specified in options
filter and query fields
This call is protected
Authorized by client cert and credentials
"""
with MethodContext(self, MA_LOG_PREFIX,
'lookup_identifying_member_info',
{}, credentials, options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.lookup_identifying_member_info(mc._client_cert,
credentials,
options,
mc._session)
return mc._result
def lookup_public_identifying_member_info(self, credentials, options):
"""Return both public and identifying information about members
specified in options filter and query fields
This call is protected
Authorized by client cert and credentials
"""
with MethodContext(self, MA_LOG_PREFIX,
'lookup_public_identifying_member_info',
{}, credentials, options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.lookup_public_identifying_member_info(mc._client_cert,
credentials,
options,
mc._session)
return mc._result
def lookup_login_info(self, credentials, options):
"""Return member public cert/key and private key for user by EPPN.
For authorities only.
This call is protected
Authorized by client cert and credentials
"""
with MethodContext(self, MA_LOG_PREFIX,
'lookup_login_info',
{}, credentials, options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.lookup_login_info(mc._client_cert,
credentials,
options,
mc._session)
return mc._result
def get_credentials(self, member_urn, credentials, options):
"""Get credentials for given user
This call is protected
Authorization based on client cert and given credentials
"""
with MethodContext(self, MA_LOG_PREFIX,
'get_credentials',
{'member_urn': member_urn},
credentials, options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.get_credentials(mc._client_cert,
member_urn,
credentials,
options,
mc._session)
return mc._result
def update_member_info(self, member_urn, credentials, options):
"""Update given member with new data provided in options
This call is protected
Authorized by client cert and credentials
"""
with MethodContext(self, MA_LOG_PREFIX,
'update_member_info',
{'member_urn': member_urn},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.update_member_info(mc._client_cert,
member_urn,
credentials,
options,
mc._session)
return mc._result
def create_member(self, attributes, credentials, options):
"""Create a new member using the specified attributes. Attribute
email is required. Returns the attributes of the resulting member
record, including the uid and urn.
This call is protected
Authorized by client cert and credentials
"""
with MethodContext(self, MA_LOG_PREFIX,
'create_member',
{'attributes': attributes},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.create_member(mc._client_cert,
attributes,
credentials,
options,
mc._session)
return mc._result
# KEY service methods
def create_key(self, credentials, options):
"""Create a record for a key pair for given member
        Arguments:
            options: 'fields' containing the fields for the key pair being
                stored
Return:
Dictionary of name/value pairs for created key record
including the KEY_ID
Should return DUPLICATE_ERROR if a key with the same KEY_ID is
already stored for given user
"""
with MethodContext(self, MA_LOG_PREFIX,
'create_key',
{}, credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.create_key(mc._client_cert,
credentials,
options,
mc._session)
return mc._result
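    # Hedged example (added; not in the original source): a create_key call
    # supplies the key's attributes under options['fields']. The field names
    # below are assumptions for illustration; the authoritative field list
    # comes from the MA object model (see get_version).
    #   options = {'fields': {'KEY_MEMBER': member_urn,
    #                         'KEY_PUBLIC': public_key_text,
    #                         'KEY_DESCRIPTION': 'laptop key'}}
    #   result = ma_handler.create_key(credentials, options)
    #   key_id = result['value']['KEY_ID']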
def delete_key(self, key_id, credentials, options):
"""Delete a specific key pair for given member
Arguments:
key_id: KEY_ID (unique for member/key fingerprint) of key(s) to
be deleted
Return:
True if succeeded
Should return ARGUMENT_ERROR if no such key is found for user
"""
with MethodContext(self, MA_LOG_PREFIX,
'delete_key',
{'key_id': key_id},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.delete_key(mc._client_cert,
key_id,
credentials,
options,
mc._session)
return mc._result
def update_key(self, key_id, credentials, options):
"""
Update the details of a key pair for given member
Arguments:
            key_id: KEY_ID (fingerprint) of the key pair to be updated
options: 'fields' containing fields for key pairs that are permitted
for update
Return:
True if succeeded
Should return ARGUMENT_ERROR if no such key is found for user
"""
with MethodContext(self, MA_LOG_PREFIX,
'update_key',
{'key_id': key_id},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.update_key(mc._client_cert,
key_id,
credentials,
options,
mc._session)
return mc._result
def lookup_keys(self, credentials, options):
"""Lookup keys for given match criteria return fields in given
# filter criteria
#
# Arguments:
# options: 'match' for query match criteria, 'filter' for fields
# to be returned
# Return:
# Dictionary (indexed by member_urn) of dictionaries containing
# name/value pairs for all keys registered for that given user.
"""
with MethodContext(self, MA_LOG_PREFIX,
'lookup_keys',
{}, credentials, options, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.lookup_keys(mc._client_cert,
credentials,
options,
mc._session)
return mc._result
def create_certificate(self, member_urn, credentials, options):
"""Methods for managing user certs
# options:
# 'csr' => certificate signing request (if null, create cert/key)
"""
with MethodContext(self, MA_LOG_PREFIX,
'create_certificate',
{'member_urn': member_urn},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.create_certificate(mc._client_cert,
member_urn,
credentials,
options,
mc._session)
return mc._result
# ClientAuth API
def list_clients(self):
"""
"""
with MethodContext(self, MA_LOG_PREFIX,
'list_clients',
{}, [], {}, read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.list_clients(mc._client_cert, mc._session)
return mc._result
def list_authorized_clients(self, member_id):
"""
"""
with MethodContext(self, MA_LOG_PREFIX,
'list_authorized_clients',
{'member_id': member_id}, [], {},
read_only=True) as mc:
if not mc._error:
mc._result = \
self._delegate.list_authorized_clients(mc._client_cert,
member_id,
mc._session)
return mc._result
def authorize_client(self, member_id, client_urn, authorize_sense):
"""
"""
with MethodContext(self, MA_LOG_PREFIX,
'authorize_client',
{'member_id': member_id,
'client_urn': client_urn,
'authorize_sense': authorize_sense},
[], {}, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.authorize_client(mc._client_cert,
member_id,
client_urn,
authorize_sense,
mc._session)
return mc._result
# member disable API
def enable_user(self, member_urn, enable_sense, credentials, options):
"""Enable or disable a user based on URN. If enable_sense is False, then user
will be disabled.
"""
with MethodContext(self, MA_LOG_PREFIX,
'enable_user',
{'member_urn': member_urn,
'enable_sense': enable_sense},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.enable_user(mc._client_cert,
member_urn,
enable_sense,
credentials,
options,
mc._session)
return mc._result
# member privilege (private)
def add_member_privilege(self, member_uid, privilege, credentials,
options):
"""Add a privilege to a member.
privilege is either OPERATOR or PROJECT_LEAD
"""
with MethodContext(self, MA_LOG_PREFIX,
'add_member_privilege',
{'member_uid': member_uid, 'privilege': privilege},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.add_member_privilege(mc._client_cert,
member_uid,
privilege,
credentials,
options,
mc._session)
return mc._result
def revoke_member_privilege(self, member_uid, privilege, credentials,
options):
"""Revoke a privilege for a member."""
with MethodContext(self, MA_LOG_PREFIX,
'revoke_member_privilege',
{'member_uid': member_uid, 'privilege': privilege},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.revoke_member_privilege(mc._client_cert,
member_uid,
privilege,
credentials,
options,
mc._session)
return mc._result
def add_member_attribute(self,
member_urn, name, value, self_asserted,
credentials, options):
"""Add an attribute to member"""
with MethodContext(self, MA_LOG_PREFIX,
'add_member_attribute',
{'member_urn': member_urn,
'name': name, 'value': value,
'self_asserted': self_asserted},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.add_member_attribute(mc._client_cert,
member_urn,
name,
value,
self_asserted,
credentials,
options,
mc._session)
return mc._result
def remove_member_attribute(self,
member_urn, name,
credentials, options, value=None):
"""Remove attribute to member"""
with MethodContext(self, MA_LOG_PREFIX,
'remove_member_attribute',
{'member_urn': member_urn, 'name': name,
'value': value},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.remove_member_attribute(mc._client_cert,
member_urn,
name,
credentials,
options,
mc._session, value)
return mc._result
def swap_identities(self, source_urn, dest_urn, credentials, options):
"""Swap identities by making this user point to the identity with
the matching nonce.
"""
with MethodContext(self, MA_LOG_PREFIX,
'swap_identities',
{'source_urn': source_urn, 'dest_urn': dest_urn},
credentials, options, read_only=False) as mc:
if not mc._error:
mc._result = \
self._delegate.swap_identities(mc._client_cert, source_urn,
dest_urn, credentials,
options, mc._session)
return mc._result
# Base class for implementations of MA API
# Must be implemented in a derived class, and that derived class
# must call setDelegate on the handler
class MAv1DelegateBase(DelegateBase):
def __init__(self):
super(MAv1DelegateBase, self).__init__(ma_logger)
# This call is unprotected: no checking of credentials
def get_version(self, options):
raise CHAPIv1NotImplementedError('')
# MEMBER service methods
# This is a generic lookup_member_info call
# You get all the info you are allowed to see
# All public (for anyone)
# Identifying (for those allowed by policy)
# Private (only for you)
def lookup_allowed_member_info(self, client_cert, credentials, options, session):
raise CHAPIv1NotImplementedError('')
# This call is unprotected: no checking of credentials
def lookup_public_member_info(self, client_cert,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
# This call is protected
def lookup_private_member_info(self, client_cert,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
# This call is protected
def lookup_identifying_member_info(self, client_cert,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
# This call is protected
    def lookup_public_identifying_member_info(self, client_cert, credentials,
                                              options, session):
raise CHAPIv1NotImplementedError('')
# This call is protected
def lookup_login_info(self, client_cert, credentials, options, session):
raise CHAPIv1NotImplementedError('')
# This call is protected
def get_credentials(self, client_cert, member_urn,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
# This call is protected
def update_member_info(self, client_cert, member_urn,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
# This call is protected
def create_member(self, client_cert, attributes,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
# KEY service methods
def create_key(self, client_cert, credentials, options, session):
raise CHAPIv1NotImplementedError('')
def delete_key(self, client_cert, key_id,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
def update_key(self, client_cert, key_id,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
def lookup_keys(self, client_cert, credentials, options, session):
raise CHAPIv1NotImplementedError('')
# Member certificate methods
def create_certificate(self, client_cert, member_urn, \
credentials, options, session):
raise CHAPIv1NotImplementedError('')
# ClientAuth methods
def list_clients(self, client_cert, session):
raise CHAPIv1NotImplementedError('')
# List of URN's of all tools for which a given user (by ID) has
# authorized use and has generated inside keys
def list_authorized_clients(self, client_cert, member_id, session):
raise CHAPIv1NotImplementedError('')
# Authorize/deauthorize a tool with respect to a user
def authorize_client(self, client_cert, member_id, \
client_urn, authorize_sense, session):
raise CHAPIv1NotImplementedError('')
# Private API
def enable_user(self, client_cert, member_urn, enable_sense,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
def add_member_privilege(self, client_cert, member_uid, privilege,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
def revoke_member_privilege(self, client_cert, member_uid, privilege,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
def add_member_attribute(self, client_cert, member_urn, att_name,
att_value, att_self_asserted,
credentials, options, session):
raise CHAPIv1NotImplementedError('')
def remove_member_attribute(self, client_cert, member_urn, att_name, \
credentials, options, session, att_value=None):
raise CHAPIv1NotImplementedError('')
def swap_identities(self, client_cert, source_urn, dest_urn, credentials,
options, session):
raise CHAPIv1NotImplementedError('')
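# Hedged wiring sketch (added; not part of the original module): a concrete MA
# implementation subclasses MAv1DelegateBase, overrides the methods above, and
# is attached to the handler via setDelegate, per the comment preceding the
# class definition. Roughly:
#   class MAv1Implementation(MAv1DelegateBase):
#       def get_version(self, options):
#           ...  # return the MA API version and object model
#   handler = MAv1Handler()
#   handler.setDelegate(MAv1Implementation())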
| {
"content_hash": "8f05462ca882f15d52a4628f27d895d5",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 89,
"avg_line_length": 44.40372670807454,
"alnum_prop": 0.4738075255280459,
"repo_name": "tcmitchell/geni-ch",
"id": "ec76ab84de3bef51c35221b64d9bfacc7d9471e4",
"size": "29816",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/chapiv1rpc/chapi/MemberAuthority.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M4",
"bytes": "889"
},
{
"name": "Makefile",
"bytes": "14097"
},
{
"name": "PLSQL",
"bytes": "283"
},
{
"name": "Python",
"bytes": "721711"
},
{
"name": "Shell",
"bytes": "34489"
}
],
"symlink_target": ""
} |
import functools
from oslo.config import cfg
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova import policy
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution."""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
action = func.__name__
check_policy(context, action)
return func(self, context, *args, **kwargs)
return wrapped
def check_policy(context, action):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
_action = 'network:%s' % action
policy.enforce(context, _action, target)
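# Added note (not in the original source): the decorator derives the policy
# rule from the wrapped method name, so calling API.get_all() enforces the
# rule "network:get_all" and API.allocate_floating_ip() enforces
# "network:allocate_floating_ip" against the configured policy rules.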
class API(base_api.NetworkAPI):
"""API for doing networking via the nova-network network manager.
This is a pluggable module - other implementations do networking via
other services (such as Neutron).
"""
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
helper = utils.ExceptionHelper
# NOTE(vish): this local version of floating_manager has to convert
# ClientExceptions back since they aren't going over rpc.
self.floating_manager = helper(floating_ips.LocalManager())
super(API, self).__init__(**kwargs)
@wrap_check_policy
def get_all(self, context):
"""Get all the networks.
If it is an admin user then api will return all the
networks. If it is a normal user and nova Flat or FlatDHCP
networking is being used then api will return all
networks. Otherwise api will only return the networks which
belong to the user's project.
"""
if "nova.network.manager.Flat" in CONF.network_manager:
project_only = "allow_none"
else:
project_only = True
try:
return objects.NetworkList.get_all(context,
project_only=project_only)
except exception.NoNetworksFound:
return []
@wrap_check_policy
def get(self, context, network_uuid):
return objects.Network.get_by_uuid(context.elevated(), network_uuid)
@wrap_check_policy
def create(self, context, **kwargs):
return self.network_rpcapi.create_networks(context, **kwargs)
@wrap_check_policy
def delete(self, context, network_uuid):
return self.network_rpcapi.delete_network(context, network_uuid, None)
@wrap_check_policy
def disassociate(self, context, network_uuid):
network = self.get(context, network_uuid)
objects.Network.disassociate(context, network.id,
host=True, project=True)
@wrap_check_policy
def get_fixed_ip(self, context, id):
return objects.FixedIP.get_by_id(context, id)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
return objects.FixedIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ip(self, context, id):
if not utils.is_int_like(id):
raise exception.InvalidID(id=id)
return objects.FloatingIP.get_by_id(context, id)
@wrap_check_policy
def get_floating_ip_pools(self, context):
return objects.FloatingIP.get_pool_names(context)
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
return objects.FloatingIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ips_by_project(self, context):
return objects.FloatingIPList.get_by_project(context,
context.project_id)
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
floating_ips = objects.FloatingIPList.get_by_fixed_address(
context, fixed_address)
return [str(floating_ip.address) for floating_ip in floating_ips]
@wrap_check_policy
def get_instance_id_by_floating_address(self, context, address):
fixed_ip = objects.FixedIP.get_by_floating_address(context, address)
if fixed_ip is None:
return None
else:
return fixed_ip.instance_uuid
@wrap_check_policy
def get_vifs_by_instance(self, context, instance):
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
for vif in vifs:
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vifs
@wrap_check_policy
def get_vif_by_mac_address(self, context, mac_address):
vif = objects.VirtualInterface.get_by_address(context,
mac_address)
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vif
@wrap_check_policy
def allocate_floating_ip(self, context, pool=None):
"""Adds (allocates) a floating ip to a project from a pool."""
return self.floating_manager.allocate_floating_ip(context,
context.project_id, False, pool)
@wrap_check_policy
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes (deallocates) a floating ip with address from a project."""
return self.floating_manager.deallocate_floating_ip(context, address,
affect_auto_assigned)
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating ip.
This api call was added to allow this to be done in one operation
if using neutron.
"""
address = floating_ip['address']
if floating_ip.get('fixed_ip_id'):
try:
self.disassociate_floating_ip(context, instance, address)
except exception.FloatingIpNotAssociated:
msg = ("Floating ip %s has already been disassociated, "
"perhaps by another concurrent action.") % address
LOG.debug(msg)
# release ip from project
return self.release_floating_ip(context, address)
@wrap_check_policy
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Ensures floating ip is allocated to the project in context.
Does not verify ownership of the fixed ip. Caller is assumed to have
checked that the instance is properly owned.
"""
orig_instance_uuid = self.floating_manager.associate_floating_ip(
context, floating_address, fixed_address, affect_auto_assigned)
if orig_instance_uuid:
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_('re-assign floating IP %(address)s from '
'instance %(instance_id)s') % msg_dict)
orig_instance = objects.Instance.get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
@wrap_check_policy
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from fixed ip it is associated with."""
return self.floating_manager.disassociate_floating_ip(context, address,
affect_auto_assigned)
@wrap_check_policy
@base_api.refresh_cache
def allocate_for_instance(self, context, instance, vpn,
requested_networks, macs=None,
security_groups=None,
dhcp_options=None):
"""Allocates all network structures for an instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param vpn: A boolean, if True, indicate a vpn to access the instance.
:param requested_networks: A dictionary of requested_networks,
Optional value containing network_id, fixed_ip, and port_id.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
:param security_groups: None or security groups to allocate for
instance.
:param dhcp_options: None or a set of key/value pairs that should
determine the DHCP BOOTP response, eg. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
:returns: network info as from get_instance_nw_info() below
"""
# NOTE(vish): We can't do the floating ip allocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
flavor = flavors.extract_flavor(instance)
args = {}
args['vpn'] = vpn
args['requested_networks'] = requested_networks
args['instance_id'] = instance.uuid
args['project_id'] = instance.project_id
args['host'] = instance.host
args['rxtx_factor'] = flavor['rxtx_factor']
args['macs'] = macs
args['dhcp_options'] = dhcp_options
nw_info = self.network_rpcapi.allocate_for_instance(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def deallocate_for_instance(self, context, instance,
requested_networks=None):
"""Deallocates all network structures related to instance."""
# NOTE(vish): We can't do the floating ip deallocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
if not isinstance(instance, obj_base.NovaObject):
instance = objects.Instance._from_db_object(context,
objects.Instance(), instance)
self.network_rpcapi.deallocate_for_instance(context, instance=instance,
requested_networks=requested_networks)
# NOTE(danms): Here for neutron compatibility
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def deallocate_port_for_instance(self, context, instance, port_id):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def list_ports(self, *args, **kwargs):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def show_port(self, *args, **kwargs):
raise NotImplementedError()
@wrap_check_policy
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Adds a fixed ip to instance from specified network."""
flavor = flavors.extract_flavor(instance)
args = {'instance_id': instance['uuid'],
'rxtx_factor': flavor['rxtx_factor'],
'host': instance['host'],
'network_id': network_id}
nw_info = self.network_rpcapi.add_fixed_ip_to_instance(
context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Removes a fixed ip from instance from specified network."""
flavor = flavors.extract_flavor(instance)
args = {'instance_id': instance['uuid'],
'rxtx_factor': flavor['rxtx_factor'],
'host': instance['host'],
'address': address}
nw_info = self.network_rpcapi.remove_fixed_ip_from_instance(
context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force adds another network to a project."""
self.network_rpcapi.add_network_to_project(context, project_id,
network_uuid)
@wrap_check_policy
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate or disassociate host or project to network."""
network = self.get(context, network_uuid)
if host is not base_api.SENTINEL:
if host is None:
objects.Network.disassociate(context, network.id,
host=True, project=False)
else:
network.host = host
network.save()
if project is not base_api.SENTINEL:
if project is None:
objects.Network.disassociate(context, network.id,
host=False, project=True)
else:
objects.Network.associate(context, project,
network_id=network.id, force=True)
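    # Added usage note (not in the original source), based on the sentinel
    # handling above:
    #   api.associate(ctxt, net_uuid, host='compute-1')   # bind network to host
    #   api.associate(ctxt, net_uuid, host=None)          # clear host binding
    #   api.associate(ctxt, net_uuid, project='proj-id')  # associate project
    #   api.associate(ctxt, net_uuid, project=None)       # disassociate project
    # Keyword arguments left at the SENTINEL default are left unchanged.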
@wrap_check_policy
def get_instance_nw_info(self, context, instance, **kwargs):
"""Returns all network info related to an instance."""
result = self._get_instance_nw_info(context, instance)
# NOTE(comstud): Don't update API cell with new info_cache every
# time we pull network info for an instance. The periodic healing
# of info_cache causes too many cells messages. Healing the API
# will happen separately.
base_api.update_instance_cache_with_nw_info(self, context, instance,
result, update_cells=False)
return result
def _get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
flavor = flavors.extract_flavor(instance)
args = {'instance_id': instance['uuid'],
'rxtx_factor': flavor['rxtx_factor'],
'host': instance['host'],
'project_id': instance['project_id']}
nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def validate_networks(self, context, requested_networks, num_instances):
"""validate the networks passed at the time of creating
the server.
Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
if requested_networks:
self.network_rpcapi.validate_networks(context,
requested_networks)
# Neutron validation checks and returns how many of num_instances
# instances can be supported by the quota. For Nova network
# this is part of the subsequent quota check, so we just return
# the requested number in this case.
return num_instances
def create_pci_requests_for_sriov_ports(self, context,
pci_requests,
requested_networks):
"""Check requested networks for any SR-IOV port request.
Create a PCI request object for each SR-IOV port, and add it to the
pci_requests object that contains a list of PCI request object.
"""
# This is NOOP for Nova network since it doesn't support SR-IOV.
pass
@wrap_check_policy
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Returns a list of dicts in the form of
{'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
"""
return self.network_rpcapi.get_instance_uuids_by_ip_filter(context,
filters)
@wrap_check_policy
def get_dns_domains(self, context):
"""Returns a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
return self.network_rpcapi.get_dns_domains(context)
@wrap_check_policy
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'dns_type': dns_type,
'domain': domain}
return self.network_rpcapi.add_dns_entry(context, **args)
@wrap_check_policy
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'domain': domain}
return self.network_rpcapi.modify_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.delete_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
return self.network_rpcapi.delete_dns_domain(context, domain=domain)
@wrap_check_policy
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
args = {'address': address, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_address(context, **args)
@wrap_check_policy
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_name(context, **args)
@wrap_check_policy
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
args = {'domain': domain, 'av_zone': availability_zone}
return self.network_rpcapi.create_private_dns_domain(context, **args)
@wrap_check_policy
def create_public_dns_domain(self, context, domain, project=None):
"""Create a public DNS domain with optional nova project."""
args = {'domain': domain, 'project': project}
return self.network_rpcapi.create_public_dns_domain(context, **args)
@wrap_check_policy
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures on hosts related to
instance.
"""
host = host or instance['host']
# NOTE(tr3buchet): host is passed in cases where we need to setup
# or teardown the networks on a host which has been migrated to/from
        # and instance['host'] is not yet or is no longer equal to that host
args = {'instance_id': instance['id'],
'host': host,
'teardown': teardown}
self.network_rpcapi.setup_networks_on_host(context, **args)
def _is_multi_host(self, context, instance):
try:
fixed_ips = objects.FixedIPList.get_by_instance_uuid(
context, instance['uuid'])
except exception.FixedIpNotFoundForInstance:
return False
network = objects.Network.get_by_id(context,
fixed_ips[0].network_id,
project_only='allow_none')
return network.multi_host
def _get_floating_ip_addresses(self, context, instance):
return objects.FloatingIP.get_addresses_by_instance(
context, instance)
@wrap_check_policy
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
flavor = flavors.extract_flavor(instance)
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=flavor['rxtx_factor'],
project_id=instance['project_id'],
source_compute=migration['source_compute'],
dest_compute=migration['dest_compute'],
floating_addresses=None,
)
if self._is_multi_host(context, instance):
args['floating_addresses'] = \
self._get_floating_ip_addresses(context, instance)
args['host'] = migration['source_compute']
self.network_rpcapi.migrate_instance_start(context, **args)
@wrap_check_policy
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
flavor = flavors.extract_flavor(instance)
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=flavor['rxtx_factor'],
project_id=instance['project_id'],
source_compute=migration['source_compute'],
dest_compute=migration['dest_compute'],
floating_addresses=None,
)
if self._is_multi_host(context, instance):
args['floating_addresses'] = \
self._get_floating_ip_addresses(context, instance)
args['host'] = migration['dest_compute']
self.network_rpcapi.migrate_instance_finish(context, **args)
| {
"content_hash": "26cc7e896e57107b2262e0f955b8d266",
"timestamp": "",
"source": "github",
"line_count": 541,
"max_line_length": 79,
"avg_line_length": 42.23105360443623,
"alnum_prop": 0.6048496520330897,
"repo_name": "tianweizhang/nova",
"id": "5bffda6a06b8834f6e5361cca8810795e19e88f2",
"size": "23668",
"binary": false,
"copies": "6",
"ref": "refs/heads/v0",
"path": "nova/network/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16708379"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259645"
}
],
"symlink_target": ""
} |
from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.db import securitygroups_db
from neutron.extensions import portsecurity as psec
from neutron.extensions import securitygroup as ext_sg
from neutron.manager import NeutronManager
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_extension_security_group
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_portsecurity.'
'PortSecurityTestPlugin')
class PortSecurityTestCase(
test_extension_security_group.SecurityGroupsTestCase,
test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self, plugin=None):
ext_mgr = (
test_extension_security_group.SecurityGroupTestExtensionManager())
super(PortSecurityTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
# Check if a plugin supports security groups
plugin_obj = NeutronManager.get_plugin()
self._skip_security_group = ('security-group' not in
plugin_obj.supported_extension_aliases)
def tearDown(self):
super(PortSecurityTestCase, self).tearDown()
self._skip_security_group = None
class PortSecurityTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
securitygroups_db.SecurityGroupDbMixin,
portsecurity_db.PortSecurityDbMixin):
"""Test plugin that implements necessary calls on create/delete port for
associating ports with security groups and port security.
"""
supported_extension_aliases = ["security-group", "port-security"]
def create_network(self, context, network):
tenant_id = self._get_tenant_id_for_create(context, network['network'])
self._ensure_default_security_group(context, tenant_id)
with context.session.begin(subtransactions=True):
neutron_db = super(PortSecurityTestPlugin, self).create_network(
context, network)
neutron_db.update(network['network'])
self._process_network_port_security_create(
context, network['network'], neutron_db)
return neutron_db
def update_network(self, context, id, network):
with context.session.begin(subtransactions=True):
neutron_db = super(PortSecurityTestPlugin, self).update_network(
context, id, network)
if psec.PORTSECURITY in network['network']:
self._process_network_port_security_update(
context, network['network'], neutron_db)
return neutron_db
def get_network(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
net = super(PortSecurityTestPlugin, self).get_network(
context, id)
return self._fields(net, fields)
def create_port(self, context, port):
p = port['port']
with context.session.begin(subtransactions=True):
p[ext_sg.SECURITYGROUPS] = self._get_security_groups_on_port(
context, port)
neutron_db = super(PortSecurityTestPlugin, self).create_port(
context, port)
p.update(neutron_db)
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, p)
p[psec.PORTSECURITY] = port_security
self._process_port_port_security_create(context, p, neutron_db)
if (attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and
not (port_security and has_ip)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Port requires ip and port_security enabled for security group
if has_ip and port_security:
self._ensure_default_security_group_on_port(context, port)
if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]):
self._process_port_create_security_group(
context, p, p[ext_sg.SECURITYGROUPS])
return port['port']
def update_port(self, context, id, port):
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
with context.session.begin(subtransactions=True):
ret_port = super(PortSecurityTestPlugin, self).update_port(
context, id, port)
# copy values over - but not fixed_ips
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
# populate port_security setting
if psec.PORTSECURITY not in ret_port:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id)
has_ip = self._ip_on_port(ret_port)
            # if security groups were added or modified, port security must
            # be enabled and the port must have an IP
if (has_security_groups and (not ret_port[psec.PORTSECURITY]
or not has_ip)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Port security/IP was updated off. Need to check that no security
# groups are on port.
if (ret_port[psec.PORTSECURITY] != True or not has_ip):
if has_security_groups:
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# get security groups on port
filters = {'port_id': [id]}
security_groups = (super(PortSecurityTestPlugin, self).
_get_port_security_group_bindings(
context, filters))
if security_groups and not delete_security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
if (delete_security_groups or has_security_groups):
                # delete the port's security group bindings and re-create
                # them with the new rules.
self._delete_port_security_group_bindings(context, id)
sgids = self._get_security_groups_on_port(context, port)
# process port create sec groups needs port id
port['id'] = id
self._process_port_create_security_group(context,
ret_port, sgids)
if psec.PORTSECURITY in port['port']:
self._process_port_port_security_update(
context, port['port'], ret_port)
return ret_port
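    # Added summary note (not in the original test plugin): update_port above
    # enforces the same invariant as create_port -- security groups may only
    # be bound to a port that has an IP address and port_security_enabled set
    # to True; turning port security off therefore requires removing (or
    # simultaneously clearing) any security groups on the port.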
class PortSecurityDBTestCase(PortSecurityTestCase):
def setUp(self, plugin=None):
plugin = plugin or DB_PLUGIN_KLASS
super(PortSecurityDBTestCase, self).setUp(plugin)
class TestPortSecurity(PortSecurityDBTestCase):
def test_create_network_with_portsecurity_mac(self):
res = self._create_network('json', 'net1', True)
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], True)
def test_create_network_with_portsecurity_false(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], False)
def test_updating_network_port_security(self):
res = self._create_network('json', 'net1', True,
port_security_enabled='True')
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], True)
update_net = {'network': {psec.PORTSECURITY: False}}
req = self.new_update_request('networks', update_net,
net['network']['id'])
net = self.deserialize('json', req.get_response(self.api))
self.assertEqual(net['network'][psec.PORTSECURITY], False)
req = self.new_show_request('networks', net['network']['id'])
net = self.deserialize('json', req.get_response(self.api))
self.assertEqual(net['network'][psec.PORTSECURITY], False)
def test_create_port_default_true(self):
with self.network() as net:
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_passing_true(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=True)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_on_port_security_false_network(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], False)
self._delete('ports', port['port']['id'])
def test_create_port_security_overrides_network_value(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_fails_with_secgroup_and_port_security_false(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
security_group = self.deserialize(
'json',
self._create_security_group(self.fmt, 'asdf', 'asdf'))
security_group_id = security_group['security_group']['id']
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
security_groups=[security_group_id],
port_security_enabled=False)
self.assertEqual(res.status_int, 400)
def test_create_port_with_default_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['port']['id'])
def test_create_port_with_security_group_and_net_sec_false(self):
# This tests that port_security_enabled is true when creating
# a port on a network that is marked as port_security_enabled=False
        # that has a subnet, and security_groups are passed in.
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
self._create_subnet('json', net['network']['id'], '10.0.0.0/24')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf'))
security_group_id = security_group['security_group']['id']
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',),
security_groups=[security_group_id])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(port['port']['security_groups'], [security_group_id])
self._delete('ports', port['port']['id'])
def test_update_port_security_off_with_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
update_port = {'port': {psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
self.deserialize('json', req.get_response(self.api))
self._delete('ports', port['port']['id'])
def test_update_port_remove_port_security_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize('json', req.get_response(self.api))
self.assertEqual(port['port'][psec.PORTSECURITY], False)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 0)
self._delete('ports', port['port']['id'])
def test_update_port_remove_port_security_security_group_read(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
self.deserialize('json', req.get_response(self.api))
sg_id = port['port'][ext_sg.SECURITYGROUPS]
update_port = {'port': {ext_sg.SECURITYGROUPS: [sg_id[0]],
psec.PORTSECURITY: True}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize('json', req.get_response(self.api))
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['port']['id'])
def test_create_port_security_off_shared_network(self):
with self.network(shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=False,
tenant_id='not_network_owner',
set_context=True)
self.deserialize('json', res)
self.assertEqual(res.status_int, 403)
def test_update_port_security_off_shared_network(self):
with self.network(shared=True, do_delete=False) as net:
with self.subnet(network=net, do_delete=False):
res = self._create_port('json', net['network']['id'],
tenant_id='not_network_owner',
set_context=True)
port = self.deserialize('json', res)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
req.environ['neutron.context'] = context.Context(
'', 'not_network_owner')
res = req.get_response(self.api)
# TODO(salvatore-orlando): Expected error is 404 because
# the current API controller always returns this error
# code for any policy check failures on update.
# It should be 404 when the caller cannot access the whole
# resource, and 403 when it cannot access a single attribute
self.assertEqual(res.status_int, 404)
| {
"content_hash": "687599a450bf95ce67c4e919feab9adf",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 79,
"avg_line_length": 50.454787234042556,
"alnum_prop": 0.5586421380001054,
"repo_name": "Juniper/neutron",
"id": "c336db23a853ee052a1911d50ef2a9e9f0c7d531",
"size": "19563",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_extension_portsecurity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8281839"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import os.path
import warnings
from glob import glob
from io import BytesIO
from numbers import Number
from pathlib import Path
import numpy as np
from .. import Dataset, backends, conventions
from ..core import indexing
from ..core.combine import (
_CONCAT_DIM_DEFAULT, _auto_combine, _infer_concat_order_from_positions)
from ..core.utils import close_on_error, is_grib_path, is_remote_uri
from .common import ArrayWriter
from .locks import _get_scheduler
DATAARRAY_NAME = '__xarray_dataarray_name__'
DATAARRAY_VARIABLE = '__xarray_dataarray_variable__'
def _get_default_engine_remote_uri():
try:
import netCDF4 # noqa
engine = 'netcdf4'
except ImportError: # pragma: no cover
try:
import pydap # noqa
engine = 'pydap'
except ImportError:
raise ValueError('netCDF4 or pydap is required for accessing '
'remote datasets via OPeNDAP')
return engine
def _get_default_engine_grib():
msgs = []
try:
import Nio # noqa
msgs += ["set engine='pynio' to access GRIB files with PyNIO"]
except ImportError: # pragma: no cover
pass
try:
import cfgrib # noqa
msgs += ["set engine='cfgrib' to access GRIB files with cfgrib"]
except ImportError: # pragma: no cover
pass
if msgs:
raise ValueError(' or\n'.join(msgs))
else:
raise ValueError('PyNIO or cfgrib is required for accessing '
'GRIB files')
def _get_default_engine_gz():
try:
import scipy # noqa
engine = 'scipy'
except ImportError: # pragma: no cover
raise ValueError('scipy is required for accessing .gz files')
return engine
def _get_default_engine_netcdf():
try:
import netCDF4 # noqa
engine = 'netcdf4'
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf # noqa
engine = 'scipy'
except ImportError:
raise ValueError('cannot read or write netCDF files without '
'netCDF4-python or scipy installed')
return engine
def _get_engine_from_magic_number(filename_or_obj):
# check byte header to determine file type
if isinstance(filename_or_obj, bytes):
magic_number = filename_or_obj[:8]
else:
if filename_or_obj.tell() != 0:
raise ValueError("file-like object read/write pointer not at zero "
"please close and reopen, or use a context "
"manager")
magic_number = filename_or_obj.read(8)
filename_or_obj.seek(0)
if magic_number.startswith(b'CDF'):
engine = 'scipy'
elif magic_number.startswith(b'\211HDF\r\n\032\n'):
engine = 'h5netcdf'
if isinstance(filename_or_obj, bytes):
raise ValueError("can't open netCDF4/HDF5 as bytes "
"try passing a path or file-like object")
else:
if isinstance(filename_or_obj, bytes) and len(filename_or_obj) > 80:
filename_or_obj = filename_or_obj[:80] + b'...'
raise ValueError('{} is not a valid netCDF file '
'did you mean to pass a string for a path instead?'
.format(filename_or_obj))
return engine
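# Illustrative sketch (not part of the original module): how the magic-number
# check above maps byte headers to engines. The HDF5-based netCDF4 file in the
# second example is hypothetical.
# >>> _get_engine_from_magic_number(b'CDF\x01' + b'\x00' * 5)
# 'scipy'
# >>> with open('example_netcdf4.nc', 'rb') as f:  # an HDF5-based netCDF4 file
# ...     _get_engine_from_magic_number(f)
# 'h5netcdf'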
def _get_default_engine(path, allow_remote=False):
if allow_remote and is_remote_uri(path):
engine = _get_default_engine_remote_uri()
elif is_grib_path(path):
engine = _get_default_engine_grib()
elif path.endswith('.gz'):
engine = _get_default_engine_gz()
else:
engine = _get_default_engine_netcdf()
return engine
def _normalize_path(path):
if is_remote_uri(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def _validate_dataset_names(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_name(name):
if isinstance(name, str):
if not name:
raise ValueError('Invalid name for DataArray or Dataset key: '
'string must be length 1 or greater for '
'serialization to netCDF files')
elif name is not None:
raise TypeError('DataArray.name or Dataset key must be either a '
'string or None for serialization to netCDF files')
for k in dataset.variables:
check_name(k)
def _validate_attrs(dataset):
"""`attrs` must have a string key and a value which is either: a number,
a string, an ndarray or a list/tuple of numbers/strings.
"""
def check_attr(name, value):
if isinstance(name, str):
if not name:
raise ValueError('Invalid name for attr: string must be '
'length 1 or greater for serialization to '
'netCDF files')
else:
raise TypeError("Invalid name for attr: {} must be a string for "
"serialization to netCDF files".format(name))
if not isinstance(value, (str, Number, np.ndarray, np.number,
list, tuple)):
raise TypeError('Invalid value for attr: {} must be a number, '
'a string, an ndarray or a list/tuple of '
'numbers/strings for serialization to netCDF '
'files'.format(value))
# Check attrs on the dataset itself
for k, v in dataset.attrs.items():
check_attr(k, v)
# Check attrs on each variable within the dataset
for variable in dataset.variables.values():
for k, v in variable.attrs.items():
check_attr(k, v)
def _protect_dataset_variables_inplace(dataset, cache):
for name, variable in dataset.variables.items():
if name not in variable.dims:
# no need to protect IndexVariable objects
data = indexing.CopyOnWriteArray(variable._data)
if cache:
data = indexing.MemoryCachedArray(data)
variable.data = data
def _finalize_store(write, store):
""" Finalize this store by explicitly syncing and closing"""
del write # ensure writing is done first
store.close()
def open_dataset(filename_or_obj, group=None, decode_cf=True,
mask_and_scale=None, decode_times=True, autoclose=None,
concat_characters=True, decode_coords=True, engine=None,
chunks=None, lock=None, cache=None, drop_variables=None,
backend_kwargs=None, use_cftime=None):
"""Load and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
autoclose : bool, optional
If True, automatically close files to avoid OS Error of too many files
being open. However, this option doesn't work with streams, e.g.,
BytesIO.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib', \
'pseudonetcdf'}, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks={}`` loads the dataset with dask using a single
chunk for all arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
engines = [None, 'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio',
'cfgrib', 'pseudonetcdf']
if engine not in engines:
raise ValueError('unrecognized engine for open_dataset: {}\n'
'must be one of: {}'
.format(engine, engines))
if autoclose is not None:
warnings.warn(
'The autoclose argument is no longer used by '
'xarray.open_dataset() and is now ignored; it will be removed in '
'a future version of xarray. If necessary, you can control the '
'maximum number of simultaneous open files with '
'xarray.set_options(file_cache_maxsize=...).',
FutureWarning, stacklevel=2)
if mask_and_scale is None:
mask_and_scale = not engine == 'pseudonetcdf'
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
if cache is None:
cache = chunks is None
if backend_kwargs is None:
backend_kwargs = {}
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store, mask_and_scale=mask_and_scale, decode_times=decode_times,
concat_characters=concat_characters, decode_coords=decode_coords,
drop_variables=drop_variables, use_cftime=use_cftime)
_protect_dataset_variables_inplace(ds, cache)
if chunks is not None:
from dask.base import tokenize
# if passed an actual file path, augment the token with
# the file modification time
if (isinstance(filename_or_obj, str) and
not is_remote_uri(filename_or_obj)):
mtime = os.path.getmtime(filename_or_obj)
else:
mtime = None
token = tokenize(filename_or_obj, mtime, group, decode_cf,
mask_and_scale, decode_times, concat_characters,
decode_coords, engine, chunks, drop_variables,
use_cftime)
name_prefix = 'open_dataset-%s' % token
ds2 = ds.chunk(chunks, name_prefix=name_prefix, token=token)
ds2._file_obj = ds._file_obj
else:
ds2 = ds
return ds2
if isinstance(filename_or_obj, Path):
filename_or_obj = str(filename_or_obj)
if isinstance(filename_or_obj, backends.AbstractDataStore):
store = filename_or_obj
elif isinstance(filename_or_obj, str):
filename_or_obj = _normalize_path(filename_or_obj)
if engine is None:
engine = _get_default_engine(filename_or_obj,
allow_remote=True)
if engine == 'netcdf4':
store = backends.NetCDF4DataStore.open(
filename_or_obj, group=group, lock=lock, **backend_kwargs)
elif engine == 'scipy':
store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)
elif engine == 'pydap':
store = backends.PydapDataStore.open(
filename_or_obj, **backend_kwargs)
elif engine == 'h5netcdf':
store = backends.H5NetCDFStore(
filename_or_obj, group=group, lock=lock, **backend_kwargs)
elif engine == 'pynio':
store = backends.NioDataStore(
filename_or_obj, lock=lock, **backend_kwargs)
elif engine == 'pseudonetcdf':
store = backends.PseudoNetCDFDataStore.open(
filename_or_obj, lock=lock, **backend_kwargs)
elif engine == 'cfgrib':
store = backends.CfGribDataStore(
filename_or_obj, lock=lock, **backend_kwargs)
else:
if engine not in [None, 'scipy', 'h5netcdf']:
raise ValueError("can only read bytes or file-like objects "
"with engine='scipy' or 'h5netcdf'")
engine = _get_engine_from_magic_number(filename_or_obj)
if engine == 'scipy':
store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)
elif engine == 'h5netcdf':
store = backends.H5NetCDFStore(filename_or_obj, group=group,
lock=lock, **backend_kwargs)
with close_on_error(store):
ds = maybe_decode_store(store)
# Ensure source filename always stored in dataset object (GH issue #2550)
if 'source' not in ds.encoding:
if isinstance(filename_or_obj, str):
ds.encoding['source'] = filename_or_obj
return ds
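# Illustrative usage sketch (not part of the original module), assuming a
# local netCDF file named 'example.nc' exists; the chunk size is arbitrary.
# >>> ds = open_dataset('example.nc')                             # NumPy-backed
# >>> ds_lazy = open_dataset('example.nc', chunks={'time': 100})  # dask-backed
# >>> ds.close()
# >>> ds_lazy.close()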
def open_dataarray(filename_or_obj, group=None, decode_cf=True,
mask_and_scale=None, decode_times=True, autoclose=None,
concat_characters=True, decode_coords=True, engine=None,
chunks=None, lock=None, cache=None, drop_variables=None,
backend_kwargs=None, use_cftime=None):
"""Open an DataArray from a netCDF file containing a single data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Paths are interpreted as a path to a netCDF file or an
OpenDAP URL and opened with python-netCDF4, unless the filename ends
with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(filename_or_obj, group=group, decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times, autoclose=autoclose,
concat_characters=concat_characters,
decode_coords=decode_coords, engine=engine,
chunks=chunks, lock=lock, cache=cache,
drop_variables=drop_variables,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime)
if len(dataset.data_vars) != 1:
raise ValueError('Given file dataset contains more than one data '
'variable. Please read with xarray.open_dataset and '
'then select the variable you want.')
else:
data_array, = dataset.data_vars.values()
data_array._file_obj = dataset._file_obj
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array
class _MultiFileCloser(object):
def __init__(self, file_objs):
self.file_objs = file_objs
def close(self):
for f in self.file_objs:
f.close()
def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT,
compat='no_conflicts', preprocess=None, engine=None,
lock=None, data_vars='all', coords='different',
autoclose=None, parallel=False, **kwargs):
"""Open multiple files as a single dataset.
Requires dask to be installed. See documentation for details on dask [1].
Attributes from the first dataset file are used for the combined dataset.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/*.nc" or an explicit
list of files to open. Paths can be given as strings or as pathlib
Paths.
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk
sizes. In general, these should divide the dimensions of each dataset.
If int, chunk each dimension by ``chunks``.
By default, chunks will be chosen to load entire input files into
memory at once. This has a major impact on performance: please see the
full documentation for more details [2].
concat_dim : None, str, DataArray or Index, optional
Dimension to concatenate files along. This argument is passed on to
:py:func:`xarray.auto_combine` along with the dataset objects. You only
need to provide this argument if the dimension along which you want to
concatenate is not a dimension in the original datasets, e.g., if you
want to stack a collection of 2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
concatenation.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
* 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* 'equals': all values and dimensions must be the same.
* 'identical': all values, dimensions and attributes must be the
same.
* 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding['source']``.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the 'minimal' coordinates.
parallel : bool, optional
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
auto_combine
open_dataset
References
----------
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
""" # noqa
if isinstance(paths, str):
if is_remote_uri(paths):
raise ValueError(
'cannot do wild-card matching for paths that are remote URLs: '
'{!r}. Instead, supply paths as an explicit list of strings.'
.format(paths))
paths = sorted(glob(paths))
else:
paths = [str(p) if isinstance(p, Path) else p for p in paths]
if not paths:
raise IOError('no files to open')
# Coerce 1D input into ND to maintain backwards-compatible API until API
# for N-D combine decided
# (see https://github.com/pydata/xarray/pull/2553/#issuecomment-445892746)
if concat_dim is None or concat_dim is _CONCAT_DIM_DEFAULT:
concat_dims = concat_dim
elif not isinstance(concat_dim, list):
concat_dims = [concat_dim]
else:
concat_dims = concat_dim
infer_order_from_coords = False
# If infer_order_from_coords=True then this is unnecessary, but quick.
# If infer_order_from_coords=False then this creates a flat list which is
# easier to iterate over, while saving the originally-supplied structure
combined_ids_paths, concat_dims = _infer_concat_order_from_positions(
paths, concat_dims)
ids, paths = (
list(combined_ids_paths.keys()), list(combined_ids_paths.values()))
open_kwargs = dict(engine=engine, chunks=chunks or {}, lock=lock,
autoclose=autoclose, **kwargs)
if parallel:
import dask
# wrap the open_dataset, getattr, and preprocess with delayed
open_ = dask.delayed(open_dataset)
getattr_ = dask.delayed(getattr)
if preprocess is not None:
preprocess = dask.delayed(preprocess)
else:
open_ = open_dataset
getattr_ = getattr
datasets = [open_(p, **open_kwargs) for p in paths]
file_objs = [getattr_(ds, '_file_obj') for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
if parallel:
# calling compute here will return the datasets/file_objs lists,
# the underlying datasets will still be stored as dask arrays
datasets, file_objs = dask.compute(datasets, file_objs)
# Close datasets in case of a ValueError
try:
if infer_order_from_coords:
# Discard ordering because it should be redone from coordinates
ids = False
combined = _auto_combine(
datasets, concat_dims=concat_dims,
compat=compat,
data_vars=data_vars, coords=coords,
infer_order_from_coords=infer_order_from_coords,
ids=ids)
except ValueError:
for ds in datasets:
ds.close()
raise
combined._file_obj = _MultiFileCloser(file_objs)
combined.attrs = datasets[0].attrs
return combined
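# Illustrative usage sketch (not part of the original module): combining a
# hypothetical set of files along 'time', with a preprocess callback that
# keeps a single (assumed) variable.
# >>> def _keep_precip(ds):
# ...     return ds[['precip']]
# >>> combined = open_mfdataset('data/precip_*.nc', concat_dim='time',
# ...                           preprocess=_keep_precip)
# >>> combined.close()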
WRITEABLE_STORES = {'netcdf4': backends.NetCDF4DataStore.open,
'scipy': backends.ScipyDataStore,
'h5netcdf': backends.H5NetCDFStore}
def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
engine=None, encoding=None, unlimited_dims=None, compute=True,
multifile=False):
"""This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``multifile`` argument is only for the private use of save_mfdataset.
"""
if isinstance(path_or_file, Path):
path_or_file = str(path_or_file)
if encoding is None:
encoding = {}
if path_or_file is None:
if engine is None:
engine = 'scipy'
elif engine != 'scipy':
raise ValueError('invalid engine for creating bytes with '
'to_netcdf: %r. Only the default engine '
"or engine='scipy' is supported" % engine)
if not compute:
raise NotImplementedError(
'to_netcdf() with compute=False is not yet implemented when '
'returning bytes')
elif isinstance(path_or_file, str):
if engine is None:
engine = _get_default_engine(path_or_file)
path_or_file = _normalize_path(path_or_file)
else: # file-like object
engine = 'scipy'
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
try:
store_open = WRITEABLE_STORES[engine]
except KeyError:
raise ValueError('unrecognized engine for to_netcdf: %r' % engine)
if format is not None:
format = format.upper()
# handle scheduler specific logic
scheduler = _get_scheduler()
have_chunks = any(v.chunks for v in dataset.variables.values())
autoclose = have_chunks and scheduler in ['distributed', 'multiprocessing']
if autoclose and engine == 'scipy':
raise NotImplementedError("Writing netCDF files with the %s backend "
"is not currently supported with dask's %s "
"scheduler" % (engine, scheduler))
target = path_or_file if path_or_file is not None else BytesIO()
kwargs = dict(autoclose=True) if autoclose else {}
store = store_open(target, mode, format, group, **kwargs)
if unlimited_dims is None:
unlimited_dims = dataset.encoding.get('unlimited_dims', None)
if isinstance(unlimited_dims, str):
unlimited_dims = [unlimited_dims]
writer = ArrayWriter()
# TODO: figure out how to refactor this logic (here and in save_mfdataset)
# to avoid this mess of conditionals
try:
# TODO: allow this work (setting up the file for writing array data)
# to be parallelized with dask
dump_to_store(dataset, store, writer, encoding=encoding,
unlimited_dims=unlimited_dims)
if autoclose:
store.close()
if multifile:
return writer, store
writes = writer.sync(compute=compute)
if path_or_file is None:
store.sync()
return target.getvalue()
finally:
if not multifile and compute:
store.close()
if not compute:
import dask
return dask.delayed(_finalize_store)(writes, store)
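# Illustrative usage sketch (not part of the original module). User code
# normally reaches this through Dataset.to_netcdf; 'out.nc' is hypothetical
# and writing it assumes netCDF4-python (or scipy) is installed.
# >>> ds = Dataset({'x': ('t', [1, 2, 3])})
# >>> to_netcdf(ds, 'out.nc', mode='w')   # write to disk
# >>> raw = to_netcdf(ds)                 # no path: returns netCDF3 bytes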
def dump_to_store(dataset, store, writer=None, encoder=None,
encoding=None, unlimited_dims=None):
"""Store dataset contents to a backends.*DataStore object."""
if writer is None:
writer = ArrayWriter()
if encoding is None:
encoding = {}
variables, attrs = conventions.encode_dataset_coordinates(dataset)
check_encoding = set()
for k, enc in encoding.items():
# no need to shallow copy the variable again; that already happened
# in encode_dataset_coordinates
variables[k].encoding = enc
check_encoding.add(k)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs, check_encoding, writer,
unlimited_dims=unlimited_dims)
def save_mfdataset(datasets, paths, mode='w', format=None, groups=None,
engine=None, compute=True):
"""Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of xarray.Dataset
List of datasets to save.
paths : list of str or list of Paths
List of paths to which to save each corresponding dataset.
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
these locations will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format='NETCDF4'). The groups will be created
if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
See `Dataset.to_netcdf` for additional information.
compute: boolean
If true, compute immediately; otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> years, datasets = zip(*ds.groupby('time.year'))
>>> paths = ['%s.nc' % y for y in years]
>>> xr.save_mfdataset(datasets, paths)
"""
if mode == 'w' and len(set(paths)) < len(paths):
raise ValueError("cannot use mode='w' when writing multiple "
'datasets to the same path')
for obj in datasets:
if not isinstance(obj, Dataset):
raise TypeError('save_mfdataset only supports writing Dataset '
'objects, received type %s' % type(obj))
if groups is None:
groups = [None] * len(datasets)
if len(set([len(datasets), len(paths), len(groups)])) > 1:
raise ValueError('must supply lists of the same length for the '
'datasets, paths and groups arguments to '
'save_mfdataset')
writers, stores = zip(*[
to_netcdf(ds, path, mode, format, group, engine, compute=compute,
multifile=True)
for ds, path, group in zip(datasets, paths, groups)])
try:
writes = [w.sync(compute=compute) for w in writers]
finally:
if compute:
for store in stores:
store.close()
if not compute:
import dask
return dask.delayed([dask.delayed(_finalize_store)(w, s)
for w, s in zip(writes, stores)])
def to_zarr(dataset, store=None, mode='w-', synchronizer=None, group=None,
encoding=None, compute=True, consolidated=False):
"""This function creates an appropriate datastore for writing a dataset to
a zarr store
See `Dataset.to_zarr` for full API docs.
"""
if isinstance(store, Path):
store = str(store)
if encoding is None:
encoding = {}
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
zstore = backends.ZarrStore.open_group(store=store, mode=mode,
synchronizer=synchronizer,
group=group,
consolidate_on_close=consolidated)
writer = ArrayWriter()
# TODO: figure out how to properly handle unlimited_dims
dump_to_store(dataset, zstore, writer, encoding=encoding)
writes = writer.sync(compute=compute)
if compute:
_finalize_store(writes, zstore)
else:
import dask
return dask.delayed(_finalize_store)(writes, zstore)
return zstore
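# Illustrative usage sketch (not part of the original module). User code
# normally reaches this through Dataset.to_zarr; 'out.zarr' is hypothetical
# and assumes the zarr package is installed.
# >>> ds = Dataset({'x': ('t', [1, 2, 3])})
# >>> zstore = to_zarr(ds, 'out.zarr', mode='w')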
| {
"content_hash": "8072a24731a8c221563dacc28daa6588",
"timestamp": "",
"source": "github",
"line_count": 985,
"max_line_length": 82,
"avg_line_length": 41.753299492385786,
"alnum_prop": 0.6313127629051475,
"repo_name": "chunweiyuan/xarray",
"id": "afb69f6e9e94f2433faebd41d7617fe65ba399d9",
"size": "41127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xarray/backends/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3150"
},
{
"name": "Python",
"bytes": "2336715"
}
],
"symlink_target": ""
} |
"""
Ref: http://shin.hateblo.jp/entry/2013/03/23/211750
Usage 1. tm.start() is used to start timing, and tm.update('tag') shows the
elapsed time under the 'tag' name.
tm = TimeMeasure()
tm.start()
# Run method1
tm.update('method1')
# Run method2
tm.update('method2')
...
tm.start()
# Run method3
tm.update('method3')
Usage 2. with ... statement
with TimeMeasure('tag'):
# Do something here with time measured
"""
from time import time
# Singleton time measure instance
_chainerex_tm_singleton = None
class TimeMeasure:
DEFAULT_TAG = 'time'
def __init__(self, tag=None, loglevel=5):
"""
Args:
tag (str or None):
loglevel (int):
- 0 -> not print
- 1 -> error
- 2 -> warning
- 3 -> info
- 4 -> debug
- 5 -> verbose
"""
self.tag = self.DEFAULT_TAG if tag is None else tag
self.loglevel = loglevel
# -- initialize --
self.tag_count_dict = {}
self.tag_time_dict = {}
self.t = time()
@classmethod
def get_instance(cls, tag=None, loglevel=5):
global _chainerex_tm_singleton
if _chainerex_tm_singleton is None:
_chainerex_tm_singleton = cls(tag=tag, loglevel=loglevel)
return _chainerex_tm_singleton
def _update_tag_dict(self, tag, t):
if self.tag in self.tag_time_dict.keys():
self.tag_count_dict[tag] += 1
self.tag_time_dict[tag] += t
else:
self.tag_count_dict[tag] = 1
self.tag_time_dict[tag] = t
def __enter__(self):
self.t = time()
return self
def __exit__(self, type, value, traceback):
t_end = time()
if self.loglevel >= 3:
print('[TimeMeasure] {}: {} sec'.format(self.tag, t_end - self.t))
self._update_tag_dict(self.tag, t_end - self.t)
def start(self):
self.t = time()
def update(self, tag=None):
self.tag = self.DEFAULT_TAG if tag is None else tag
t_end = time()
if self.loglevel >= 5:
print('[TimeMeasure] {}: {:.6f} sec'.format(self.tag, t_end - self.t))
self._update_tag_dict(self.tag, t_end - self.t)
self.t = t_end
def show_stats(self):
if self.loglevel >= 4:
for k in self.tag_time_dict.keys():
print('[TimeMeasure.show_stats] {}: {:.6f} sec / {:6} count'
.format(k, self.tag_time_dict[k], self.tag_count_dict[k]))
def show_average(self):
if self.loglevel >= 4:
for k in self.tag_time_dict.keys():
t = self.tag_time_dict[k] / self.tag_count_dict[k]
print('[TimeMeasure.show_average] {}: {:.6f} sec'.format(k, t))
# def end(self, tag=None):
# self.tag = tag
# t_end = time()
# print('{}: {} sec'.format(self.tag, t_end - self.time_dict[self.tag]))
if __name__ == '__main__':
# Demo
# tm = TimeMeasure(loglevel=4)
tm = TimeMeasure.get_instance(loglevel=4)
num_repeat = 10000
for _ in range(num_repeat):
a = 5 ** 5
tm.update('{} calculation'.format(num_repeat))
tm.update('hoge')
tm.update('hoge')
tm.update('hoge')
tm.show_stats()
tm.show_average()
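# Illustrative addition (not part of the original demo): the context-manager
# form described in the module docstring. At loglevel >= 3 the elapsed time
# is printed on exit; the workload below is arbitrary.
# >>> with TimeMeasure('context_demo', loglevel=3):
# ...     squares = [i * i for i in range(100000)]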
| {
"content_hash": "9b1b4da12622d677840b2e7a28ed6222",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 82,
"avg_line_length": 26.706349206349206,
"alnum_prop": 0.525111441307578,
"repo_name": "corochann/chainerex",
"id": "41a087402b7d04318fdd480ab4d8287884bc4283",
"size": "3365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainerex/utils/time_measure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124900"
}
],
"symlink_target": ""
} |
from HTMLParser import HTMLParser
import logging
import os
import re
from docs_server_utils import FormatKey
from file_system import FileNotFoundError
import compiled_file_system as compiled_fs
from third_party.handlebar import Handlebar
# TODO(kalman): rename this HTMLDataSource or other, then have separate intro
# article data sources created as instances of it.
# Increment this if the data model changes for IntroDataSource.
_VERSION = 5
_H1_REGEX = re.compile('<h1[^>.]*?>.*?</h1>', flags=re.DOTALL)
class _IntroParser(HTMLParser):
""" An HTML parser which will parse table of contents and page title info out
of an intro.
"""
def __init__(self):
HTMLParser.__init__(self)
self.toc = []
self.page_title = None
self._recent_tag = None
self._current_heading = {}
def handle_starttag(self, tag, attrs):
id_ = ''
if tag not in ['h1', 'h2', 'h3']:
return
if tag != 'h1' or self.page_title is None:
self._recent_tag = tag
for attr in attrs:
if attr[0] == 'id':
id_ = attr[1]
if tag == 'h2':
self._current_heading = { 'link': id_, 'subheadings': [], 'title': '' }
self.toc.append(self._current_heading)
elif tag == 'h3':
self._current_heading = { 'link': id_, 'title': '' }
self.toc[-1]['subheadings'].append(self._current_heading)
def handle_endtag(self, tag):
if tag in ['h1', 'h2', 'h3']:
self._recent_tag = None
def handle_data(self, data):
if self._recent_tag is None:
return
if self._recent_tag == 'h1':
if self.page_title is None:
self.page_title = data
else:
self.page_title += data
elif self._recent_tag in ['h2', 'h3']:
self._current_heading['title'] += data
class IntroDataSource(object):
"""This class fetches the intros for a given API. From this intro, a table
of contents dictionary is created, which contains the headings in the intro.
"""
class Factory(object):
def __init__(self, compiled_fs_factory, ref_resolver_factory, base_paths):
self._cache = compiled_fs_factory.Create(self._MakeIntroDict,
IntroDataSource,
version=_VERSION)
self._ref_resolver = ref_resolver_factory.Create()
self._base_paths = base_paths
def _MakeIntroDict(self, intro_path, intro):
# Guess the name of the API from the path to the intro.
api_name = os.path.splitext(intro_path.split('/')[-1])[0]
intro_with_links = self._ref_resolver.ResolveAllLinks(intro,
namespace=api_name)
apps_parser = _IntroParser()
apps_parser.feed(Handlebar(intro_with_links).render(
{ 'is_apps': True }).text)
extensions_parser = _IntroParser()
extensions_parser.feed(Handlebar(intro_with_links).render(
{ 'is_apps': False }).text)
# TODO(cduvall): Use the normal template rendering system, so we can check
# errors.
if extensions_parser.page_title != apps_parser.page_title:
logging.error(
'Title differs for apps and extensions: Apps: %s, Extensions: %s.' %
(extensions_parser.page_title, apps_parser.page_title))
# The templates will render the heading themselves, so remove it from the
# HTML content.
intro_with_links = re.sub(_H1_REGEX, '', intro_with_links, count=1)
return {
'intro': Handlebar(intro_with_links),
'title': apps_parser.page_title,
'apps_toc': apps_parser.toc,
'extensions_toc': extensions_parser.toc,
}
def Create(self):
return IntroDataSource(self._cache, self._base_paths)
def __init__(self, cache, base_paths):
self._cache = cache
self._base_paths = base_paths
def get(self, key):
path = FormatKey(key)
def get_from_base_path(base_path):
return self._cache.GetFromFile('%s/%s' % (base_path, path))
for base_path in self._base_paths:
try:
return get_from_base_path(base_path)
except FileNotFoundError:
continue
# Not found. Do the first operation again so that we get a stack trace - we
# know that it'll fail.
get_from_base_path(self._base_paths[0])
raise AssertionError()
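# Illustrative sketch (not part of the original module): what _IntroParser
# extracts from a small HTML fragment (dict key order may vary).
# >>> parser = _IntroParser()
# >>> parser.feed('<h1>My API</h1><h2 id="usage">Usage</h2><h3 id="basics">Basics</h3>')
# >>> parser.page_title
# 'My API'
# >>> [h['link'] for h in parser.toc]
# ['usage']
# >>> [s['title'] for s in parser.toc[0]['subheadings']]
# ['Basics']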
| {
"content_hash": "4b66978868a0e3e9d3d966e8da537267",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 80,
"avg_line_length": 36.15966386554622,
"alnum_prop": 0.6177085754125029,
"repo_name": "codenote/chromium-test",
"id": "b4407c0b9ea46c2716d0d603a03df76f97352025",
"size": "4470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chrome/common/extensions/docs/server2/intro_data_source.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from JumpScale import j
import ujson
class AUTH():
def load(self,osis):
pass
def authenticate(self,osis,method,user,passwd, session):
if j.core.osis.cmds._authenticateAdmin(user=user,passwd=passwd):
return True
return False
| {
"content_hash": "fc46080505ba12abb44366efd5bd9c81",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 24.545454545454547,
"alnum_prop": 0.6592592592592592,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "8d7b3c32700d13063a4ac16acceaea8c1abb2219",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/osis/logic/system/user/OSIS_auth.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |