| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesConfigurationOperations:
"""VpnSitesConfigurationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _download_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
request: "_models.GetVpnSitesConfigurationRequest",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._download_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_download_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
async def begin_download(
self,
resource_group_name: str,
virtual_wan_name: str,
request: "_models.GetVpnSitesConfigurationRequest",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Gives the sas-url to download the configurations for vpn-sites in a resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN for which configuration of all vpn-sites is
needed.
:type virtual_wan_name: str
:param request: Parameters supplied to download vpn-sites configuration.
:type request: ~azure.mgmt.network.v2021_05_01.models.GetVpnSitesConfigurationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._download_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/aio/operations/_vpn_sites_configuration_operations.py | Python | mit | 8,266 |
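A minimal usage sketch for the VpnSitesConfigurationOperations group above. It assumes the async NetworkManagementClient exposes this group as `vpn_sites_configuration` and that `GetVpnSitesConfigurationRequest` accepts `vpn_sites` and `output_blob_sas_url`; every resource name and SAS URL below is a placeholder.

```python
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient
from azure.mgmt.network.v2021_05_01 import models


async def download_vpn_sites_configuration():
    # Authenticate and build the async management client (subscription id is a placeholder).
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            request = models.GetVpnSitesConfigurationRequest(
                vpn_sites=["<vpn-site-resource-id>"],
                output_blob_sas_url="<writable-blob-sas-url>",
            )
            # begin_download starts the long-running operation; result() waits
            # until the configuration has been written to the SAS destination.
            poller = await client.vpn_sites_configuration.begin_download(
                "<resource-group>", "<virtual-wan-name>", request
            )
            await poller.result()  # the operation returns no body on success
```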
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Proxy models for augmenting our source data tables with methods useful for processing.
"""
from __future__ import unicode_literals
from django.utils.text import get_text_list
# Models
from django.db.models import Count
from opencivicdata.elections.models import (
Election,
ElectionIdentifier,
ElectionSource
)
from calaccess_processed.proxies import OCDProxyModelMixin
from ..core.posts import OCDPostProxy
from .candidatecontests import OCDCandidateContestProxy
# Managers
from calaccess_processed.managers import BulkLoadSQLManager
from calaccess_processed_elections.managers import (
OCDPartisanPrimaryManager,
OCDElectionManager
)
class OCDElectionProxy(Election, OCDProxyModelMixin):
"""
A proxy on the OCD Election model.
"""
objects = OCDElectionManager()
partisan_primaries = OCDPartisanPrimaryManager()
copy_to_fields = (
('id',),
('name',),
('date',),
('division_id',),
('administrative_organization_id',),
('created_at',),
('updated_at',),
('extras',),
('locked_fields',),
)
class Meta:
"""
Make this a proxy model.
"""
app_label = "calaccess_processed_elections"
proxy = True
def add_election_type(self, election_type):
"""
Add election_type to 'calaccess_election_type' in extras field (if missing).
"""
if 'calaccess_election_type' in self.extras.keys():
# and if this one isn't included
if election_type not in self.extras[
'calaccess_election_type'
]:
# then append
self.extras['calaccess_election_type'].append(election_type)
# and save
self.save()
else:
# if election doesn't already have types, add the key
self.extras['calaccess_election_type'] = [election_type]
# and save
self.save()
return
def add_election_id(self, election_id):
"""
Add election_id to identifiers, if missing.
"""
if not self.identifiers.filter(
scheme='calaccess_election_id',
identifier=election_id,
).exists():
self.identifiers.create(
scheme='calaccess_election_id',
identifier=election_id,
)
self.save()
return
def get_regular_senate_contests_in_wrong_districts(self):
"""
Get a list of regular senate contests in districts that shouldn't be contested.
"""
if self.is_gubernatorial_election:
# in gubernatorial elections,
            # odd-numbered senate districts should not be contested
contests = [
c for c in self.senate_contests.regular()
if int(c.division.subid2) % 2 != 0
]
else:
# in non-gubernatorial elections,
            # even-numbered senate districts should not be contested
contests = [
c for c in self.senate_contests.regular()
if int(c.division.subid2) % 2 == 0
]
return contests
@property
def assembly_contests(self):
"""
State Assembly CandidateContests occurring in the election.
"""
return self.candidate_contest_proxies.assembly()
@property
def candidate_contest_proxies(self):
"""
A QuerySet of OCDCandidateContestProxy for the election.
"""
return OCDCandidateContestProxy.objects.filter(election=self)
@property
def election_type(self):
"""
Returns the primary CAL-ACCESS election type included with this record.
"""
for et in self.extras.get('calaccess_election_type', []):
if et in self.name:
return et
@property
def election_types(self):
"""
Returns all the CAL-ACCESS election types included with this record.
"""
return self.extras.get('calaccess_election_type', [])
@property
def executive_contests(self):
"""
State Executive Branch CandidateContests occurring in the election.
"""
return self.candidate_contest_proxies.executive()
@property
def has_special_contests(self):
"""
This election includes contests outside the regular election calendar.
"""
special_election_types = set(
("SPECIAL ELECTION", "SPECIAL RUNOFF", "RECALL")
)
return len(
special_election_types.intersection(self.election_types)
) > 0
@property
def identifier_list(self):
"""
Returns a prettified list of OCD identifiers.
"""
template = "{0.scheme}: {0.identifier}"
return get_text_list([template.format(i) for i in self.identifiers.all()])
@property
def is_gubernatorial_election(self):
"""
        This election should include contests for Governor and other executive branch offices.
"""
# Governors are elected every four years, and the earliest such election
# in CAL-ACCESS was 2002
return (self.date.year - 2002) % 4 == 0
@property
def is_partisan_primary(self):
"""
Returns whether or not this was a primary election held in the partisan era prior to 2012.
"""
if 'PRIMARY' in self.election_types:
if self.date.year < 2012:
return True
return False
@property
def regular_assembly_contest_count_actual(self):
"""
Actual count of regular State Assembly contests.
"""
return self.assembly_contests.regular().count()
@property
def regular_assembly_contest_count_expected(self):
"""
Expected count of regular State Assembly contests (based on year).
"""
assembly_office_count = OCDPostProxy.assembly.count()
if "GENERAL" in self.election_types:
expected_contest_count = assembly_office_count
elif "PRIMARY" in self.election_types:
if self.is_partisan_primary:
# should be one contest for every distinct party in each
# of the (80) assembly seats
expected_contest_count = 0
contests_q = self.assembly_contests.regular()
contest_counts_by_party = contests_q.order_by().values(
'candidacies__party__name'
).annotate(
contest_count=Count('candidacies__contest', distinct=True)
)
for party in contest_counts_by_party:
expected_contest_count += party['contest_count']
else:
expected_contest_count = assembly_office_count
else:
expected_contest_count = 0
return expected_contest_count
@property
def regular_executive_contest_count_actual(self):
"""
Actual count of regular State Executive Branch contests.
"""
return self.executive_contests.regular().count()
@property
def regular_executive_contest_count_expected(self):
"""
        Expected count of regular State Executive Branch contests (based on year).
"""
exec_office_count = OCDPostProxy.executive.count()
if self.is_gubernatorial_election:
if "GENERAL" in self.election_types:
expected_contest_count = exec_office_count
elif "PRIMARY" in self.election_types:
if self.is_partisan_primary:
# should be 1 contest for every distinct party in each
# of the executive branch offices (12)
expected_contest_count = 0
contests_q = self.executive_contests.regular()
contest_counts_by_party = contests_q.order_by().values(
'candidacies__party__name'
).annotate(
contest_count=Count('candidacies__contest', distinct=True)
)
for party in contest_counts_by_party:
expected_contest_count += party['contest_count']
else:
expected_contest_count = exec_office_count
else:
expected_contest_count = 0
else:
expected_contest_count = 0
return expected_contest_count
@property
def regular_senate_contest_count_actual(self):
"""
Actual count of regular State Senate contests.
"""
return self.senate_contests.regular().count()
@property
def regular_senate_contest_count_expected(self):
"""
        Expected count of regular State Senate contests (based on year).
"""
        # half of the senate seats are filled every two years
senate_office_count = int(OCDPostProxy.senate.count() / 2)
if "GENERAL" in self.election_types:
expected_contest_count = senate_office_count
elif "PRIMARY" in self.election_types:
if self.is_partisan_primary:
# should be one contest for every distinct party in every
# other senate district (20)
expected_contest_count = 0
contests_q = self.senate_contests.regular()
contest_counts_by_party = contests_q.order_by().values(
'candidacies__party__name'
).annotate(
contest_count=Count('candidacies__contest', distinct=True)
)
for party in contest_counts_by_party:
expected_contest_count += party['contest_count']
else:
expected_contest_count = senate_office_count
else:
expected_contest_count = 0
return expected_contest_count
@property
def senate_contests(self):
"""
State Senate CandidateContests occurring in the election.
"""
return self.candidate_contest_proxies.senate()
@property
def source_list(self):
"""
Returns a prettified list of OCD sources.
"""
return get_text_list(list(self.sources.all()))
class OCDElectionIdentifierProxy(ElectionIdentifier, OCDProxyModelMixin):
"""
A proxy on the OCD ElectionIdentifier model.
"""
objects = BulkLoadSQLManager()
class Meta:
"""
Make this a proxy model.
"""
app_label = "calaccess_processed_elections"
proxy = True
class OCDElectionSourceProxy(ElectionSource, OCDProxyModelMixin):
"""
A proxy on the OCD ElectionSource model.
"""
objects = BulkLoadSQLManager()
class Meta:
"""
Make this a proxy model.
"""
app_label = "calaccess_processed_elections"
proxy = True
| california-civic-data-coalition/django-calaccess-processed-data | calaccess_processed_elections/proxies/opencivicdata/elections/elections.py | Python | mit | 11,031 |
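A short sketch of how the proxy above might be used from a Django shell. It assumes the `calaccess_processed_elections.proxies` package re-exports `OCDElectionProxy` and that OCD election data has already been loaded into the database.

```python
from calaccess_processed_elections.proxies import OCDElectionProxy

# Grab the most recent election on record and inspect its CAL-ACCESS metadata.
election = OCDElectionProxy.objects.order_by("-date").first()
if election is not None:
    election.add_election_type("GENERAL")  # no-op if the type is already recorded
    print(election.election_types, election.identifier_list)

    # Compare expected versus actual contest counts as a basic sanity check.
    print(election.regular_assembly_contest_count_expected,
          election.regular_assembly_contest_count_actual)
    print(election.get_regular_senate_contests_in_wrong_districts())
```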
#-------------------------------------------------------------------------------
# dump.py
# Dump binary files into C arrays.
#-------------------------------------------------------------------------------
Version = 3
import os.path
import yaml
import genutil
#-------------------------------------------------------------------------------
def get_file_path(filename, file_path) :
'''
Returns absolute path to an input file, given file name and
another full file path in the same directory.
'''
return '{}/{}'.format(os.path.dirname(file_path), filename)
#-------------------------------------------------------------------------------
def get_file_cname(filename) :
return 'dump_{}'.format(os.path.splitext(filename)[0])
#-------------------------------------------------------------------------------
def gen_header(out_hdr, files) :
with open(out_hdr, 'w') as f:
f.write('#pragma once\n')
f.write('// #version:{}#\n'.format(Version))
f.write('// machine generated, do not edit!\n')
f.write('namespace YAKC {\n')
for file in files :
file_path = get_file_path(file, out_hdr)
if os.path.isfile(file_path) :
file_name = get_file_cname(file)
file_size = os.path.getsize(file_path)
f.write('extern unsigned char {}[{}];\n'.format(file_name, file_size))
else :
genutil.fmtError("Input file not found: '{}'".format(file_path))
f.write('} // namespace YAKC\n')
#-------------------------------------------------------------------------------
def gen_source(out_src, files) :
with open(out_src, 'w') as f:
f.write('// #version:{}#\n'.format(Version))
f.write('// machine generated, do not edit!\n')
f.write('#include "{}.h"\n'.format(os.path.splitext(os.path.basename(out_src))[0]))
f.write('namespace YAKC {\n')
for file in files :
file_path = get_file_path(file, out_src)
if os.path.isfile(file_path) :
with open(file_path, 'rb') as src_file:
file_data = src_file.read()
file_name = get_file_cname(file)
file_size = os.path.getsize(file_path)
f.write('unsigned char {}[{}] = {{\n'.format(file_name, file_size))
num = 0
for byte in file_data :
                    f.write(hex(byte if isinstance(byte, int) else ord(byte)) + ', ')  # bytes iterate as ints on Python 3, as 1-char strings on Python 2
num += 1
if 0 == num%16:
f.write('\n')
f.write('\n};\n')
else :
genutil.fmtError("Input file not found: '{}'".format(file_path))
f.write('} // namespace YAKC\n')
#-------------------------------------------------------------------------------
def generate(input, out_src, out_hdr) :
if genutil.isDirty(Version, [input], [out_src, out_hdr]) :
with open(input, 'r') as f :
            desc = yaml.safe_load(f)
gen_header(out_hdr, desc['files'])
gen_source(out_src, desc['files'])
| floooh/yakc | fips-files/generators/dump.py | Python | mit | 3,142 |
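The generator above is driven by a YAML file that lists the binaries to embed, resolved relative to the YAML file itself. A hedged sketch of invoking it directly follows; it normally runs inside the fips generator machinery, so `genutil` must be importable, and the file names here are placeholders.

```python
# roms.yml, stored next to the binary files it lists:
#
#   files:
#     - rom_basic.bin
#     - rom_os.bin
#
import dump

# Regenerates roms.cc/roms.h only if genutil.isDirty() reports the outputs as stale;
# the header declares one `unsigned char dump_<name>[<size>]` array per input file.
dump.generate('roms.yml', 'roms.cc', 'roms.h')
```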
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Disktype(MakefilePackage):
"""A fork of the disktype disk and disk image format detection tool."""
homepage = "https://github.com/kamwoods/disktype"
url = "https://github.com/kamwoods/disktype/archive/9.2.1.tar.gz"
version('9.2.1', sha256='fb274d6ce6b69c0d36eb23fcc9f01db3c32c3996b404900d46bb743ce4fa8154')
build_directory = 'src'
def install(self, spec, prefix):
mkdir(prefix.bin)
with working_dir(self.build_directory):
install('disktype', prefix.bin)
| LLNL/spack | var/spack/repos/builtin/packages/disktype/package.py | Python | lgpl-2.1 | 741 |
#!/usr/bin/env python
#
# This file is part of Flap.
#
# Flap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Flap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Flap. If not, see <http://www.gnu.org/licenses/>.
#
from flap import logger
from flap.latex.macros.commons import Macro
class TexFileInclusion(Macro):
def __init__(self, flap, name):
super().__init__(flap, name, None, None)
self._requires_expansion = True
def _capture_arguments(self, parser, invocation):
invocation.append_argument("link", parser.read.one())
def execute2(self, parser, invocation):
self._called = True
logger.debug("Setting CALLED on %d", id(self))
link = parser.evaluate_as_text(invocation.argument("link"))
content = self._flap.content_of(link, invocation)
logger.debug("TEX INCLUSION %s: '%s'", link, content)
if not link.endswith(".tex"):
link += ".tex"
tokens = parser._create.as_list(content)
return parser._tokens.push(tokens)
def rewrite2(self, parser, invocation):
return []
class Input(TexFileInclusion):
"""
Intercept the `\\input` directive
"""
def __init__(self, flap):
super().__init__(flap, "input")
class Include(TexFileInclusion):
"""
Intercept the `\\include` directive
"""
def __init__(self, flap):
super().__init__(flap, "include")
def execute2(self, parser, invocation):
self._called = True
link = parser.evaluate_as_text(invocation.argument("link"))
if self._flap.shall_include(link):
tokens = parser._create.as_list(r"\clearpage")
parser._tokens.push(tokens)
super().execute2(parser, invocation)
def rewrite2(self, parser, invocation):
return []
class SubFile(TexFileInclusion):
def __init__(self, flap):
super().__init__(flap, "subfile")
class EndInput(Macro):
def __init__(self, flap):
super().__init__(flap, "endinput", None, None)
def execute2(self, parser, invocation):
source = invocation.name.location.source
self._flap.end_of_input(source, invocation)
parser.flush(source)
def rewrite2(self, parser, invocation):
return []
class IncludeOnly(Macro):
"""
Intercept includeonly commands
"""
def __init__(self, flap):
super().__init__(flap, r"includeonly", None, None)
def _capture_arguments(self, parser, invocation):
invocation.append_argument("selection", parser.read.one())
def execute2(self, parser, invocation):
pass
def rewrite2(self, parser, invocation):
text = parser.evaluate_as_text(invocation.argument("selection"))
files_to_include = list(map(str.strip, text.split(",")))
self._flap.include_only(files_to_include, invocation)
return []
| fchauvel/flap | flap/latex/macros/inlining.py | Python | gpl-3.0 | 3,335 |
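Since TexFileInclusion captures the single `link` argument and pushes the included file's tokens back onto the parser, a new single-argument inclusion directive only needs to supply its control-sequence name, exactly as Input and SubFile do. A hypothetical sketch (the `\myinput` name is made up for illustration):

```python
class MyInput(TexFileInclusion):
    """
    Intercept a hypothetical `\\myinput` directive, reusing the inlining
    behaviour of TexFileInclusion unchanged.
    """

    def __init__(self, flap):
        super().__init__(flap, "myinput")
```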
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class DatabasesOperations(object):
"""DatabasesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the request. Constant value: "2014-04-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2014-04-01"
self.config = config
def import_method(
self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Imports a bacpac into an existing database. The existing database must
be empty.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to import into
:type database_name: str
:param parameters: The required parameters for importing a Bacpac into
a database.
:type parameters: :class:`ImportExtensionRequestParameters
<azure.mgmt.sql.models.ImportExtensionRequestParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`ImportExportOperationResponse
<azure.mgmt.sql.models.ImportExportOperationResponse>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/extensions/import'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ImportExtensionRequestParameters')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImportExportOperationResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def export(
self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Exports a database to a bacpac.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to be exported.
:type database_name: str
:param parameters: The required parameters for exporting a database.
:type parameters: :class:`ExportRequestParameters
<azure.mgmt.sql.models.ExportRequestParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`ImportExportOperationResponse
<azure.mgmt.sql.models.ImportExportOperationResponse>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/export'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ExportRequestParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImportExportOperationResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete_replication_link(
self, resource_group_name, server_name, database_name, link_id, custom_headers=None, raw=False, **operation_config):
"""Deletes a database replication link. Cannot be done during failover.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database that has the
replication link to be dropped.
:type database_name: str
:param link_id: The ID of the replication link to be deleted.
:type link_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/replicationLinks/{linkId}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'linkId': self._serialize.url("link_id", link_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_replication_link(
self, resource_group_name, server_name, database_name, link_id, custom_headers=None, raw=False, **operation_config):
"""Gets a database replication link.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to get the link for.
:type database_name: str
:param link_id: The replication link ID to be retrieved.
:type link_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ReplicationLink
<azure.mgmt.sql.models.ReplicationLink>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/replicationLinks/{linkId}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'linkId': self._serialize.url("link_id", link_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationLink', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def failover_replication_link(
self, resource_group_name, server_name, database_name, link_id, custom_headers=None, raw=False, **operation_config):
"""Failover the database replication link.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database that has the
replication link to be failed over.
:type database_name: str
:param link_id: The ID of the replication link to be failed over.
:type link_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/replicationLinks/{linkId}/failover'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'linkId': self._serialize.url("link_id", link_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [204, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def failover_replication_link_allow_data_loss(
self, resource_group_name, server_name, database_name, link_id, custom_headers=None, raw=False, **operation_config):
"""Force failover the database replication link, which may result in data
loss.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database that has the
replication link to be failed over.
:type database_name: str
:param link_id: The ID of the replication link to be failed over.
:type link_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/replicationLinks/{linkId}/forceFailoverAllowDataLoss'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'linkId': self._serialize.url("link_id", link_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [204, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_replication_links(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Lists a database's replication links.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to retrieve links for.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ReplicationLinkPaged
<azure.mgmt.sql.models.ReplicationLinkPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/replicationLinks'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ReplicationLinkPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ReplicationLinkPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def pause_data_warehouse(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Pauses a data warehouse.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the data warehouse to pause.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/pause'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def resume_data_warehouse(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Resumes a data warehouse.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the data warehouse to resume.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/resume'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_restore_points(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Returns a list of database restore points.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database from which to retrieve
available restore points.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RestorePointPaged
<azure.mgmt.sql.models.RestorePointPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/restorePoints'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RestorePointPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RestorePointPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
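    # ------------------------------------------------------------------
    # Illustrative usage sketch (not part of the generated operations
    # class). It assumes the msrestazure-era azure-mgmt-sql client this
    # file was generated for; all resource names are placeholders.
    #
    #   from azure.common.credentials import ServicePrincipalCredentials
    #   from azure.mgmt.sql import SqlManagementClient
    #
    #   credentials = ServicePrincipalCredentials('<client-id>', '<secret>', tenant='<tenant-id>')
    #   client = SqlManagementClient(credentials, '<subscription-id>')
    #   poller = client.databases.pause_data_warehouse('<resource-group>', '<server>', '<data-warehouse>')
    #   poller.wait()  # AzureOperationPoller blocks until the long-running operation completes
    # ------------------------------------------------------------------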
def create_or_update(
self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates a new database or updates an existing database. Location is a
required property in the request body, and it must be the same as the
location of the SQL server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to be operated on
(updated or created).
:type database_name: str
:param parameters: The required parameters for creating or updating a
database.
:type parameters: :class:`Database <azure.mgmt.sql.models.Database>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`Database
<azure.mgmt.sql.models.Database>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Database')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Database', response)
if response.status_code == 201:
deserialized = self._deserialize('Database', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
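    # Illustrative usage sketch (not part of the generated client): the
    # AzureOperationPoller returned above is typically driven by calling
    # .result(), which blocks until provisioning finishes. `sql_client` and the
    # resource names below are assumptions for the example only.
    #
    #   poller = sql_client.databases.create_or_update(
    #       'my-resource-group', 'my-server', 'my-db',
    #       models.Database(location='westus'))
    #   database = poller.result()
    #   print(database.status)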
def delete(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a database.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to be deleted.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, resource_group_name, server_name, database_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets a database.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database to be retrieved.
:type database_name: str
:param expand: A comma separated list of child objects to expand in
the response. Possible properties: serviceTierAdvisors, upgradeHint,
transparentDataEncryption.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Database <azure.mgmt.sql.models.Database>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Database', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_by_server(
self, resource_group_name, server_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Returns a list of databases in a server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param filter: An OData filter expression that describes a subset of
databases to return.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DatabasePaged <azure.mgmt.sql.models.DatabasePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DatabasePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DatabasePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
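    # Illustrative usage sketch (not part of the generated client): the paged
    # object returned above fetches pages lazily, so it can simply be iterated.
    # `sql_client` and the resource names are assumptions for the example only.
    #
    #   for database in sql_client.databases.list_by_server(
    #           'my-resource-group', 'my-server'):
    #       print(database.name)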
def list_usages(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Returns database usages.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DatabaseMetricPaged
<azure.mgmt.sql.models.DatabaseMetricPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/usages'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DatabaseMetricPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DatabaseMetricPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_service_tier_advisor(
self, resource_group_name, server_name, database_name, service_tier_advisor_name, custom_headers=None, raw=False, **operation_config):
"""Gets a service tier advisor.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of database.
:type database_name: str
:param service_tier_advisor_name: The name of service tier advisor.
:type service_tier_advisor_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ServiceTierAdvisor
<azure.mgmt.sql.models.ServiceTierAdvisor>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/serviceTierAdvisors/{serviceTierAdvisorName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'serviceTierAdvisorName': self._serialize.url("service_tier_advisor_name", service_tier_advisor_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServiceTierAdvisor', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_service_tier_advisors(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Returns service tier advisors for specified database.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of database.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ServiceTierAdvisorPaged
<azure.mgmt.sql.models.ServiceTierAdvisorPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/serviceTierAdvisors'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ServiceTierAdvisorPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ServiceTierAdvisorPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def create_or_update_transparent_data_encryption_configuration(
self, resource_group_name, server_name, database_name, status=None, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a database's transparent data encryption
configuration.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database for which setting the
transparent data encryption applies.
:type database_name: str
:param status: The status of the database transparent data encryption.
Possible values include: 'Enabled', 'Disabled'
:type status: str or :class:`TransparentDataEncryptionStates
<azure.mgmt.sql.models.TransparentDataEncryptionStates>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TransparentDataEncryption
<azure.mgmt.sql.models.TransparentDataEncryption>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.TransparentDataEncryption(status=status)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/current'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TransparentDataEncryption')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TransparentDataEncryption', response)
if response.status_code == 201:
deserialized = self._deserialize('TransparentDataEncryption', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_transparent_data_encryption_configuration(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Gets a database's transparent data encryption configuration.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database for which the
transparent data encryption applies.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TransparentDataEncryption
<azure.mgmt.sql.models.TransparentDataEncryption>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/current'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TransparentDataEncryption', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_transparent_data_encryption_activity(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Returns a database's transparent data encryption operation result.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database for which the
transparent data encryption applies.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TransparentDataEncryptionActivityPaged
<azure.mgmt.sql.models.TransparentDataEncryptionActivityPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/current/operationResults'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.TransparentDataEncryptionActivityPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.TransparentDataEncryptionActivityPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
| rjschwei/azure-sdk-for-python | azure-mgmt-sql/azure/mgmt/sql/operations/databases_operations.py | Python | mit | 77,141 |
#!/usr/bin/env python
import math
import sys
inFile = open(sys.argv[1])
alnLine = inFile.readline()
vals = alnLine.split()
query = vals[16]
aln = vals[17]
target= vals[18]
i = 0
hplen = 6
nPATIns = 0
nPATDel = 0
nDel = query.count('-')
nIns = target.count('-')
nMismatch = 0
while (i < len(query)):
if (aln[i] == '*'):
if (query[i] != '-' and target[i] != '-' and query[i] != target[i]):
nMismatch += 1
i += 1
continue
j = i + 1
while (j < len(query) and aln[j] == '*'):
j+=1
ql = i
qr = j
tl = ql
tr = qr
while (ql > 0 and ((query[i-1] == 'T' or query[i-1] == 'A') and query[ql-1] == query[i-1] )):
ql-= 1
while (tl > 0 and ((target[i-1] == 'T' or target[i-1] == 'A') and target[tl-1] == target[i-1])):
tl-= 1
while (qr < len(query) and ((query[j] == 'T' or query[j] == 'A') and query[qr] == query[j])):
qr+= 1
while (tr < len(target) and ((target[j] == 'T' or target[j] == 'A') and target[tr] == target[j])):
tr+= 1
if (query[i] == '-'):
indel = 'del'
else:
indel = 'ins'
if (i - ql > hplen or i - tl > hplen or qr - j > hplen or tr - j > hplen):
patlen = max(i - ql, i - tl , qr - j, tr - j)
motif = 'pAT'
print indel + " " + motif + " " + str(j - i) + " " + str(patlen)
if (indel == 'del'):
nPATDel += j-i
else:
nPATIns += j-i
i = j
else:
if (query[i] != target[i]):
nMismatch += 1
i += 1
print "summary: " + "npATdel: " + str(nPATDel) + " npATins: " + str(nPATIns)
print "mm: " + str(nMismatch)
print "length: " + str(len(target) - nDel)
print "total del: " + str(nDel) + " ins: "+ str(nIns)
print "phred: {:2.1f}".format(-10*math.log10((nMismatch + nDel + nIns) / float(len(target))))
| yunlongliukm/chm1_scripts | one_off/SummarizeM5Alignment.py | Python | mit | 1,973 |
import json
import os
import cloudstorage
from google.appengine.api import app_identity
import webapp2
import instagram
import taxonomy
BUCKET_NAME = os.environ.get(
'BUCKET_NAME', app_identity.get_default_gcs_bucket_name())
JSON_OUT = '/%s/birds.json' % BUCKET_NAME
cloudstorage.set_default_retry_params(
cloudstorage.RetryParams(
initial_delay=0.2, max_delay=5.0, backoff_factor=2, max_retry_period=15
))
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(BUCKET_NAME)
instagram_posts = instagram.getPostsByEnglishName()
taxonomy_dict = taxonomy.getHierarchicalDict(english_name_filter=instagram_posts)
with cloudstorage.open(
JSON_OUT, 'w', content_type='application/json', options={'x-goog-acl': 'public-read'}) as f:
f.write(json.dumps(taxonomy_dict, separators=(',\n', ':')))
app = webapp2.WSGIApplication([
('/regenerate_json', MainPage),
], debug=True)
| tonygentilcore/nerdbirder | scripts/regenerate_json_web.py | Python | gpl-3.0 | 1,044 |
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from broadcasts.models import BroadcastMessage
from broadcasts.forms import BroadcastMessageForm
class BroadcastAdmin(admin.ModelAdmin):
"""Admin class for the broadcast messages"""
form = BroadcastMessageForm
list_display = (
'title', 'user_target', 'show_frequency', 'start_time',
'end_time', 'is_published')
list_filter = ('is_published', 'show_frequency', 'user_target')
search_fields = ['message', 'title']
fieldsets = (
(None, {
'fields': ('title', 'message', 'message_type',)
}),
(_('Message Targeting'), {
'fields': ('user_target', 'url_target')
}),
(_("Message Display"), {
'description': _(
"Messages will display only if they are published, "
"it is between the start and end times, and the show "
"frequency has not been exceeded."),
'fields': ('show_frequency', 'is_published',
('start_time', 'end_time'))
})
)
admin.site.register(BroadcastMessage, BroadcastAdmin)
| Natgeoed/django-broadcasts | broadcasts/admin.py | Python | mit | 1,181 |
import random
import time
import shelve
import math
import os
"""
IRC codes:
bold
4 hp red text
5 brown - Strength
11 dexterity
"""
channel = "#sunfields"
levels = [2,3,5,15,19,25,34,70,95,106,135,150,200,300,400,1000,100000]
heroes = {}
active_combat = None  # set when a manual (non-auto) Combat is registered
traits = ["spiky","cunning","burly","strong","ablaze","skilled","smart","barraging",
"accurate", "daredevil", "bottomless", "undying","quick to learn",
"drunk", "glass cannon"]
patch_notes = ["Sunfields 1.87",
"- A new spell!",
]
class Event():
def __init__(self, damage, action, events=None):
self.damage = damage
self.action = action
self.events = events
class Hero:
def __init__(self, owner, name, pre_traits = None):
if pre_traits == None:
self.traits = random.sample(traits,2)
else:
self.traits = (pre_traits + random.sample(traits,2))[:2]
self.name = name
self.owner = owner
self.base_str = self.base_dex = self.base_int = self.base_end = 3
self.extra_str = self.extra_dex = self.extra_end = self.extra_int = 0
self.level = 1
self.xp = 0
self.rank_points = 0
self.stat_points = 5
self.skill_points = 1
if "quick to learn" in self.traits:
self.skill_points += 1
self.wins = 0
self.losses = 0
if "strong" in self.traits:
self.base_str += 3
if "skilled" in self.traits:
self.stat_points += 2
if "drunk" in self.traits:
self.base_int -= 2
self.base_str += 7
self.base_dex -= 2
self.base_end += 4
if "glass cannon" in self.traits:
self.base_end -= 2
self.base_str -= 2
self.base_int += 7
self.spells = []
self.states = []
if name.lower() == "chuck norris":
self.base_str = 89
self.base_end = 67
self.base_dex = 29
            self.base_int = 20
global heroes
heroes[owner] = self
@property
def win_percent(self):
if self.wins == 0 and self.losses == 0:
return "unranked"
elif self.losses == 0:
return "100%"
elif self.wins == 0:
return "0%"
else:
return str(round(float(self.wins)/float(self.wins+self.losses),2)*100)+"%"
@property
def dexterity(self):
return self.base_dex + self.extra_dex
@property
def strength(self):
return self.base_str + self.extra_str
@property
def intelligence(self):
return self.base_int + self.extra_int
@property
def endurance(self):
return self.base_end + self.extra_end
@property
def mana_regen(self):
return 0.25
def clean_up(self,enemy):
for i in self.spells:
i.cd_counter = i.cd_begin
        self.extra_end = self.extra_str = self.extra_int = self.extra_dex = 0
for s in self.states:
s.on_decay(enemy)
self.states = []
def on_turn(self,enemy):
self.mana += self.mana_regen
if "bottomless" in self.traits:
self.mana += 0.5
if "undying" in self.traits:
self.hp += 1
        # iterate over a copy - State.update() may remove expired states from the list
        for s in self.states[:]:
print "THIS IS STATE: ", s
s.update(enemy)
for i in self.spells:
d = 1
if "barraging" in self.traits:
d +=1
i.cd_counter -= d
self.mana = min(self.get_maxMP(),self.mana)
self.hp = min(self.get_maxHP(),self.hp)
def action(self,enemy):
"""
Decides what action to take
"""
for i in self.spells:
print i.cd_counter, i, i.owner.name
if i.can_cast(enemy):
return i.cast(enemy)
break
else:
print "Couldnt cast any spells, attacking!"
return self.damage(enemy)
def damage(self,enemy):
        # assumption: each point of dexterity shaves 1% off the miss chance
        # (subtracting the raw stat would make the chance negative immediately)
        miss_chance = 0.13 - self.dexterity * 0.01
if "drunk" in self.traits:
miss_chance = 0.33
if "accurate" in self.traits:
miss_chance -= 0.25
if random.random() < miss_chance:
return Event(-2,"tries to attack but misses!")
dam_type = "damages"
print self.strength
low = self.strength * 1.5 + self.dexterity * 0.5
high = self.strength * 1.5 + self.dexterity *1.5
if "daredevil" in self.traits:
high += self.dexterity * 1.5
print low, high
damage = random.randint(max(int(low),0), max(int(high),0))
print "low: ", low, " high: ", high
crit_chance = 0.05 + self.dexterity * 0.015
if "cunning" in self.traits:
crit_chance += 0.15
        if random.random() < crit_chance:
if "spiky" in self.traits:
damage = damage *3
else:
damage = damage * 2
dam_type = "CRITS"
return Event(damage,dam_type)
def on_damage(self,enemy,damage):
for s in self.states:
s.on_damage(enemy,damage)
def on_damaged(self,enemy,damage):
print "yes hello, this is on_damaged speaking?"
print len(self.states)
for s in self.states:
print "This is ", s
s.on_damaged(enemy,damage)
def get_maxHP(self):
hp = 35+self.endurance* 8 + self.strength*2
if "burly" in self.traits:
hp += 20
if "daredevil" in self.traits:
hp -= 20
if "glass cannon" in self.traits:
hp -= 20
return hp
def get_maxMP(self):
mana = 10+self.base_int*3
if "smart" in self.traits:
mana += 15
return mana
def repr_stats(self, connection):
for i in ["The " + self.traits[0] + " and " + self.traits[1] + " " + self.name,
"unallocated stat points, skillpoints:" + str(self.stat_points) + ", " + str(self.skill_points),
"wins/losses: " + str(self.wins) +"/"+str(self.losses) + " " + self.win_percent,
"level, xp: " + str(self.level)+", 6"+ str(self.xp) + "1/6" + str(levels[self.level]),
"rank points: 11" + str(self.rank_points),
"owner: " + str(self.owner),
"str/dex/end/int: 5%s1/11%s1/3%s1/12%s"%(self.base_str,self.base_dex,self.base_end,self.base_int),
]:
connection.privmsg("#sunfields",i)
def learn(self,spell):
print "Trying to learn",spell
if self.skill_points > 0:
if learnable_skills.has_key(spell):
self.spells.append(learnable_skills[spell](self))
self.skill_points -= 1
print "Learned ",spell,"!"
def apply_state(self,state):
self.states.append(state)
def has_state(self,state_name):
n = len([s for s in self.states if s.name == state_name])
if n:
return n
else:
return False
class State():
duration = 0
dur_left = duration
name = "Stateless State"
def __init__(self,owner,enemy):
self.name = self.name
self.owner = owner
self.dur_left = self.duration
self.on_apply(enemy)
def update(self,enemy):
if self.dur_left < 0:
self.on_decay(enemy)
self.owner.states.remove(self)
return
self.dur_left -= 1
print self
self.on_update(enemy)
def on_apply(self,enemy):
pass
def on_decay(self,enemy):
pass
def on_update(self,enemy):
pass
def on_damaged(self,enemy,damage_dealt):
pass
def on_damage(self,enemy,damage_dealt):
pass
class Overpower_State(State):
duration = 1
name = "Overpower"
def on_apply(self,enemy):
print self.owner.name
self.owner.extra_str += 10
self.owner.extra_dex += 5
def on_decay(self,enemy):
self.owner.extra_str -= 10
self.owner.extra_dex -= 5
class Rejuvenation_State(State):
duration = 4
name = "Rejuvenation"
def on_apply(self,enemy):
self.owner.extra_end += 4
def on_update(self,enemy):
self.owner.hp += self.owner.intelligence
def on_decay(self,enemy):
self.owner.extra_end -= 4
class EternalFire_State(State):
name = "Eternal Fire"
duration = 4
def on_update(self,enemy):
self.owner.hp -= 7
class Thorns_State(State):
name = "Thorns"
duration = 3
def on_damaged(self,enemy,damage_dealt):
enemy.hp -= int(damage_dealt / 3.0)
class ManaShield_State(State):
name = "Mana Shield"
duration = 50
def on_damaged(self,enemy,damage_dealt):
mana_spent = min(self.owner.mana,damage_dealt/4.0)
self.owner.mana -= mana_spent
self.owner.hp += mana_spent*4
class CurseOfImpendingDeath_State(State):
name = "Curse of Death stack"
duration = 9001
class Spell():
mana_cost = 0
cooldown = 1
cd_counter = cooldown
cd_begin = cooldown
def __init__(self,owner):
self.owner = owner
self.cd_counter = self.cd_begin
def cast(self,enemy):
pass
def effect(self):
pass
def can_cast(self,enemy):
"""
if it can cast, will return True and do all mana cost stuff
"""
if self.cd_counter > 0:
print "cooldown: ", self.cd_counter
return False
if self.mana_cost > self.owner.mana:
print "not enough mana: ",self.mana_cost, " vs owners ",self.owner.mana
return False
else:
if self.will_cast(enemy):
self.owner.mana -= self.mana_cost
self.cd_counter = self.cooldown
return True
else:
return False
def will_cast(self,enemy):
return True
class Healing(Spell):
"""
Uses the spirit of Sungod to bathe yourself in a healing light!
    Heals 12+int*3.5 to 18+int*5 health"""
mana_cost = 6
cooldown = 3
cd_counter = cooldown
def cast(self,enemy):
i = self.owner.intelligence
heal_amount = random.randint(int(12+i*3.5), int(18+i*5))
self.owner.hp += heal_amount
return Event(-1,"8Healing1, healing himself for 4"+str(heal_amount)+"4 hp!")
def will_cast(self,enemy):
if self.owner.get_maxHP() - self.owner.hp < 20+self.owner.intelligence*6 - 15:
return False
else:
return True
class Rejuvenation(Spell):
"""Watch out, it's getting HoT in here!"""
mana_cost = 8
cooldown = 6
cd_begin = 2
def cast(self,enemy):
self.owner.apply_state(Rejuvenation_State(self.owner,enemy))
return Event(-1, "3Rejuvenation1, giving him some endurance and healing him every turn")
def will_cast(self,enemy):
if self.owner.get_maxHP() - self.owner.hp < self.owner.intelligence * 2:
return False
else:
return True
class EternalFire(Spell):
"""Watch out, it's getting DoT in here!"""
mana_cost = 10
cooldown = 5
cd_begin = 1
def cast(self,enemy):
enemy.apply_state(EternalFire_State(enemy,enemy))
return Event(-1, "5Eternal Fire1, burning his enemy!")
class Fireball(Spell):
"""
Throws a mighty fireball towards your enemy, dealing huge damage!
Deals 8+int*3 to 14+int*4 damage"""
mana_cost = 6
cooldown = 3
cd_counter = cooldown
def cast(self,enemy):
return Event(self.damage(),"casts a 7Fireball1 at")
def damage(self):
i = self.owner.intelligence
damage = random.randint(8+int(i*3),14+i*4)
if "ablaze" in self.owner.traits:
damage += 10
return damage
class CatsGrace(Spell):
"""
You are so agile you are extremely agile!
Gives you 1+int/8 to 2+int/3 dex for the rest of the fight"""
cooldown = 3
mana_cost = 6
def cast(self,enemy):
i = self.owner.intelligence
dx = random.randint(1+i/8,2+i/3)
self.owner.extra_dex += dx
return Event(-1, "10Cat's Grace1, increasing his dexterity by "+str(dx)+"!")
class Overpower(Spell):
"""
RAAAWR! You use the spirit of Sungod to grant yourself strength!
Grants 10 strength and 5 dexterity for one round"""
cooldown = 4
mana_cost = 4
def cast(self,enemy):
self.owner.apply_state(Overpower_State(self.owner,enemy))
return Event(-1,"14Overpower1, making his/her next attack mighty frightening!")
class Thorns(Spell):
"""
Touch me, I wanna feel your damage!
Returns damage to the enemy when you are damaged."""
cooldown = 7
cd_begin = 1
duration = 4
mana_cost = 14
def cast(self,enemy):
self.owner.apply_state(Thorns_State(self.owner,enemy))
return Event(-1,"3Thorns1, making the enemy take damage when they hit him!")
class ManaShield(Spell):
"""ahue"""
cooldown = 9001
cd_begin = 0
mana_cost = 6
def cast(self,enemy):
self.owner.apply_state(ManaShield_State(self.owner,enemy))
return Event(-1,"2Mana Shield1 to use his mana to protect his vitality")
class ManaDrain(Spell):
"""mmmm yummy!"""
cooldown = 5
cd_begin = 3
mana_cost = 7
def cast(self,enemy):
i = self.owner.intelligence
mana_drained = 12 + i/2
m = min(enemy.mana,mana_drained)
self.owner.mana += m
enemy.mana -= m
enemy.hp -= int(m/2)
return Event(-1,"2Mana Drain1, draining 2"+str(m)+"1 mana, and dealing 10"+str(int(m/2.0))+"1 to his opponent!")
def will_cast(self,enemy):
i = self.owner.intelligence
if self.owner.mana + 6 + i/4 < self.owner.get_maxMP() and enemy.mana > 7:
return True
else:
return False
class CurseOfImpendingDeath(Spell):
"""Kill your enemy! Slooowly!"""
cooldown = 3
mana_cost = 5
def cast(self,enemy):
enemy.apply_state(CurseOfImpendingDeath_State(self.owner,enemy))
stacks = enemy.has_state("Curse of Death stack")
damage = 10 + stacks * int((3 + self.owner.intelligence/3))
return Event(damage, "2 Impending Death, the clock ticks, tick tock... ")
class LifeDrain(Spell):
"""Mmmm, tasty!"""
cooldown = 4
mana_cost = 7
def cast(self,enemy):
damage = 10 + self.owner.intelligence * 3
self.owner.hp += damage
return Event(damage, "2 Life Drain, draining %s health for himself!"%(damage))
def update_stats(winner, loser, connection):
winner.wins += 1
loser.losses += 1
highest = max(winner.level,loser.level)
    lowest = min(winner.level,loser.level)
rank_dif = winner.rank_points - abs(winner.rank_points - loser.rank_points)
#xp = max(int((min(5,1+(highest-lowest/highest)) * rank_dif / 100.0)),0)
rank_points = min(5,(winner.rank_points - loser.rank_points)/4+5) #temp calculation
xp = loser.level
winner.xp += xp
loser.xp += xp/5
winner.rank_points += rank_points
loser.rank_points -= rank_points
if winner.level < len(levels):
if winner.xp > levels[winner.level]:
winner.level += 1
winner.stat_points += 1
winner.xp = 0
winner.skill_points += 1
            # there is no global combat log here, so announce the level-up over IRC directly
            connection.privmsg(channel, winner.name + " has leveled up!")
            connection.privmsg(channel, winner.name + " is now level " + str(winner.level))
return rank_points, xp
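# Worked example of the ranking arithmetic above (illustrative numbers): with a
# winner at 20 rank points and a loser at 12, rank_points = min(5, (20-12)/4+5)
# = min(5, 7) = 5 under Python 2 integer division, xp = loser.level, and the
# loser still collects xp/5 for taking part.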
def _get_hero(owner):
    try:
        return heroes[owner]
    except KeyError:
        print repr(heroes)
        return None
def add_log(message):
global log
if isinstance(message,list):
log.append(message)
else:
log.append([message])
class Combat():
    def __init__(self, attacker, defender, connection, auto=True):
        # a one-on-one fight; the attacker is the hero that issued the command
        self.turn = 1
        self.attacker = attacker
        self.defender = defender
        self.users = [attacker, defender]
        for u in self.users:
            u.hp = u.get_maxHP()
            u.mana = u.get_maxMP()
        self.attack = False
        self.l = []
        self.log = []
        if auto:
            winner = None
            loser = None
            while not winner:
                self.fight_turn()
                if self.defender.hp <= 0:
                    winner = self.attacker
                    loser = self.defender
                elif self.attacker.hp <= 0:
                    winner = self.defender
                    loser = self.attacker
        else:
            # manual mode just registers the fight and waits for commands
            global active_combat
            active_combat = self
            return
        self.attacker.clean_up(self.defender)
        self.defender.clean_up(self.attacker)
        rank,xp = update_stats(winner,loser,connection)
        self.log.append(["AND THE WINNER IS... " + winner.name + "! Winning him/her an amazing 11" + str(rank) + "1 ranking and 6" + str(xp) + "1 xp!"])
        self.log_summarize(connection)
def fight_turn(self):#, a_action, d_action):@TODO: Implement
if not self.attack:
self.l.append("TURN "+str(self.turn)+"!")
if self.attack:
event = self.attacker.action(self.defender)
self.defender.hp -= max(event.damage,0)
self.attacker.on_turn(self.defender)
if event.damage > 0:
self.l.append("%s %s %s for 10%s1 damage!"%(self.attacker.name,
event.action,
self.defender.name,
event.damage))
self.attacker.on_damage(self.defender,event.damage)
self.defender.on_damaged(self.attacker,event.damage)
elif event.damage == -1:
self.l.append("%s uses %s"%(self.attacker.name,event.action))
elif event.damage == -2:
self.l.append("%s %s"%(self.attacker.name,
event.action))
else:
event = self.defender.action(self.attacker)
self.attacker.hp -= event.damage
self.defender.on_turn(self.attacker)
if event.damage > 0:
self.l.append("%s %s %s for 10%s1 damage!"%(self.defender.name,
event.action,
self.attacker.name,
event.damage))
self.attacker.on_damaged(self.defender,event.damage)
self.defender.on_damage(self.attacker,event.damage)
elif event.damage == -1:
self.l.append("%s uses %s"%(self.defender.name,
event.action
))
elif event.damage == -2:
self.l.append("%s %s"%(self.defender.name,
event.action))
if self.attack:
self.turn +=1
self.l.append(self.log_status(self.attacker, self.defender))
self.log.append(self.l)
self.l = []
self.attack = not self.attack
def log_status(self, attacker, defender):
s1 = "%s hp: 4%s1/4%s1 mp: 2%s1/2%s1 "%(attacker.name, attacker.hp,attacker.get_maxHP(),int(attacker.mana),attacker.get_maxMP())
s1 = s1.ljust(20)
mid = " vs "
s2 = "%s hp: 4%s1/4%s1 mp: 2%s1/2%s1 "%(defender.name,defender.hp,defender.get_maxHP(),int(defender.mana),defender.get_maxMP())
s2 = s2.rjust(20)
return s1 + mid + s2
def log_summarize(self,connection):
        sleep_interval = 2
        for i in self.log[-50:]:
            for l in i:
                connection.privmsg("#sunfields",l)
                time.sleep(sleep_interval)
self.log = []
def create_hero(owner, name, trts=None):
    # Hero() already treats trts=None as "roll two random traits"
    Hero(owner, name, trts)
def spend_point(hero, stat,i):
if i <= hero.stat_points:
if stat == "str" or stat == "strength":
hero.stat_points -= i
hero.base_str += i
elif stat == "int" or stat == "intelligence":
hero.stat_points -= i
hero.base_int += i
elif stat == "dex" or stat == "dexterity":
hero.stat_points -= i
hero.base_dex += i
elif stat == "end" or stat == "endurance":
hero.stat_points -= i
hero.base_end += i
def on_msg(connection,event):
cmds = {
"create":create_hero,
}
if event.target() != channel:
return
speaker = event.source().split('!') [0]
msg = event.arguments()[0]
print "Sunfield>" + speaker +": " + msg
print "Sunfield cmd: "+msg
cmd = msg.split()
command,args = cmd[0],cmd[1:]
print "Command: ", command, "Args: ", args
    if active_combat is not None and speaker in [h.owner for h in active_combat.users]:
        # interactive (non-auto) combat is not implemented yet, so commands from
        # its participants are simply swallowed here
        return
if command == "fight" or command == "c":
global heroes
print heroes
a = " ".join(args)
if not args:
pass
elif a == speaker:
pass
elif a not in heroes.keys():
pass
else:
Combat(_get_hero(speaker),_get_hero(a),connection)
elif command == "create" or command == "c":
a = " ".join(args)
name,waste,trts = a.partition(",")
if trts:
trts = [t.strip(" ") for t in trts.split(",")]
if a == "":
pass
else:
if trts:
create_hero(speaker,name,trts)
else:
create_hero(speaker,name)
elif command == "spend":
i = 1
if len(args) == 3 and args[1] == "*":
i = int(args[2])
spend_point(_get_hero(speaker),args[0],i)
elif command == "stats":
if not len(args) or args[0] == "":
hero = _get_hero(speaker)
elif heroes.has_key(args[0]):
hero = _get_hero(args[0])
else:
hero = None
if hero:
hero.repr_stats(connection)
elif command == "wipeYESIAMSURE":
heroes = {}
create_Sungod()
create_dummy_hero("Dummy Weak",end=5)
create_dummy_hero("Dummy Strong",end=10)
elif command == "rename":
if not len(args):
pass
else:
heroes[speaker].name = " ".join(args)
elif command == "save":
save_heroes()
elif command == "learn":
_get_hero(speaker).learn(" ".join(args))
elif command == "skills":
connection.privmsg("#sunfields", ", ".join(learnable_skills.keys()))
elif command == "traits":
for i in traits:
connection.privmsg("#sunfields",i)
elif command == "retrain":
print "-----retrain-----"
print " ".join(args).split(",")
t = [i for i in (" ".join(args)).split(",") if i != " "]
print t
new_traits = [t.strip(" ") for t in (" ".join(args)).split(",") if t != " "][:2]
print new_traits
print len(new_traits)
        if len(new_traits) == 1:
            new_traits.append(random.choice([i for i in traits if i != new_traits[0]]))
        elif len(new_traits) == 0:
            new_traits = random.sample(traits,2)
h = _get_hero(speaker)
print h.traits
h.traits = new_traits
print h.traits
elif command == "patch":
display_patch(connection)
elif command == "info":
a = " ".join(args)
if learnable_skills.has_key(a):
connection.privmsg("%sunfields","skill: "+a)
for h in learnable_skills[a].__doc__.split("\n"):
connection.privmsg("#sunfields",h)
elif command == "heroes":
for i in heroes.values():
connection.privmsg("#sunfields",i.name+" ("+i.owner+")")
    elif command in cmds:
        print "oh my! cmd!"
        cmds[command](*args)
    else:
        print "no have!"
def save_heroes(config = "stats\\heroes.db"):
    s = shelve.open(config)
    for k,v in heroes.items():
        s[k] = v
    s.close()
def load_heroes(config = "stats\\heroes.db"):
global heroes
print os.path.abspath(os.path.curdir)
print os.path.abspath(config)
try:
s = shelve.open(config)
except Exception:
f = open(config, "wb")
f.close()
s = shelve.open(config)
    for k,v in s.items():
        print k,":",v
        heroes[k] = v
    s.close()
def create_Sungod():
h = Hero("Sungod","Sungod")
h.level = 15
h.stat_points += 14
h.skill_points = len(learnable_skills.keys())+1
for i in range(h.stat_points):
i = random.randint(1,4)
if i == 1:
h.base_str += 1
elif i == 2:
h.base_dex += 1
elif i == 3:
h.base_int += 1
elif i == 4:
h.base_end += 1
h.stat_points = 0
for s in random.sample(learnable_skills.keys(),6):
h.learn(s)
def create_dummy_hero(name,end=5):
h = Hero(name,name)
h.base_dex = 0
h.base_end = end
h.base_int = 5
h.base_str = 0
h.learn("Thorns")
h.skill_points = 0
h.stat_points = 0
def display_patch(connection):
for i in patch_notes:
connection.privmsg("#sunfields",i)
def init(heroes_dict):
# heroes = heroes_dict
load_heroes()
print "HEROES: ", heroes
if not heroes.has_key("Sungod"):
create_Sungod()
if not heroes.has_key("Dummy"):
create_dummy_hero("Dummy Weak",end=5)
create_dummy_hero("Dummy Strong",end=10)
learnable_skills = {"Fireball":Fireball,"Healing":Healing,"Cat's Grace":CatsGrace,
"Overpower":Overpower,"Eternal Fire":EternalFire,"Rejuvenation":Rejuvenation,"Thorns":Thorns,"Life Drain":LifeDrain,
"Mana Shield":ManaShield,"Mana Drain":ManaDrain,"Curse Of Impending Death":CurseOfImpendingDeath}
| Sebsebeleb/Sungod | libs/arena.py | Python | gpl-2.0 | 26,381 |
#www.stuffaboutcode.com
#Raspberry Pi, Minecraft Snake
#import the minecraft.py module from the minecraft directory
import minecraft
#import minecraft block module
import block
#import time, so delays can be used
import time
#import random module to create random number
import random
HOUSEWIDTH=6
HOUSEHEIGHT=2
def buildHouse(mc, x, y, z):
#draw floor
mc.setBlocks(x,y-1,z,x+HOUSEWIDTH,y-1,z+HOUSEWIDTH,block.GRASS.id)
#draw walls
mc.setBlocks(x, y, z, x+HOUSEWIDTH, y+HOUSEHEIGHT, z, block.STONE.id)
mc.setBlocks(x+HOUSEWIDTH, y, z, x+HOUSEWIDTH, y+HOUSEHEIGHT, z+HOUSEWIDTH, block.STONE.id)
mc.setBlocks(x+HOUSEWIDTH, y, z+HOUSEWIDTH, x, y+HOUSEHEIGHT, z+HOUSEWIDTH, block.STONE.id)
mc.setBlocks(x, y, z+HOUSEWIDTH, x, y+HOUSEHEIGHT, z, block.STONE.id)
#draw windows
mc.setBlocks(x+(HOUSEWIDTH/2)-2,y+1,z,x+(HOUSEWIDTH/2)-2,y+2,z,block.GLASS.id)
mc.setBlocks(x+(HOUSEWIDTH/2)+2,y+1,z,x+(HOUSEWIDTH/2)+2,y+2,z,block.GLASS.id)
#draw door
#cobble arch
mc.setBlocks(x+(HOUSEWIDTH/2)-1,y,z,x+(HOUSEWIDTH/2)+1,y+2,z,block.COBBLESTONE.id)
# clear space for door
mc.setBlocks(x+(HOUSEWIDTH/2),y,z,x+(HOUSEWIDTH/2),y+1,z,block.AIR.id)
#draw torches
mc.setBlock(x+(HOUSEWIDTH/2)-1,y+2,z-1,block.TORCH.id,1)
mc.setBlock(x+(HOUSEWIDTH/2)+1,y+2,z-1,block.TORCH.id,1)
#draw roof
mc.setBlocks(x,y+HOUSEHEIGHT+1,z,x+HOUSEWIDTH,y+HOUSEHEIGHT+1,z+HOUSEWIDTH,block.WOOD_PLANKS.id)
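# Footprint of the house built above (for the default HOUSEWIDTH=6,
# HOUSEHEIGHT=2): walls run from (x, y, z) to (x+6, y+2, z+6), the floor sits
# at y-1, the roof at y+3 and the door is centred at x+3 on the near z wall,
# which is why clearHouse() wipes the whole x..x+6, y-1..y+3, z..z+6 box.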
def clearHouse(mc, x, y, z):
mc.setBlocks(x,y-1,z,x+HOUSEWIDTH,y+HOUSEHEIGHT+1,z+HOUSEWIDTH,block.AIR.id)
#main program
if __name__ == "__main__":
time.sleep(3)
#Connect to minecraft by creating the minecraft object
# - minecraft needs to be running and in a game
mc = minecraft.Minecraft.create()
playersPath = []
lastPlayerPos = mc.player.getTilePos()
playersPath.append(lastPlayerPos)
lastHousePos = None
while(True):
playerPos = mc.player.getTilePos()
if playerPos != lastPlayerPos:
playersPath.append(playerPos)
lastPlayerPos = playerPos
            #when the player has moved 15 blocks, move their house and reset the path
if len(playersPath) == 15:
#clear the old house (if there was one)
if lastHousePos is not None:
clearHouse(mc, lastHousePos.x, lastHousePos.y, lastHousePos.z)
                #create the house 10 blocks back - we don't want it on top of us!
lastHousePos = playersPath[5]
lastHousePos.y = mc.getHeight(lastHousePos.x,lastHousePos.z)
buildHouse(mc,lastHousePos.x, lastHousePos.y, lastHousePos.z)
#clear list
playersPath[:] = []
| martinohanlon/minecraft-houses | minecraft-house-follow.py | Python | mit | 2,760 |
"""
This module contains base functions to parse different layouts
of the excel files produced by the TECAN infinite pro.
"""
import re
import datetime
import xlrd
import numpy as np
from ..curves import Curve
def parse_tecan(filename, sheet_index=None, info=False):
""" Parses a .xlsx file from a cinetic experiment
File specifications:
"""
sheets = workbook2numpy(filename, sheet_index=sheet_index)
if info:
info_dict = get_info(sheets)
if isinstance(sheets, list):
starts = map(find_start_in_sheet, sheets)
t0 = min([s for s in starts if (s is not None)])
if info:
return [[parse_sheet(sheet, t0=t0)[1] for sheet in sheets], info_dict]
else:
return [parse_sheet(sheet, t0=t0)[1] for sheet in sheets]
else:
if info:
return [parse_sheet(sheets), info_dict]
else:
return parse_sheet(sheets)
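# Minimal usage sketch (assumes a single-sheet TECAN export named
# "kinetics.xlsx"; the filename is illustrative only):
#
#   t0, wells = parse_tecan("kinetics.xlsx")
#   print(wells["A1"].keys())   # labels measured in well A1, each one a Curve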
def get_info(sheets):
info_dict = {}
if isinstance(sheets, list):
i = 0
while len(sheets[i][0]) == 0:
i += 1
sheet = sheets[i]
i = 0
print("SHEET", sheet.shape)
modeindex = 0
nameindex = 0
while i < sheet.shape[0]:
if sheet[i][0].startswith('List of actions'):
print("ACTIONS", i)
info_dict["actions"] = []
i += 1
while not ('Label' in sheet[i][0]):
if len(sheet[i][0]) != 0:
linelist = [var for var in sheet[i] if var]
info_dict["actions"].append(
[linelist[0], ' '.join(linelist[1:])])
i += 1
if sheet[i][0].startswith('Mode'):
print("MODE", i)
linelist = [var for var in sheet[i] if var]
info_dict[modeindex] = [[linelist[0], ' '.join(linelist[1:])]]
i += 1
while not (sheet[i][0].startswith('Mode') or len(sheet[i][0]) == 0 or sheet[i][0].startswith('Start Time')):
linelist = [var for var in sheet[i] if var]
info_dict[modeindex].append(
[linelist[0], ' '.join(linelist[1:])])
i += 1
if len(sheet[i][0]) != 0:
i -= 1
modeindex += 1
if sheet[i][0].startswith('Start Time'):
linelist = [var for var in sheet[i] if var]
info_dict["Start Time"] = ' '.join(linelist[1:])
if sheet[i][0].startswith('Cycle Nr'):
info_dict[nameindex].append(['Name', sheet[i - 1][0]])
nameindex += 1
i += 1
return info_dict
def workbook2numpy(filename, sheet_index=None):
""" loads the xlsx file as a (Numpy) array, or list of
numpy arrays if there are several sheets.
    If `sheet_index` is None, all sheets are loaded. """
book = xlrd.open_workbook(filename)
sheets = np.array(book.sheets())
if sheet_index is None:
sheet_index = range(len(sheets))
if np.isscalar(sheet_index):
        return sheet2numpy(sheets[sheet_index])
else:
res = []
for sh in sheets[sheet_index]:
try:
res.append(sheet2numpy(sh))
except:
pass
return res[0] if len(res) == 1 else res
def find_start_in_sheet(sheet):
for line in sheet:
if len(line) == 0:
pass
elif line[0] == "Start Time:":
return date2seconds(line[1])
return None
def sheet2numpy(sheet):
""" Conversts a xlread Excel sheet to a numpy array """
X, Y = sheet.ncols, sheet.nrows
arr = [[sheet.cell(y, x).value for x in range(X)]
for y in range(Y)]
return np.array(arr)
def parse_sheet(sheet, t0=None):
wells_dict = {"%s%d" % (c, i): dict() for c in "ABCDEFGH"
for i in range(1, 13)}
start_time = 0
for i, line in enumerate(sheet):
if len(line) == 0:
pass
elif line[0] == "Start Time:":
start_time = date2seconds(line[1])
if t0 is None:
t0 = start_time
start_time = start_time - t0
parse_labels(sheet, i, wells_dict, start_time)
return t0, wells_dict
def parse_labels(sheet, i, wells_dict, start_time):
"""
Parses the different labels encountered (and fills the given
plate until an "End Time:" cell is found.
"""
j = i
while sheet[j][0] != "End Time:":
if sheet[j][0] == "Cycle Nr.":
parse_label(sheet, j, wells_dict, start_time)
j += 1
def parse_label(sheet, i, wells_dict, start_time=0,
timePerWell=True, over_replace=-1,
per_column=False):
"""
Parses an array of measurements, supposing that
    line i of the sheet is the first line of an array of the form:
Cycle Nr, 1, 2
Time [s], 0, 34.5
Temp. [C], 23.5, 23.5
A1, 0.3174999952, 0.3181999922
t 00 30 <- time per well activated
A2, 0.3980999887, 0.4104000032
t 02 32
"""
label = sheet[i - 1, 0]
if per_column:
sheet = sheet[i:, :].T
i = 0
try:
xmax = list(sheet[i]).index(u'') - 1
except:
xmax = len(list(sheet[i]))
if sheet[i + 1][1] == '':
# return if the first data element is empty (meaning all data should be
# empty)
return
if not timePerWell:
# read the times once and for all
tt = sheet[i + 1, 1:xmax].astype(float) / 60000 + start_time
j = i + 3
else:
j = i + 2
while (j < sheet.shape[0]) and (sheet[j, 0] != u''):
try:
xmax = list(sheet[j]).index(u'') - 1
except:
xmax = len(list(sheet[j]))
try:
well = sheet[j, 0]
if timePerWell:
tt = sheet[j + 1, 1:xmax].astype(float) / 60000 + start_time
yy = sheet[j, 1:xmax]
yy[yy == 'OVER'] = over_replace
curve = Curve(tt.astype(float), yy.astype(float))
if not (label in wells_dict[well].keys()):
wells_dict[well][label] = curve
else:
                wells_dict[well][label] = wells_dict[well][label].merge_with(curve)
j += 2 if timePerWell else 1
except:
j += 2 if timePerWell else 1
continue
pass
def merge_wells_dicts(wells_dicts):
"""
    Merges the dictionaries
"""
result = {"%s%d" % (c, i): dict() for c in "ABCDEFGH"
for i in range(1, 13)}
for wells_dict in wells_dicts:
for well, curves_dict in wells_dict.items():
for label, curve in curves_dict.items():
if not (label in result[well].keys()):
result[well][label] = curve
else:
result[well][label] =\
result[well][label].merge_with(curve)
return result
def to_coords(s):
""" Converts "A5", "C11", ... into (0,5), (2,11) ... """
return ("ABCDEFGH".index(s[0]), int(s[1:]) - 1)
def date2seconds(timeString):
"""
Converts a Tecan date string ("08/10/2013 17:44:24")
into seconds since 1970/1/1
"""
template = "(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)"
matching = re.match(template, timeString)
day, mth, yr, hr, mn, sec = map(int, matching.groups())
t1 = datetime.datetime(yr, mth, day, hr, mn, sec)
t0 = datetime.datetime(1970, 1, 1)
return (t1 - t0).total_seconds()
| ibis-inria/wellFARE | wellfare/parsing/tecan.py | Python | lgpl-3.0 | 7,472 |
import os
import re
import gtk
import gio
import gobject
import pango
from xdg import BaseDirectory as base
from xdg import DesktopEntry as desktop
from kupfer import config, pretty, utils, icons, version
from kupfer import scheduler, kupferstring
from kupfer.core import settings, plugins, relevance, sources
from kupfer.ui import keybindings
from kupfer.ui.credentials_dialog import ask_user_credentials
from kupfer.ui import getkey_dialog
from kupfer import plugin_support
# index in GtkNotebook
PLUGIN_LIST_PAGE = 2
# List icon pixel size
LIST_ICON_SIZE = 18
# A major HACK
# http://tadeboro.blogspot.com/2009/05/wrapping-adn-resizing-gtklabel.html
def _cb_allocate(label, allocation, maxwid):
if maxwid == -1:
maxwid = 300
label.set_size_request(min(maxwid, allocation.width), -1)
pass
def wrapped_label(text=None, maxwid=-1):
label = gtk.Label(text)
label.set_line_wrap(True)
label.connect("size-allocate", _cb_allocate, maxwid)
return label
def kobject_should_show(obj):
try:
leaf_repr = obj.get_leaf_repr()
except AttributeError:
pass
else:
if leaf_repr is None:
return True
if hasattr(leaf_repr, "is_valid") and not leaf_repr.is_valid():
return False
return True
class PreferencesWindowController (pretty.OutputMixin):
KEYBINDING_NAMES = {
# TRANS: Names of global keyboard shortcuts
'keybinding': _("Show Main Interface"),
'magickeybinding': _("Show with Selection"),
}
KEYBINDING_TARGETS = {
"keybinding": keybindings.KEYBINDING_DEFAULT,
"magickeybinding": keybindings.KEYBINDING_MAGIC,
}
ACCELERATOR_NAMES = {
# TRANS: Names of accelerators in the interface
'activate': _('Alternate Activate'),
# TRANS: The "Comma Trick"/"Put Selection on Stack" allows the
# TRANS: user to select many objects to be used for one action
'comma_trick': _('Comma Trick'),
# TRANS: "Compose Command" makes one object out of the selected
# TRANS: object + action (+iobject)
'compose_action': _('Compose Command'),
'reset_all': _('Reset All'),
'select_quit': _('Select Quit'),
'select_selected_file': _('Select Selected File'),
'select_selected_text': _('Select Selected Text'),
'show_help': _('Show Help'),
'show_preferences': _('Show Preferences'),
'switch_to_source': _('Switch to First Pane'),
"toggle_text_mode_quick": _('Toggle Text Mode'),
}
def __init__(self):
"""Load ui from data file"""
builder = gtk.Builder()
builder.set_translation_domain(version.PACKAGE_NAME)
ui_file = config.get_data_file("preferences.ui")
if ui_file:
builder.add_from_file(ui_file)
else:
self.window = None
return
builder.connect_signals(self)
self.window = builder.get_object("preferenceswindow")
self.window.set_position(gtk.WIN_POS_CENTER)
self.window.connect("delete-event", self._close_window)
self.pluglist_parent = builder.get_object("plugin_list_parent")
self.dirlist_parent = builder.get_object("directory_list_parent")
self.plugin_about_parent = builder.get_object("plugin_about_parent")
self.preferences_notebook = builder.get_object("preferences_notebook")
self.buttonremovedirectory = builder.get_object("buttonremovedirectory")
checkautostart = builder.get_object("checkautostart")
checkstatusicon = builder.get_object("checkstatusicon")
checkcloseonunfocus = builder.get_object("checkcloseonunfocus")
checkusecommandkeys = builder.get_object("checkusecommandkeys")
self.entry_plugins_filter = builder.get_object('entry_plugins_filter')
self.keybindings_list_parent = builder.get_object('keybindings_list_parent')
self.gkeybindings_list_parent = builder.get_object('gkeybindings_list_parent')
source_list_parent = builder.get_object("source_list_parent")
self.sources_list_ctrl = SourceListController(source_list_parent)
setctl = settings.GetSettingsController()
checkautostart.set_active(self._get_should_autostart())
checkstatusicon.set_active(setctl.get_show_status_icon())
checkusecommandkeys.set_active(setctl.get_use_command_keys())
checkcloseonunfocus.set_active(setctl.get_close_on_unfocus())
# Plugin List
columns = [
{"key": "plugin_id", "type": str },
{"key": "enabled", "type": bool },
{"key": "icon-name", "type": str },
{"key": "markup", "type": str },
]
# setup plugin list table
column_types = [c["type"] for c in columns]
self.columns = [c["key"] for c in columns]
self.store = gtk.ListStore(*column_types)
self.table = gtk.TreeView(self.store)
self.table.set_headers_visible(False)
self.table.set_property("enable-search", False)
self.table.set_rules_hint(True)
self.table.connect("cursor-changed", self.plugin_table_cursor_changed)
self.table.get_selection().set_mode(gtk.SELECTION_BROWSE)
checkcell = gtk.CellRendererToggle()
checkcol = gtk.TreeViewColumn("item", checkcell)
checkcol.add_attribute(checkcell, "active",
self.columns.index("enabled"))
checkcell.connect("toggled", self.on_checkplugin_toggled)
icon_cell = gtk.CellRendererPixbuf()
icon_cell.set_property("height", LIST_ICON_SIZE)
icon_cell.set_property("width", LIST_ICON_SIZE)
icon_col = gtk.TreeViewColumn("icon", icon_cell)
icon_col.add_attribute(icon_cell, "icon-name",
self.columns.index("icon-name"))
cell = gtk.CellRendererText()
col = gtk.TreeViewColumn("item", cell)
col.add_attribute(cell, "markup", self.columns.index("markup"))
self.table.append_column(checkcol)
# hide icon for now
#self.table.append_column(icon_col)
self.table.append_column(col)
self.plugin_list_timer = scheduler.Timer()
self.plugin_info = utils.locale_sort(plugins.get_plugin_info(),
key= lambda rec: rec["localized_name"])
self._refresh_plugin_list()
self.output_debug("Standard Plugins: %d" % len(self.store))
self.table.show()
self.pluglist_parent.add(self.table)
# Directory List
self.dir_store = gtk.ListStore(str, gio.Icon, str)
self.dir_table = gtk.TreeView(self.dir_store)
self.dir_table.set_headers_visible(False)
self.dir_table.set_property("enable-search", False)
self.dir_table.connect("cursor-changed", self.dir_table_cursor_changed)
self.dir_table.get_selection().set_mode(gtk.SELECTION_BROWSE)
icon_cell = gtk.CellRendererPixbuf()
icon_col = gtk.TreeViewColumn("icon", icon_cell)
icon_col.add_attribute(icon_cell, "gicon", 1)
cell = gtk.CellRendererText()
col = gtk.TreeViewColumn("name", cell)
col.add_attribute(cell, "text", 2)
cell.set_property("ellipsize", pango.ELLIPSIZE_END)
self.dir_table.append_column(icon_col)
self.dir_table.append_column(col)
self.dir_table.show()
self.dirlist_parent.add(self.dir_table)
self.read_directory_settings()
# keybindings list
self.keybind_table, self.keybind_store = _create_conf_keys_list()
self.keybindings_list_parent.add(self.keybind_table)
self.keybind_table.connect("row-activated", self.on_keybindings_row_activate)
# global keybindings list
self.gkeybind_table, self.gkeybind_store = _create_conf_keys_list()
self.gkeybindings_list_parent.add(self.gkeybind_table)
self.gkeybind_table.connect("row-activated",
self.on_gkeybindings_row_activate)
self._show_keybindings(setctl)
self._show_gkeybindings(setctl)
def _show_keybindings(self, setctl):
names = self.KEYBINDING_NAMES
self.keybind_store.clear()
for binding in sorted(names, key=lambda k: names[k]):
accel = setctl.get_global_keybinding(binding)
label = gtk.accelerator_get_label(*gtk.accelerator_parse(accel))
self.keybind_store.append((names[binding], label, binding))
def _show_gkeybindings(self, setctl):
names = self.ACCELERATOR_NAMES
self.gkeybind_store.clear()
for binding in sorted(names, key=lambda k: names[k]):
accel = setctl.get_accelerator(binding)
label = gtk.accelerator_get_label(*gtk.accelerator_parse(accel))
self.gkeybind_store.append((names[binding], label, binding))
def read_directory_settings(self):
setctl = settings.GetSettingsController()
dirs = setctl.get_directories()
for d in dirs:
self.add_directory_model(d, store=False)
def add_directory_model(self, d, store=False):
have = list(os.path.normpath(row[0]) for row in self.dir_store)
if d in have:
self.output_debug("Ignoring duplicate directory: ", d)
return
else:
have.append(d)
d = os.path.expanduser(d)
dispname = utils.get_display_path_for_bytestring(d)
gicon = icons.get_gicon_for_file(d)
self.dir_store.append((d, gicon, dispname))
if store:
setctl = settings.GetSettingsController()
setctl.set_directories(have)
def remove_directory_model(self, rowiter, store=True):
dirpath = self.dir_store.get_value(rowiter, 0)
self.dir_store.remove(rowiter)
if store:
have = list(os.path.normpath(row[0]) for row in self.dir_store)
setctl = settings.GetSettingsController()
setctl.set_directories(have)
def on_preferenceswindow_key_press_event(self, widget, event):
if event.keyval == gtk.gdk.keyval_from_name("Escape"):
self.hide()
return True
def on_checkstatusicon_toggled(self, widget):
setctl = settings.GetSettingsController()
setctl.set_show_status_icon(widget.get_active())
def _get_should_autostart(self):
KUPFER_DESKTOP = "kupfer.desktop"
AUTOSTART_KEY = "X-GNOME-Autostart-enabled"
autostart_dir = base.save_config_path("autostart")
autostart_file = os.path.join(autostart_dir, KUPFER_DESKTOP)
if not os.path.exists(autostart_file):
return False
dfile = desktop.DesktopEntry(autostart_file)
return (dfile.hasKey(AUTOSTART_KEY) and
dfile.get(AUTOSTART_KEY, type="boolean"))
def on_checkautostart_toggled(self, widget):
KUPFER_DESKTOP = "kupfer.desktop"
AUTOSTART_KEY = "X-GNOME-Autostart-enabled"
autostart_dir = base.save_config_path("autostart")
autostart_file = os.path.join(autostart_dir, KUPFER_DESKTOP)
if not os.path.exists(autostart_file):
desktop_files = list(base.load_data_paths("applications",
KUPFER_DESKTOP))
if not desktop_files:
self.output_error("Installed kupfer desktop file not found!")
return
desktop_file_path = desktop_files[0]
# Read installed file and modify it
dfile = desktop.DesktopEntry(desktop_file_path)
executable = dfile.getExec()
if "--no-splash" not in executable:
executable += " --no-splash"
dfile.set("Exec", executable)
else:
dfile = desktop.DesktopEntry(autostart_file)
activestr = str(bool(widget.get_active())).lower()
self.output_debug("Setting autostart to", activestr)
dfile.set(AUTOSTART_KEY, activestr)
dfile.write(filename=autostart_file)
def on_entrykeybinding_changed(self, widget):
pass
def on_buttonkeybinding_clicked(self, widget):
keystr = getkey_dialog.ask_for_key(keybindings.bind_key)
if keystr:
self.entrykeybinding.set_text(keystr)
self.output_debug("Try set keybinding with", keystr)
succ = keybindings.bind_key(keystr)
setctl = settings.GetSettingsController()
setctl.set_keybinding(keystr)
def on_helpbutton_clicked(self, widget):
pass
def on_closebutton_clicked(self, widget):
self.hide()
def _refresh_plugin_list(self, us_filter=None):
"List plugins that pass text filter @us_filter or list all if None"
self.store.clear()
setctl = settings.GetSettingsController()
if us_filter:
self.plugin_list_timer.set_ms(300, self._show_focus_topmost_plugin)
else:
self.plugin_list_timer.invalidate()
for info in self.plugin_info:
plugin_id = info["name"]
if setctl.get_plugin_is_hidden(plugin_id):
continue
enabled = setctl.get_plugin_enabled(plugin_id)
name = info["localized_name"]
folded_name = kupferstring.tofolded(name)
desc = info["description"]
text = u"%s" % name
if us_filter:
name_score = relevance.score(name, us_filter)
fold_name_score = relevance.score(folded_name, us_filter)
desc_score = relevance.score(desc, us_filter)
if not name_score and not fold_name_score and desc_score < 0.9:
continue
self.store.append((plugin_id, enabled, "kupfer-object", text))
def _show_focus_topmost_plugin(self):
try:
first_row = iter(self.store).next()
except StopIteration:
return
plugin_id = first_row[0]
self.show_focus_plugin(plugin_id)
def on_checkplugin_toggled(self, cell, path):
checkcol = self.columns.index("enabled")
plugin_id = self._id_for_table_path(path)
it = self.store.get_iter(path)
plugin_is_enabled = not self.store.get_value(it, checkcol)
self.store.set_value(it, checkcol, plugin_is_enabled)
setctl = settings.GetSettingsController()
setctl.set_plugin_enabled(plugin_id, plugin_is_enabled)
def _id_for_table_path(self, path):
it = self.store.get_iter(path)
id_col = self.columns.index("plugin_id")
plugin_id = self.store.get_value(it, id_col)
return plugin_id
def _table_path_for_id(self, id_):
"""
Find the tree path of plugin @id_
"""
id_col = self.columns.index("plugin_id")
for row in self.store:
plugin_id = row[id_col]
if plugin_id == id_:
return row.path
raise ValueError("No such plugin %s" % id_)
def _plugin_info_for_id(self, plugin_id):
for info in self.plugin_info:
if info["name"] == plugin_id:
return info
return None
def plugin_table_cursor_changed(self, table):
curpath, curcol = table.get_cursor()
if not curpath:
return
plugin_id = self._id_for_table_path(curpath)
self.plugin_sidebar_update(plugin_id)
def plugin_sidebar_update(self, plugin_id):
about = gtk.VBox()
about.set_property("spacing", 15)
about.set_property("border-width", 5)
info = self._plugin_info_for_id(plugin_id)
title_label = gtk.Label()
title_label.set_markup(u"<b><big>%s</big></b>" % info["localized_name"])
version, description, author = plugins.get_plugin_attributes(plugin_id,
( "__version__", "__description__", "__author__", ))
about.pack_start(title_label, False)
infobox = gtk.VBox()
infobox.set_property("spacing", 3)
# TRANS: Plugin info fields
for field, val in zip((_("Description"), _("Author")),
(description, author)):
if not val:
continue
label = gtk.Label()
label.set_alignment(0, 0)
label.set_markup(u"<b>%s</b>" % field)
infobox.pack_start(label, False)
label = wrapped_label()
label.set_alignment(0, 0)
label.set_markup(u"%s" % gobject.markup_escape_text(val))
label.set_selectable(True)
infobox.pack_start(label, False)
if version:
label = wrapped_label()
label.set_alignment(0, 0)
label.set_markup(u"<b>%s:</b> %s" % (_("Version"), version))
label.set_selectable(True)
infobox.pack_start(label, False)
about.pack_start(infobox, False)
# Check for plugin load exception
exc_info = plugins.get_plugin_error(plugin_id)
if exc_info is not None:
etype, error, tb = exc_info
# TRANS: Error message when Plugin needs a Python module to load
import_error_localized = _("Python module '%s' is needed") % u"\\1"
import_error_pat = u"No module named ([^\s]+)"
errmsg = unicode(error)
if re.match(import_error_pat, errmsg):
errstr = re.sub(import_error_pat,
import_error_localized,
errmsg, count=1)
else:
import traceback
errstr = "".join(traceback.format_exception(*exc_info))
label = wrapped_label()
label.set_alignment(0, 0)
label.set_markup(u"<b>%s</b>\n\n%s" % (
_("Plugin could not be read due to an error:"),
gobject.markup_escape_text(errstr),
))
label.set_selectable(True)
about.pack_start(label, False)
elif not plugins.is_plugin_loaded(plugin_id):
label = gtk.Label()
label.set_alignment(0, 0)
label.set_text(u"(%s)" % _("disabled"))
about.pack_start(label, False)
wid = self._make_plugin_info_widget(plugin_id)
about.pack_start(wid, False)
psettings_wid = self._make_plugin_settings_widget(plugin_id)
if psettings_wid:
about.pack_start(psettings_wid, False)
oldch = self.plugin_about_parent.get_child()
if oldch:
self.plugin_about_parent.remove(oldch)
vp = gtk.Viewport()
vp.set_shadow_type(gtk.SHADOW_NONE)
vp.add(about)
self.plugin_about_parent.add(vp)
self.plugin_about_parent.show_all()
def _make_plugin_info_widget(self, plugin_id):
sources, actions, text_sources = \
plugins.get_plugin_attributes(plugin_id, (
plugins.sources_attribute,
plugins.action_decorators_attribute,
plugins.text_sources_attribute)
)
all_items = list()
vbox = gtk.VBox()
vbox.set_property("spacing", 5)
def make_objects_frame(objs, title):
frame_label = gtk.Label()
frame_label.set_markup(u"<b>%s</b>" % title)
frame_label.set_alignment(0, 0)
objvbox = gtk.VBox()
objvbox.pack_start(frame_label, False)
objvbox.set_property("spacing", 3)
for item in objs:
plugin_type = plugins.get_plugin_attribute(plugin_id, item)
if not plugin_type:
continue
hbox = gtk.HBox()
hbox.set_property("spacing", 3)
obj = plugin_type()
name = unicode(obj)
desc = obj.get_description() or u""
gicon = obj.get_icon()
im = gtk.Image()
im.set_property("gicon", gicon)
im.set_property("pixel-size", 32)
hbox.pack_start(im, False)
name_label = \
u"%s\n<small>%s</small>" % (name, desc) if desc else \
u"%s" % (name, )
label = wrapped_label()
label.set_markup(name_label)
hbox.pack_start(label, False)
objvbox.pack_start(hbox)
# Display information for application content-sources.
if not kobject_should_show(obj):
continue
try:
leaf_repr = obj.get_leaf_repr()
except AttributeError:
continue
if leaf_repr is None:
continue
hbox = gtk.HBox()
hbox.set_property("spacing", 3)
gicon = leaf_repr.get_icon()
im = gtk.Image()
im.set_property("gicon", gicon)
im.set_property("pixel-size", 16)
hbox.pack_start(gtk.Label(_("Content of")), False)
hbox.pack_start(im, False)
hbox.pack_start(gtk.Label(unicode(leaf_repr)), False)
objvbox.pack_start(hbox)
return objvbox
sources = list(sources or ()) + list(text_sources or ())
if sources:
# TRANS: Plugin contents header
swid = make_objects_frame(sources, _("Sources"))
vbox.pack_start(swid)
if actions:
# TRANS: Plugin contents header
awid = make_objects_frame(actions, _("Actions"))
vbox.pack_start(awid)
vbox.show_all()
return vbox
def _get_plugin_change_callback(self, plugin_id, key, value_type,
get_attr, no_false_values=False):
"""Callback factory for the plugin parameter configuration"""
def callback(widget):
value = getattr(widget, get_attr)()
if no_false_values and not value:
return
setctl = settings.GetSettingsController()
setctl.set_plugin_config(plugin_id, key, value, value_type)
return callback
def _get_plugin_credentials_callback(self, plugin_id, key):
def callback(widget):
setctl = settings.GetSettingsController()
val_type = plugin_support.UserNamePassword
upass = setctl.get_plugin_config(plugin_id, key, val_type) \
or plugin_support.UserNamePassword()
user_password = ask_user_credentials(upass.username, upass.password)
if user_password:
upass.username, upass.password = user_password
setctl.set_plugin_config(plugin_id, key, upass, val_type)
return callback
def _make_plugin_settings_widget(self, plugin_id):
plugin_settings = plugins.get_plugin_attribute(plugin_id,
plugins.settings_attribute)
if not plugin_settings:
return None
info = self._plugin_info_for_id(plugin_id)
title_label = gtk.Label()
# TRANS: Plugin-specific configuration (header)
title_label.set_markup(u"<b>%s</b>" % _("Configuration"))
title_label.set_alignment(0, 0)
vbox = gtk.VBox()
vbox.pack_start(title_label, False)
#vbox.set_property("spacing", 5)
plugin_settings_keys = iter(plugin_settings) if plugin_settings else ()
for setting in plugin_settings_keys:
typ = plugin_settings.get_value_type(setting)
alternatives = plugin_settings.get_alternatives(setting)
tooltip = plugin_settings.get_tooltip(setting)
wid = None
hbox = gtk.HBox()
hbox.set_property("spacing", 10)
if tooltip:
hbox.set_tooltip_text(tooltip)
label = plugin_settings.get_label(setting)
if issubclass(typ, plugin_support.UserNamePassword):
wid = gtk.Button(label or _("Set username and password"))
wid.connect("clicked", self._get_plugin_credentials_callback(
plugin_id, setting))
hbox.pack_start(wid, False)
vbox.pack_start(hbox, False)
continue
label_wid = wrapped_label(label, maxwid=200)
if issubclass(typ, basestring):
if alternatives:
wid = gtk.combo_box_new_text()
val = plugin_settings[setting]
active_index = -1
for idx, text in enumerate(alternatives):
wid.append_text(text)
if text == val:
active_index = idx
if active_index < 0:
wid.prepend_text(val)
active_index = 0
wid.set_active(active_index)
wid.connect("changed", self._get_plugin_change_callback(
plugin_id, setting, typ, "get_active_text"))
else:
wid = gtk.Entry()
wid.set_text(plugin_settings[setting])
wid.connect("changed", self._get_plugin_change_callback(
plugin_id, setting, typ, "get_text",
no_false_values=True))
hbox.pack_start(label_wid, False)
hbox.pack_start(wid, True)
elif issubclass(typ, bool):
wid = gtk.CheckButton(label)
wid.set_active(plugin_settings[setting])
hbox.pack_start(wid, False)
wid.connect("toggled", self._get_plugin_change_callback(
plugin_id, setting, typ, "get_active"))
elif issubclass(typ, int):
wid = gtk.SpinButton()
wid.set_increments(1, 1)
wid.set_range(0, 1000)
wid.set_value(plugin_settings[setting])
hbox.pack_start(label_wid, False)
hbox.pack_start(wid, False)
wid.connect("changed", self._get_plugin_change_callback(
plugin_id, setting, typ, "get_text", no_false_values=True))
vbox.pack_start(hbox, False)
vbox.show_all()
return vbox
def on_buttonadddirectory_clicked(self, widget):
# TRANS: File Chooser Title
chooser_dialog = gtk.FileChooserDialog(title=_("Choose a Directory"),
action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
if chooser_dialog.run() == gtk.RESPONSE_ACCEPT:
selected_dir = chooser_dialog.get_filename()
self.add_directory_model(selected_dir, store=True)
chooser_dialog.hide()
def on_buttonremovedirectory_clicked(self, widget):
curpath, curcol = self.dir_table.get_cursor()
if not curpath:
return
it = self.dir_store.get_iter(curpath)
self.remove_directory_model(it, store=True)
def on_entry_plugins_filter_changed(self, widget):
s_filter = widget.get_text()
us_filter = kupferstring.tounicode(s_filter).lower()
self._refresh_plugin_list(us_filter)
def on_entry_plugins_filter_icon_press(self, entry, icon_pos, event):
entry.set_text('')
def on_keybindings_row_activate(self, treeview, path, view_column):
def bind_key_func(target):
def bind_key(keystr):
return keybindings.bind_key(keystr, target)
return bind_key
it = self.keybind_store.get_iter(path)
keybind_id = self.keybind_store.get_value(it, 2)
setctl = settings.GetSettingsController()
curr_key = setctl.get_global_keybinding(keybind_id)
bind_func = bind_key_func(self.KEYBINDING_TARGETS[keybind_id])
keystr = getkey_dialog.ask_for_key(bind_func, curr_key)
if keystr == '':
keybindings.bind_key(None, self.KEYBINDING_TARGETS[keybind_id])
setctl.set_global_keybinding(keybind_id, keystr)
self.keybind_store.set_value(it, 1, '')
elif keystr is not None:
setctl.set_global_keybinding(keybind_id, keystr)
label = gtk.accelerator_get_label(*gtk.accelerator_parse(keystr))
self.keybind_store.set_value(it, 1, label)
def _is_good_keystr(self, keystr):
# Reject single letters so you can't bind 'A' etc
if keystr is None:
return
label = gtk.accelerator_get_label(*gtk.accelerator_parse(keystr))
ulabel = kupferstring.tounicode(label)
return not (len(ulabel) == 1 and ulabel.isalnum())
def on_gkeybindings_row_activate(self, treeview, path, view_column):
it = self.gkeybind_store.get_iter(path)
keybind_id = self.gkeybind_store.get_value(it, 2)
setctl = settings.GetSettingsController()
curr_key = setctl.get_accelerator(keybind_id)
keystr = getkey_dialog.ask_for_key(self._is_good_keystr,
previous_key=curr_key)
if keystr is not None:
setctl.set_accelerator(keybind_id, keystr)
label = gtk.accelerator_get_label(*gtk.accelerator_parse(keystr))
self.gkeybind_store.set_value(it, 1, label)
def on_button_reset_keys_clicked(self, button):
if self.ask_user_for_reset_keybinding():
setctl = settings.GetSettingsController()
setctl.reset_keybindings()
self._show_keybindings(setctl)
# Unbind all before re-binding
for keybind_id, target in self.KEYBINDING_TARGETS.iteritems():
keybindings.bind_key(None, target)
for keybind_id, target in self.KEYBINDING_TARGETS.iteritems():
keystr = setctl.get_global_keybinding(keybind_id)
keybindings.bind_key(keystr, target)
def on_button_reset_gkeys_clicked(self, button):
if self.ask_user_for_reset_keybinding():
setctl = settings.GetSettingsController()
setctl.reset_accelerators()
self._show_gkeybindings(setctl)
def on_checkusecommandkeys_toggled(self, widget):
setctl = settings.GetSettingsController()
setctl.set_use_command_keys(widget.get_active())
def on_checkcloseonunfocus_toggled(self, widget):
setctl = settings.GetSettingsController()
setctl.set_close_on_unfocus(widget.get_active())
def dir_table_cursor_changed(self, table):
curpath, curcol = table.get_cursor()
if not curpath or not self.dir_store:
self.buttonremovedirectory.set_sensitive(False)
return
self.buttonremovedirectory.set_sensitive(True)
def show(self):
self.window.present()
def show_focus_plugin(self, plugin_id):
"""
Open and show information about plugin @plugin_id
"""
try:
table_path = self._table_path_for_id(plugin_id)
except ValueError:
self.entry_plugins_filter.set_text(u"")
self._refresh_plugin_list()
table_path = self._table_path_for_id(plugin_id)
self.table.set_cursor(table_path)
self.table.scroll_to_cell(table_path)
self.preferences_notebook.set_current_page(PLUGIN_LIST_PAGE)
self.window.present()
def hide(self):
self.window.hide()
def _close_window(self, *ignored):
self.hide()
return True
def ask_user_for_reset_keybinding(self):
dlg = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION)
dlg.set_markup(_("Reset all shortcuts to default values?"))
dlg.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CLOSE,
_('Reset'), gtk.RESPONSE_ACCEPT)
result = dlg.run() == gtk.RESPONSE_ACCEPT
dlg.destroy()
return result
_conf_keys_list_columns = [{"key": "name", "type":str, 'header': _('Command')},
{"key": "key", "type": str, 'header': _('Shortcut') },
{"key": "keybinding_id", "type": str, 'header': None}]
_conf_keys_list_column_types = [c["type"] for c in _conf_keys_list_columns]
def _create_conf_keys_list():
keybind_store = gtk.ListStore(*_conf_keys_list_column_types)
keybind_table = gtk.TreeView(keybind_store)
for idx, col in enumerate(_conf_keys_list_columns):
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(col['header'], renderer, text=idx)
column.set_visible(col['header'] is not None)
keybind_table.append_column(column)
keybind_table.set_property("enable-search", False)
keybind_table.set_rules_hint(True)
keybind_table.set_headers_visible(True)
keybind_table.show()
return keybind_table, keybind_store
_preferences_window = None
def GetPreferencesWindowController():
global _preferences_window
if _preferences_window is None:
_preferences_window = PreferencesWindowController()
return _preferences_window
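# Usage sketch: other UI modules are expected to fetch the singleton and present it,
# for example (the plugin id is illustrative):
#   GetPreferencesWindowController().show()
#   GetPreferencesWindowController().show_focus_plugin("core")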
class SourceListController (object):
def __init__(self, parent_widget):
columns = [
{"key": "source", "type": gobject.TYPE_PYOBJECT },
{"key": "plugin_id", "type": str },
{"key": "toplevel", "type": bool },
{"key": "icon", "type": gio.Icon },
{"key": "markup", "type": str },
]
# setup plugin list table
column_types = [c["type"] for c in columns]
self.columns = [c["key"] for c in columns]
self.store = gtk.ListStore(*column_types)
self.table = gtk.TreeView(self.store)
self.table.set_headers_visible(False)
self.table.set_property("enable-search", False)
self.table.set_rules_hint(True)
#self.table.connect("cursor-changed", self.plugin_table_cursor_changed)
self.table.get_selection().set_mode(gtk.SELECTION_NONE)
checkcell = gtk.CellRendererToggle()
checkcol = gtk.TreeViewColumn("item", checkcell)
checkcol.add_attribute(checkcell, "active",
self.columns.index("toplevel"))
checkcell.connect("toggled", self.on_checktoplevel_enabled)
icon_cell = gtk.CellRendererPixbuf()
icon_cell.set_property("height", LIST_ICON_SIZE)
icon_cell.set_property("width", LIST_ICON_SIZE)
icon_col = gtk.TreeViewColumn("icon", icon_cell)
icon_col.add_attribute(icon_cell, "gicon",
self.columns.index("icon"))
cell = gtk.CellRendererText()
col = gtk.TreeViewColumn("item", cell)
col.add_attribute(cell, "markup", self.columns.index("markup"))
self.table.append_column(checkcol)
self.table.append_column(icon_col)
self.table.append_column(col)
self._refresh()
self.table.show()
parent_widget.add(self.table)
setctl = settings.GetSettingsController()
setctl.connect("plugin-enabled-changed", self._refresh)
def _refresh(self, *ignored):
self.store.clear()
setctl = settings.GetSettingsController()
sc = sources.GetSourceController()
srcs = sorted(sc.get_sources(), key=unicode)
for src in srcs:
name = unicode(src)
plugin_id = sc.get_plugin_id_for_object(src)
if not plugin_id or setctl.get_plugin_is_hidden(plugin_id):
continue
if not kobject_should_show(src):
continue
gicon = src.get_icon()
toplevel = setctl.get_source_is_toplevel(plugin_id, src)
self.store.append((src, plugin_id, toplevel, gicon, name))
def on_checktoplevel_enabled(self, cell, path):
it = self.store.get_iter(path)
checkcol = self.columns.index("toplevel")
idcol = self.columns.index("plugin_id")
srccol = self.columns.index("source")
is_toplevel = not self.store.get_value(it, checkcol)
plugin_id = self.store.get_value(it, idcol)
src = self.store.get_value(it, srccol)
sc = sources.GetSourceController()
sc.set_toplevel(src, is_toplevel)
setctl = settings.GetSettingsController()
setctl.set_source_is_toplevel(plugin_id, src, is_toplevel)
self.store.set_value(it, checkcol, is_toplevel)
| cjparsons74/kupfer | kupfer/ui/preferences.py | Python | gpl-3.0 | 30,515 |
""" Dummy Service is a service for testing new dirac protocol
This file must be copied in FrameworkSystem/Service to run tests
"""
__RCSID__ = "$Id$"
import six
from DIRAC import S_OK
from DIRAC.Core.DISET.RequestHandler import RequestHandler
# You need to copy ../DB/UserDB in DIRAC/FrameworkSystem/DB
from DIRAC.FrameworkSystem.DB.UserDB import UserDB # pylint: disable=no-name-in-module, import-error
from DIRAC import gConfig
class UserDiracHandler(RequestHandler):
"""
    A handler designed for testing Tornado by implementing basic access to a database
Designed to compare Diset and Tornado
"""
@classmethod
def initializeHandler(cls, serviceInfo):
"""Handler initialization"""
cls.userDB = UserDB()
return S_OK()
auth_addUser = ["all"]
types_addUser = [six.string_types]
def export_addUser(self, whom):
"""Add a user and return user id"""
newUser = self.userDB.addUser(whom)
if newUser["OK"]:
return S_OK(newUser["lastRowId"])
return newUser
auth_editUser = ["all"]
types_editUser = [int, six.string_types]
def export_editUser(self, uid, value):
"""Edit a user"""
return self.userDB.editUser(uid, value)
auth_getUserName = ["all"]
types_getUserName = [int]
def export_getUserName(self, uid):
"""Get a user"""
return self.userDB.getUserName(uid)
auth_listUsers = ["all"]
types_listUsers = []
def export_listUsers(self):
return self.userDB.listUsers()
auth_unauthorized = ["nobody"]
types_unauthorized = []
def export_unauthorized(self):
return S_OK()
auth_getTestValue = ["all"]
types_getTestValue = []
def export_getTestValue(self):
return S_OK(gConfig.getValue("/DIRAC/Configuration/TestUpdateValue"))
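# Client-side sketch (the service name "Framework/UserDirac" is an assumption and
# must match the actual Configuration Service registration):
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   userSrv = RPCClient("Framework/UserDirac")
#   res = userSrv.addUser("alice")
#   if res["OK"]:
#       print(res["Value"])  # id of the newly created user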
| ic-hep/DIRAC | tests/Integration/TornadoServices/Services/UserDiracHandler.py | Python | gpl-3.0 | 1,849 |
# -*- coding: utf-8 -*-
"""
Implements the TwoSteps class
"""
from xfv.src.custom_functions.custom_function import CustomFunction
class TwoSteps(CustomFunction):
"""
This class defines a 2 constant steps function
.. image:: two_steps.png
:scale: 75 %
:align: center
"""
def __init__(self, first_value, second_value, critical_time):
self.__first_value = first_value
self.__second_value = second_value
self.__critical_time = critical_time
def evaluate(self, time, *args, **kwargs):
"""
Returns the value of the function evaluated at time
:param time: the required time
:return: the value
"""
if time <= self.__critical_time:
return self.__first_value
return self.__second_value
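# Minimal usage sketch (values are illustrative): the function returns first_value for
# times up to and including critical_time, and second_value afterwards.
#   pressure = TwoSteps(1.0e9, 0.5e9, 1.0e-6)
#   pressure.evaluate(5.0e-7)   # -> 1.0e9
#   pressure.evaluate(2.0e-6)   # -> 0.5e9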
| hippo91/XVOF | xfv/src/custom_functions/two_steps.py | Python | gpl-3.0 | 813 |
from datetime import datetime, tzinfo
try:
import pytz
except ImportError:
pytz = None
from django.template import Node
from django.template import TemplateSyntaxError, Library
from django.utils import six
from django.utils import timezone
register = Library()
# HACK: datetime is an old-style class, create a new-style equivalent
# so we can define additional attributes.
class datetimeobject(datetime, object):
pass
# Template filters
@register.filter
def localtime(value):
"""
Converts a datetime to local time in the active time zone.
This only makes sense within a {% localtime off %} block.
"""
return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
"""
Converts a datetime to UTC.
"""
return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
"""
Converts a datetime to local time in a given time zone.
The argument must be an instance of a tzinfo subclass or a time zone name.
If it is a time zone name, pytz is required.
Naive datetimes are assumed to be in local time in the default time zone.
"""
if not isinstance(value, datetime):
return ''
# Obtain a timezone-aware datetime
try:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
# Filters must never raise exceptions, and pytz' exceptions inherit
# Exception directly, not a specific subclass. So catch everything.
except Exception:
return ''
# Obtain a tzinfo instance
if isinstance(arg, tzinfo):
tz = arg
elif isinstance(arg, six.string_types) and pytz is not None:
try:
tz = pytz.timezone(arg)
except pytz.UnknownTimeZoneError:
return ''
else:
return ''
result = timezone.localtime(value, tz)
# HACK: the convert_to_local_time flag will prevent
# automatic conversion of the value to local time.
result = datetimeobject(result.year, result.month, result.day,
result.hour, result.minute, result.second,
result.microsecond, result.tzinfo)
result.convert_to_local_time = False
return result
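# Template usage sketch for the filters above (``value`` is assumed to be a datetime
# available in the template context):
#     {{ value|localtime }}
#     {{ value|utc }}
#     {{ value|timezone:"Europe/Paris" }}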
# Template tags
class LocalTimeNode(Node):
"""
Template node class used by ``localtime_tag``.
"""
def __init__(self, nodelist, use_tz):
self.nodelist = nodelist
self.use_tz = use_tz
def render(self, context):
old_setting = context.use_tz
context.use_tz = self.use_tz
output = self.nodelist.render(context)
context.use_tz = old_setting
return output
class TimezoneNode(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
class GetCurrentTimezoneNode(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ''
@register.tag('localtime')
def localtime_tag(parser, token):
"""
Forces or prevents conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
bits[0])
else:
use_tz = bits[1] == 'on'
nodelist = parser.parse(('endlocaltime',))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
"""
Enables a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, the default time zone is used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(('endtimezone',))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Stores the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_timezone' requires "
"'as variable' (got %r)" % args)
return GetCurrentTimezoneNode(args[2])
| edisonlz/fruit | web_project/base/site-packages/django/templatetags/tz.py | Python | apache-2.0 | 5,624 |
# Copyright (c) 2014-present PlatformIO <[email protected]>
# Copyright 2020 MongoDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# pylint: disable=unused-argument, protected-access, unused-variable, import-error
# Original: https://github.com/mongodb/mongo/blob/master/site_scons/site_tools/compilation_db.py
from __future__ import absolute_import
import itertools
import json
import os
import SCons
from platformio.builder.tools.platformio import SRC_ASM_EXT, SRC_C_EXT, SRC_CXX_EXT
from platformio.proc import where_is_program
# Implements the ability for SCons to emit a compilation database for the MongoDB project. See
# http://clang.llvm.org/docs/JSONCompilationDatabase.html for details on what a compilation
# database is, and why you might want one. The only user visible entry point here is
# 'env.CompilationDatabase'. This method takes an optional 'target' to name the file that
# should hold the compilation database, otherwise, the file defaults to compile_commands.json,
# which is the name that most clang tools search for by default.
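# Each entry written to the database has the shape below (paths and flags are
# illustrative only):
#
#   {
#       "directory": "/home/user/project",
#       "command": "arm-none-eabi-gcc -o src/main.o -c -Iinclude src/main.c",
#       "file": "/home/user/project/src/main.c"
#   }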
# Is there a better way to do this than this global? Right now this exists so that the
# emitter we add can record all of the things it emits, so that the scanner for the top level
# compilation database can access the complete list, and also so that the writer has easy
# access to write all of the files. But it seems clunky. How can the emitter and the scanner
# communicate more gracefully?
__COMPILATION_DB_ENTRIES = []
# We make no effort to avoid rebuilding the entries. Someday, perhaps we could and even
# integrate with the cache, but there doesn't seem to be much call for it.
class __CompilationDbNode(SCons.Node.Python.Value):
def __init__(self, value):
SCons.Node.Python.Value.__init__(self, value)
self.Decider(changed_since_last_build_node)
def changed_since_last_build_node(*args, **kwargs):
"""Dummy decider to force always building"""
return True
def makeEmitCompilationDbEntry(comstr):
"""
Effectively this creates a lambda function to capture:
* command line
* source
* target
:param comstr: unevaluated command line
:return: an emitter which has captured the above
"""
user_action = SCons.Action.Action(comstr)
def EmitCompilationDbEntry(target, source, env):
"""
This emitter will be added to each c/c++ object build to capture the info needed
for clang tools
:param target: target node(s)
:param source: source node(s)
:param env: Environment for use building this node
:return: target(s), source(s)
"""
# Resolve absolute path of toolchain
for cmd in ("CC", "CXX", "AS"):
if cmd not in env:
continue
if os.path.isabs(env[cmd]):
continue
env[cmd] = where_is_program(
env.subst("$%s" % cmd), env.subst("${ENV['PATH']}")
)
dbtarget = __CompilationDbNode(source)
entry = env.__COMPILATIONDB_Entry(
target=dbtarget,
source=[],
__COMPILATIONDB_UTARGET=target,
__COMPILATIONDB_USOURCE=source,
__COMPILATIONDB_UACTION=user_action,
__COMPILATIONDB_ENV=env,
)
# Technically, these next two lines should not be required: it should be fine to
# cache the entries. However, they don't seem to update properly. Since they are quick
# to re-generate disable caching and sidestep this problem.
env.AlwaysBuild(entry)
env.NoCache(entry)
__COMPILATION_DB_ENTRIES.append(dbtarget)
return target, source
return EmitCompilationDbEntry
def CompilationDbEntryAction(target, source, env, **kw):
"""
Create a dictionary with evaluated command line, target, source
and store that info as an attribute on the target
    (which has been stored in the __COMPILATION_DB_ENTRIES array).
:param target: target node(s)
:param source: source node(s)
:param env: Environment for use building this node
:param kw:
:return: None
"""
command = env["__COMPILATIONDB_UACTION"].strfunction(
target=env["__COMPILATIONDB_UTARGET"],
source=env["__COMPILATIONDB_USOURCE"],
env=env["__COMPILATIONDB_ENV"],
)
entry = {
"directory": env.Dir("#").abspath,
"command": command,
"file": str(env["__COMPILATIONDB_USOURCE"][0]),
}
target[0].write(entry)
def WriteCompilationDb(target, source, env):
entries = []
for s in __COMPILATION_DB_ENTRIES:
item = s.read()
item["file"] = os.path.abspath(item["file"])
entries.append(item)
with open(str(target[0]), mode="w", encoding="utf8") as target_file:
json.dump(
entries, target_file, sort_keys=True, indent=4, separators=(",", ": ")
)
def ScanCompilationDb(node, env, path):
return __COMPILATION_DB_ENTRIES
def generate(env, **kwargs):
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
env["COMPILATIONDB_COMSTR"] = kwargs.get(
"COMPILATIONDB_COMSTR", "Building compilation database $TARGET"
)
components_by_suffix = itertools.chain(
itertools.product(
[".%s" % ext for ext in SRC_C_EXT],
[
(static_obj, SCons.Defaults.StaticObjectEmitter, "$CCCOM"),
(shared_obj, SCons.Defaults.SharedObjectEmitter, "$SHCCCOM"),
],
),
itertools.product(
[".%s" % ext for ext in SRC_CXX_EXT],
[
(static_obj, SCons.Defaults.StaticObjectEmitter, "$CXXCOM"),
(shared_obj, SCons.Defaults.SharedObjectEmitter, "$SHCXXCOM"),
],
),
itertools.product(
[".%s" % ext for ext in SRC_ASM_EXT],
[(static_obj, SCons.Defaults.StaticObjectEmitter, "$ASCOM")],
),
)
for entry in components_by_suffix:
suffix = entry[0]
builder, base_emitter, command = entry[1]
# Assumes a dictionary emitter
emitter = builder.emitter[suffix]
builder.emitter[suffix] = SCons.Builder.ListEmitter(
[emitter, makeEmitCompilationDbEntry(command)]
)
env["BUILDERS"]["__COMPILATIONDB_Entry"] = SCons.Builder.Builder(
action=SCons.Action.Action(CompilationDbEntryAction, None),
)
env["BUILDERS"]["__COMPILATIONDB_Database"] = SCons.Builder.Builder(
action=SCons.Action.Action(WriteCompilationDb, "$COMPILATIONDB_COMSTR"),
target_scanner=SCons.Scanner.Scanner(
function=ScanCompilationDb, node_class=None
),
)
def CompilationDatabase(env, target):
result = env.__COMPILATIONDB_Database(target=target, source=[])
env.AlwaysBuild(result)
env.NoCache(result)
return result
env.AddMethod(CompilationDatabase, "CompilationDatabase")
def exists(env):
return True
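# Usage sketch once this module has been loaded as an SCons tool for the
# environment (the target name is illustrative):
#   env.CompilationDatabase("compile_commands.json")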
| platformio/platformio-core | platformio/builder/tools/compilation_db.py | Python | apache-2.0 | 8,017 |
#!/bin/env python
# -*- coding: utf-8 -*
# vim: set sw=4:
import sys
import os
import logging
import time
import re
import random
import common
logger = logging.getLogger(__name__)
#
# Import python-uinput
#
version = "%d.%d" % sys.version_info[0:2]
uinput_include_path = "uinput/dst/lib64/python%s/site-packages/" % version
UINPUTPYDIR = os.path.join(common.igor.libdir, uinput_include_path)
if not os.path.exists(UINPUTPYDIR):
raise Exception("No uinput for this python version: %s" % UINPUTPYDIR)
common.run("modprobe uinput")
common.add_searchpath(UINPUTPYDIR)
import uinput
# Map a char to a key
charmap = {
".": "dot",
"-": "minus",
"+": "plus",
" ": "space",
"\t": "tab",
"\n": "enter"
}
def _all_keys():
"""Fetches all key related capabilities.
"""
keys = []
for k in uinput.__dict__:
if re.match("^KEY_", k):
keys.append(uinput.__dict__[k])
return keys
device = uinput.Device(_all_keys())
class PressedKey(object):
key = None
def __init__(self, k):
self.key = k
def __enter__(self):
device.emit(self.key, 1)
def __exit__(self, type, value, traceback):
device.emit(self.key, 0)
def char_to_key(char):
"""Maps a character to a key-code
"""
if char in charmap:
char = charmap[char]
key_key = "KEY_%s" % char.upper()
return uinput.__dict__[key_key]
def press_key(key, delay=12):
"""Simulates a key stroke
"""
with PressedKey(key):
time.sleep(1.0 / 100 * delay * random.uniform(0.5, 1.5))
def send_input(txt):
"""Send the string as keystrokes to uinput
"""
logger.debug("Inputing: %s" % txt)
for char in txt:
if char.isupper():
with PressedKey(uinput.KEY_LEFTSHIFT):
press_key(char_to_key(char.lower()))
else:
press_key(char_to_key(char.lower()))
def play(seq):
"""Plays a sequence of text, single keys and callables
"""
if type(seq) is not list:
raise Exception("seq is expected to be a list of text, KEY_ " + \
"and callables")
for item in seq:
if callable(item):
item()
elif type(item) is tuple:
# Expected to be a uinput.KEY_
press_key(item)
elif type(item) in [str, unicode]:
send_input(item)
else:
logger.warning("Unknown sequence type: %s (%s)" % (type(item), \
item))
def screen_content(vcsn=1):
vcs = "/dev/vcs%s" % vcsn
logger.debug("Grabbing content from '%s'" % vcs)
# setterm -dump $N
content = open(vcs, "r").read()
return content
def is_regex_on_screen(expr, vcsn=1):
"""Check if the given expression appears on the screen.
"""
content = screen_content(vcsn)
logger.debug("Looking for '%s' on '%s'" % (expr, vcsn))
regex = re.compile(expr)
return regex.search(content) is not None
def wait_for_regex_on_screen(expr, timeout, vcsn=1):
"""Check for at max timeout seconds if expr appears on the screen
"""
found = False
while timeout > 0:
time.sleep(1)
if is_regex_on_screen(expr, vcsn):
found = True
break
timeout -= 1
return found
class Storyboard(object):
title = None
story = None
def __init__(self, title, story):
self.title = title
self.story = story
def check(self):
"""Checks a "storyboard", so if the system behaves as the story tells
A storyboard is expected to be in the form of:
story = [
(input_for_play, output_for_is_regex_on_screen_or_callable),
.
.
.
]
"""
passed = True
for storyline in self.story:
logger.info("Testing: %s" % str(storyline))
input, wait, output = storyline
if input is None:
logger.debug("No input to send")
else:
play(input)
if callable(wait):
wait()
else:
time.sleep(wait)
if output is None:
logger.debug("No output expected")
elif callable(output):
passed = output(input)
else:
passed = is_regex_on_screen(output)
if passed == False:
content = screen_content()
raise Exception("Response is not as expected.\n" + \
"Sent: %s\nExpected: %s\nGot: %s" % (input, \
output, \
content))
msg = "passed" if passed else "failed"
logger.info("Storyboard ended, finished: %s" % msg)
return passed
def run(self):
"""Run the story and eitehr return 0 on success or 1 on failure
"""
logger.info("Starting simulated %s" % self.title)
passed = False
try:
passed = self.check()
except Exception as e:
logger.warning("An exception: %s" % e.message)
passed = False
logger.info("Finished simulated %s" % self.title)
return passed
def run_and_exit(self):
"""Run the story and exit
"""
sys.exit(0 if self.run() else 1)
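# Minimal storyboard sketch (inputs and expected screen text are illustrative):
#   story = [
#       (["root", uinput.KEY_ENTER], 2, "Password:"),   # type input, wait 2s, expect regex
#       (None, 5, None),                                 # just wait
#   ]
#   Storyboard("example login", story).run_and_exit()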
| sdoumbouya/ovirt-node | tests/igor/libs/common/input.py | Python | gpl-2.0 | 5,460 |
'''
Created on 24.02.2017
@author: steinorb
'''
import unittest.mock
import read_arduino
import globvar
class ReadArduinoTest(unittest.TestCase):
def setUp(self):
globvar.measure0 = 0
@unittest.mock.patch('read_arduino.serial.Serial')
def testCorrectResult(self, mock_Serial):
"""
Checks the correct parsing of the result
"""
target = unittest.mock.MagicMock()
mock_Serial.return_value = target
target.readline.side_effect = [b'123 456 789 876 543 0 333 1023']
read_arduino.read()
self.assertEqual(123, globvar.measure0)
@unittest.mock.patch('read_arduino.serial.Serial')
def testCorruptedResult(self, mock_Serial):
"""
Checks that a measurement is retried when garbaged data is read
"""
target = unittest.mock.MagicMock()
mock_Serial.return_value = target
target.readline.side_effect = [b'123 4\x136 789 876 543 0 333 1023', b'321 456 789 876 543 0 333 1023']
read_arduino.read()
self.assertEqual(321, globvar.measure0)
@unittest.mock.patch('read_arduino.serial.Serial')
def testNoEndlessLoop(self, mock_Serial):
"""
        The read must not end up in an endless loop. After several retries a ValueError is raised
"""
target = unittest.mock.MagicMock()
mock_Serial.return_value = target
target.readline.return_value = b'123 456' # a shortened result should give a retry
with self.assertRaises(ValueError): # a value error should be raised after several tries
read_arduino.read()
if __name__ == "__main__":
unittest.main() | tropi-frutti/facharbeit-fhg | multimeter/test/test_read_arduino.py | Python | mit | 1,728 |
#!/usr/bin/env python
import os
import sys
import subprocess
import shlex
import argparse
import random
import platform
CONFIG_PATH = os.path.expanduser('~/.config/wallpaperchanger.conf')
if sys.version_info.major == 2:
import ConfigParser as configparser
import Tkinter as tk
else: # major version == 3
import configparser
import tkinter as tk
try:
import PIL.Image
import PIL.ImageTk
except ImportError:
PIL = None
class WallpaperChanger(object):
"""main class
this contains filenames, config.
"""
def __init__(self):
self.wrap_config = WrapConfig()
self.wrap_config.load()
self.config = self.wrap_config.config
self.base_path = os.path.expanduser(self.config.get('Main', 'path'))
def call(self, filename, is_abspath=False):
if is_abspath:
path = filename
else:
path = os.path.join(self.base_path, filename)
replace_dic = {'filepath': path}
command = []
# avoid to split filename which includes spaces.
for line in shlex.split(self.config.get('Main', 'command')):
command.append(line.format(**replace_dic))
res = subprocess.call(command)
if res == 0:
self.config.set('Wallpaper', 'current', filename)
self.wrap_config.write()
def get_filenames(self):
return sorted(os.listdir(self.base_path))
def get_abspath(self, filename):
return os.path.join(self.base_path, filename)
def get_default_wallpaper_change_command():
system_name = platform.system()
if system_name == 'Linux': # linux
return 'feh --bg-fill {filepath}'
elif system_name == 'Darwin': # mac os x
return r'osascript -e "tell application \"Finder\" to set desktop picture to POSIX file \"{filepath}\""'
class WrapConfig(object):
"""
    Wraps ConfigParser to load and save the settings file.
"""
DEFAULT = {
'Main': {
'path': '~/picture/wallpaper',
'command': get_default_wallpaper_change_command(),
},
'Wallpaper': {
'current': '',
'default': '',
}
}
def __init__(self):
self.config = configparser.ConfigParser()
def load(self):
"""load config file
if not exists, make file.
"""
if self.is_exists():
self.config.read(CONFIG_PATH)
else:
self.set_default()
self.write()
def write(self):
"""save config file,
automatically make directory.
"""
if not self.is_exists_parent_directory():
parent_path = self._get_parent_path()
os.makedirs(parent_path)
with open(CONFIG_PATH, 'w') as fp:
self.config.write(fp)
def set_default(self, overwrite=False):
"""set default, referring self.DEFAULT dictionary
if overwrite flag is True,
all config is overwrite.
if the flag is False and not exists the option, append.
"""
for section in self.DEFAULT.keys():
if not self.config.has_section(section):
self.config.add_section(section)
for option in self.DEFAULT.get(section, {}).keys():
if overwrite or not self.config.has_option(section, option):
self.config.set(section, option, self.DEFAULT[section][option])
def is_exists_parent_directory(self):
parent_path = self._get_parent_path()
return os.path.isdir(parent_path)
def is_exists(self):
return os.path.exists(CONFIG_PATH)
def _get_parent_path(self):
return os.path.abspath(os.path.dirname(CONFIG_PATH))
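# The generated ~/.config/wallpaperchanger.conf then looks roughly like this
# (values shown are the Linux defaults):
#   [Main]
#   path = ~/picture/wallpaper
#   command = feh --bg-fill {filepath}
#
#   [Wallpaper]
#   current =
#   default =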
class Gui(tk.Frame):
"""
    Graphical interface for selecting a wallpaper.
"""
THUMBNAIL_SIZE = (400, 400)
def __init__(self, master, changer):
self._changer = changer
tk.Frame.__init__(self, master)
self.pack()
self.create_widgets()
self.init_binds()
self.set_listbox_filenames()
self.set_thumbnail()
self.filename = None
self.key = ''
def create_widgets(self):
"""init widgets
"""
f_left = tk.Frame(self)
f_left.pack({'fill': tk.BOTH, 'side': 'left'})
self.elem_listbox = tk.Listbox(f_left)
self.elem_listbox.pack({'side': 'top', 'fill': tk.BOTH})
self.elem_entry = tk.Entry(f_left, textvariable=self.gen_entry_callback())
self.elem_entry.pack({'side': 'bottom'})
self.elem_entry.focus_set()
if PIL is not None:
f_right = tk.Frame(self)
f_right.pack({'fill': tk.BOTH})
self.elem_thumbnail = tk.Label(f_right)
self.elem_thumbnail.pack({
'side': 'right',
})
def init_binds(self):
"""init binds
"""
self.master.bind('<Escape>', self.action_destroy)
self.master.bind('<Return>', self.action_finish)
self.master.bind('<Tab>', self.action_completion)
self.elem_listbox.bind('<<ListboxSelect>>', self.action_select)
self.elem_listbox.bind('<Double-Button-1>', self.action_finish)
def action_destroy(self, *args):
"""destroy gui
callback function
"""
self.master.destroy()
def action_select(self, event=None):
"""set thumbnail
when select item in listbox, called
callback function
"""
if event is not None:
idx = int(self.elem_listbox.curselection()[0])
self.filename = self.elem_listbox.get(idx)
self.set_thumbnail(self.filename)
def action_finish(self, *args):
"""apply new wallpaper by calling Changer.call
"""
if self.filename is not None:
self._changer.call(self.filename)
self.action_destroy()
def action_completion(self, *args):
"""Completion in textbox(Entry).
hooked Tab key, and disable default tab action by returning "break".
"""
        names = self.get_filtered_filenames(self.key)
        if not names:
            return 'break'  # nothing to complete
        base = names[0]
        others = names[1:]
for idx in (len(base) - x for x in range(len(base))):
flag = True
for line in others:
if not base[:idx] in line:
flag = False
if flag:
self.elem_entry.delete(0, tk.END)
self.elem_entry.insert(0, base[:idx])
break
return 'break'
def gen_entry_callback(self):
def callback(sv):
self.key = sv.get()
names = self.get_filtered_filenames(self.key)
self.set_listbox_filenames(names)
if len(names) == 1:
self.filename = names[0]
self.set_thumbnail(names[0])
else:
self.filename = None
self.set_thumbnail()
string_var = tk.StringVar()
string_var.trace('w', lambda name, index, mode, sv=string_var: callback(sv))
return string_var
def set_listbox_filenames(self, filenames=None):
self.elem_listbox.delete(0, self.elem_listbox.size() - 1)
if filenames is None:
filenames = self._changer.get_filenames()
for name in filenames:
self.elem_listbox.insert(tk.END, name)
def set_thumbnail(self, ifilename=None):
if PIL is not None:
size = self.THUMBNAIL_SIZE
thumbnail = PIL.Image.new('RGBA', size, (0, 0, 0, 0))
if ifilename is not None:
filename = self._changer.get_abspath(ifilename)
image = PIL.Image.open(filename)
image.thumbnail(size, PIL.Image.ANTIALIAS)
offset_x = int(max((size[0] - image.size[0]) / 2, 0))
offset_y = int(max((size[1] - image.size[1]) / 2, 0))
thumbnail.paste(image, (offset_x, offset_y))
self.thumbnail = PIL.ImageTk.PhotoImage(thumbnail)
self.elem_thumbnail.configure(image=self.thumbnail)
def get_filtered_filenames(self, keyword):
return [x for x in self._changer.get_filenames() if x.find(keyword) == 0]
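# A standalone sketch of the completion rule used in Gui.action_completion
# above: take the first matching filename and keep the longest prefix that
# occurs in every other match. The sample names in the comment are
# hypothetical.
def _example_common_prefix(names):
    base, others = names[0], names[1:]
    for idx in range(len(base), 0, -1):
        if all(base[:idx] in other for other in others):
            return base[:idx]
    return ''
# _example_common_prefix(['wallpaper_a.png', 'wallpaper_b.png']) -> 'wallpaper_'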
def parse_argument():
parser = argparse.ArgumentParser(
description='Wallpaper Changer on Python.',
epilog='''Change ~/.config/wallpaperchanger.conf if you need.'''
)
parser.add_argument('filename',
nargs='?',
default=None,
                        help='set the given picture as wallpaper')
parser.add_argument('-d', '--default',
action='store_true',
                        help='set the default picture from the config as wallpaper'
)
parser.add_argument('-r', '--random',
action='store_true',
help='set random picture',
)
parser.add_argument('-n', '--next',
action='store_true',
                        help='set the next picture in alphabetical order.',
)
parser.add_argument('--init',
action='store_true',
                        help='regenerate the config file.')
return parser.parse_args()
def main():
arguments = parse_argument()
changer = WallpaperChanger()
if arguments.filename is not None:
is_abspath = False
filename = ''
if arguments.filename in changer.get_filenames():
filename = arguments.filename
elif os.path.exists(os.path.abspath(arguments.filename)):
filename = os.path.abspath(arguments.filename)
is_abspath = True
else:
print("'{filename}' not found".format(filename=arguments.filename))
exit(1)
changer.call(filename, is_abspath)
return
if arguments.default:
filename = changer.config.get('Wallpaper', 'default')
changer.call(filename)
return
if arguments.random:
filenames = changer.get_filenames()
idx = random.randrange(0, len(filenames))
filename = filenames[idx]
changer.call(filename)
return
if arguments.next:
filenames = changer.get_filenames()
current = changer.config.get('Wallpaper', 'current')
        idx = filenames.index(current) if current in filenames else -1  # -1 or 0<=idx<len
filename = filenames[(idx + 1) % len(filenames)]
changer.call(filename)
return
if arguments.init:
changer.wrap_config.set_default(overwrite=True)
changer.wrap_config.write()
w = tk.Tk()
gui = Gui(w, changer)
gui.mainloop()
if __name__ == '__main__':
main()
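# Example invocations, based on the options defined in parse_argument()
# ('photo.jpg' is a hypothetical filename):
#   python wallpaperchanger.py photo.jpg   # set a specific picture
#   python wallpaperchanger.py -d          # set the default picture from the config
#   python wallpaperchanger.py -r          # pick a random picture
#   python wallpaperchanger.py -n          # next picture in alphabetical order
#   python wallpaperchanger.py --init      # regenerate the config file, then open the GUI
#   python wallpaperchanger.py             # no arguments: open the Tk chooser GUI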
| cocuh/WallpaperChanger | wallpaperchanger.py | Python | apache-2.0 | 10,701 |
import random, keras
import numpy as np
from keras.preprocessing import sequence
from scipy.spatial.distance import cosine
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Dropout, Activation, Flatten, Embedding  # the legacy Merge layer is not used here
from keras.layers import LSTM
#read Embedding if the word is in word_dict
def read_embedding(word_dict, embedding_file_path):
embedding_size = 64
embedding_file = open(embedding_file_path, 'rb')
embedding_matrix = np.zeros((len(word_dict) + 1, embedding_size))
for line in embedding_file:
terms = line.rstrip().split(' ')
if not len(terms) == embedding_size + 1:
continue
if terms[0] in word_dict:
ids = word_dict[terms[0]]
embedding_vec = np.asarray(terms[1:], dtype='float32')
embedding_matrix[ids] = embedding_vec
return embedding_matrix
#transfer each word to word id
def transfer_data(word_vec, word_dict):
vec = []
for word in word_vec:
if not word in word_dict:
word_dict[word] = len(word_dict)
vec.append(word_dict[word])
return vec
def sim_max(sentence, labelId, embedding_matrix):
max_sim = 0.0
for ids in sentence:
embedding = embedding_matrix[ids]
        similarity = 1.0 - cosine(embedding, embedding_matrix[labelId])
        if max_sim < similarity:
            max_sim = similarity
return max_sim
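# A toy illustration of sim_max with a hypothetical 3-row embedding matrix:
# the score for each word id is 1 - cosine distance to the label embedding,
# and the maximum over the sentence's word ids is returned.
def _example_sim_max():
    import numpy as np
    toy_matrix = np.array([[1.0, 0.0],   # word id 0
                           [0.0, 1.0],   # word id 1 (parallel to the label)
                           [0.0, 2.0]])  # word id 2, used as the label
    return sim_max([0, 1], 2, toy_matrix)  # -> 1.0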
def avg_embedding(sentences, embedding_matrix):
word_embeddings = []
for sentence in sentences:
for ids in sentence:
embedding = embedding_matrix[ids]
word_embeddings.append(embedding)
return np.mean(word_embeddings, axis = 0)
#select the sentence most similar to the label
def filter_dataset_seq(labelId, sentences, embedding_matrix):
x = []
max_score = 0
max_sentence = []
for sentence in sentences:
cur_score = sim_max(sentence, labelId, embedding_matrix)
if cur_score > max_score:
max_score = cur_score
max_sentence = sentence
[max_sentence] = sequence.pad_sequences([max_sentence], maxlen=40)
return avg_embedding(sentences, embedding_matrix), max_sentence, embedding_matrix[labelId]
if __name__ == "__main__":
###################################################################
# Read tag file
###################################################################
TAG_FILE_PATH = "./tag.list"
tag_map = {}
tag_file = open(TAG_FILE_PATH, 'rb')
for line in tag_file:
tag = line.rstrip()
add = True
for item in tag_map.keys():
            if item == tag: #replace with a similarity() function later
add = False
break
if add:
tag_map[tag] = 0
tag_file.close()
sample_map = {}
sentence_map = {}
word_dict = {"&&":0}
for i in range(6):
###################################################################
# Read label file
# Positive Sample if Tag in
        # Negative Sample if Tag not in (Randomly picked for balancing)
###################################################################
LABEL_FILE_PATH = "../../data/" + str(i) + ".part.tokens.label"
Label_file = open(LABEL_FILE_PATH, 'rb')
for line in Label_file:
terms = line.split('\t')
if len(terms) <= 2:
continue
key = terms[0] + ' ' + terms[1]
local_map = {}
#positive
for term in terms[2:]:
words = term.split(' ')
if words[0] == 'not' or words[0] == 'no':
continue
if words[len(words) - 1] in tag_map:
local_map[words[len(words) - 1]] = 1
if len(local_map) == 0:
continue
#negative
positive_count = len(local_map)
for count in range(positive_count):
pos = random.randrange(0, len(tag_map))
while tag_map.keys()[pos] in local_map:
pos = random.randrange(0, len(tag_map))
local_map[tag_map.keys()[pos]] = 0
#record
sample_map[key] = []
for tag in local_map.keys():
sample_map[key].append([tag, local_map[tag]])
Label_file.close()
###################################################################
# Read Sentences
###################################################################
        SENTENCE_FILE_PATH = "../../data/" + str(i) + ".part.tokens.sentence"
        sentence_file = open(SENTENCE_FILE_PATH, 'rb')
for line in sentence_file:
terms = line.rstrip().split("\t")
if len(terms) <= 2:
continue
key = terms[0] + ' ' + terms[1]
if not key in sample_map:
continue
sentences = []
sentence = []
for term in terms[2:]:
if term == '&&':
if len(sentence) > 5 and len(sentence) < 40:
sentences.append(transfer_data(sentence, word_dict))
sentence = []
else:
sentence.append(term)
if len(sentences) > 0:
sentence_map[key] = sentences
sentence_file.close()
print "word_dict " + str(len(word_dict))
print "characters " + str(len(sentence_map))
print "data read finished"
###################################################################
# Read embedding
###################################################################
EMBEDDING_FILE_PATH = "../../data/full_story_vec.txt"
embedding_matrix = read_embedding(word_dict, EMBEDDING_FILE_PATH)
print "embedding read finished"
###################################################################
# Construct features
###################################################################
X = [[], [], []]
y = []
for key in sentence_map.keys():
for sample in sample_map[key]:
if not sample[0] in word_dict:
continue
context,sentence,label_embedding = \
filter_dataset_seq(word_dict[sample[0]], sentence_map[key], embedding_matrix)
X[0].append(context)
X[1].append(sentence)
X[2].append(label_embedding)
y.append(sample[1])
X[0] = np.asmatrix(X[0])
X[1] = np.asmatrix(X[1])
X[2] = np.asmatrix(X[2])
y = np.asarray(y)
scores = []
###################################################################
#model
###################################################################
embedding_size = 64
max_features = len(word_dict) + 1
batch_size = 32
nb_epoch = 5
embedding_trainable = True
early_stop = False
maxlen = 40
from sklearn.model_selection import KFold
import copy
from sklearn.metrics import (precision_score, recall_score,f1_score, accuracy_score)
kf = KFold(n_splits=5, shuffle=True, random_state = 7)
for train, test in kf.split(y):
X_train = []
X_test = []
for i in range(3):
X_train.append(X[i][train])
X_test.append(X[i][test])
y_train, y_test = y[train], y[test]
weights = copy.deepcopy(embedding_matrix)
context_input = Input(shape=(embedding_size, ))
word_input = Input(shape=(embedding_size, ))
sentence_input = Input(shape=(40,), dtype='int32')
x = Embedding(output_dim=embedding_size, input_dim=max_features, input_length=40, weights=[weights])(sentence_input)
sentence_out = LSTM(output_dim=64)(x)
x = keras.layers.concatenate([word_input, sentence_out, context_input], axis=-1)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
model = Model(inputs=[word_input, sentence_input, context_input], outputs=main_output)
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, validation_split=0.1)
y_pred = model.predict(X_test)
y_pred = [int(np.round(x)) for x in y_pred]
accuracy = accuracy_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
print('Result\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'.format(precision, recall, f1, accuracy))
scores.append([precision, recall, f1, accuracy])
print "REC\t" + str(np.average(scores, axis = 0))
| jerrynlp/AutoSum | classification/cf_data.py | Python | mit | 8,761 |
#!/usr/bin/env python
'''
thermo_calc.py:
Calculate thermodynamic stability (minimum free energy (mfe) structures)
<Energy>
(1)miRNA seed region vs TargetRNA seed region
-------- miRNA(8nt_seed)
||||||||
-------- TargetRNA(8nt_seed)
(2)mature miRNA vs candidate target site (the same length)
---------------------- miRNA
||||||||||||||||||||||
---------------------- TargetRNA
(3)mature miRNA vs local TargetRNA region (70nt window)
---------------------- miRNA
||||||||||||||||||||||
-------------------------------------- TargetRNA
<Reference>
[1] Stormo GD. An overview of RNA structure prediction and applications to RNA gene prediction and RNAi design. Curr Protoc Bioinformatics. 2006 Mar;Chapter 12:Unit 12.1.
[2] http://www.tbi.univie.ac.at/RNA/tutorial/node6.html
'''
import shlex
import subprocess
import tempfile
import re
def make_constraints(seed_match, seq_type):
if seq_type == 'miseq':
seed_match = seed_match.replace('x','.')
seed_match = seed_match.replace('A','.')
seed_match = seed_match.replace(':','(')
seed_match = seed_match.replace('|','(')
return seed_match
elif seq_type == 'targetseq': #Reverse
seed_match_rev = seed_match[-1::-1]
seed_match_rev = seed_match_rev.replace('x','.')
seed_match_rev = seed_match_rev.replace('A','.')
seed_match_rev = seed_match_rev.replace(':',')')
seed_match_rev = seed_match_rev.replace('|',')')
return seed_match_rev
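# Illustration with a hypothetical seed_match string over the alphabet handled
# above ('|' / ':' mark paired positions, 'x' / 'A' mark unpaired ones): the
# miRNA side becomes '(' constraints, the target side is reversed and becomes
# ')' constraints.
def _example_make_constraints():
    return (make_constraints('||||||xA', 'miseq'),        # -> '((((((..'
            make_constraints('||||||xA', 'targetseq'))    # -> '..))))))'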
def viennaRNA_RNAcofold(seqs, constraints, option_postscript=False, option_constraints=True, option_partfunc=True, option_temperature=True):
command_RNAcofold = 'RNAcofold --noPS --constraint --partfunc --temp=37'
args = shlex.split(command_RNAcofold)
test_str = "\n".join([seqs, constraints]) + "\n\n" + '@' + '\n'
p = subprocess.Popen(args,stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=tempfile.gettempdir())
stdout, stderr = p.communicate("\n".join([seqs, constraints]).encode('utf-8')) #cannot use type 'str' for communicate...
return stdout, stderr
#b'UUCAAGUA&UACUUGAA\n.(((((((&))))))). (-16.90)\n,(((((((&))))))), [-17.56]\n frequency of mfe structure in ensemble 0.342276 , delta G binding= -7.56\n'
def regex_RNAcofold(seq):
regex = r'.+\n(?P<str_mfe>\S+) \((?P<mfe>.+)\)\n(?P<str_ens>\S+) \[(?P<ens>.+)\]\n frequency of mfe structure in ensemble (?P<ens_frequency>\S+) , delta G binding=(?P<delta_G>.+)\n'
seq = seq.decode('utf-8')
#print (seq)
decoded_seq = re.match(regex, seq)
str_mfe = decoded_seq.group('str_mfe')
mfe = decoded_seq.group('mfe')
str_ens = decoded_seq.group('str_ens')
ens = decoded_seq.group('ens')
delta_G = decoded_seq.group('delta_G')
return str_mfe, mfe, str_ens, ens, delta_G
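# A minimal sketch of parsing the sample RNAcofold output quoted above; the
# returned fields are the mfe structure/energy, the ensemble structure/energy,
# and the (unstripped) delta G of binding.
def _example_parse_cofold():
    sample = (b'UUCAAGUA&UACUUGAA\n'
              b'.(((((((&))))))). (-16.90)\n'
              b',(((((((&))))))), [-17.56]\n'
              b' frequency of mfe structure in ensemble 0.342276 , delta G binding= -7.56\n')
    return regex_RNAcofold(sample)
    # -> ('.(((((((&))))))).', '-16.90', ',(((((((&))))))),', '-17.56', ' -7.56')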
def calc_thermo(mirna_seq, targetrna_seq, targetrna_range, tmp_dict):
mirna_length = len(mirna_seq) #miRNA sequence length
targetrna_length = len(targetrna_seq)
    targetrna_range = 30 #Override the passed-in value; use a 30 nt search window
around_nt_right = ''
around_nt_left = ''
if mirna_length % 2 == 0: #Even number
around_nt_right = int((targetrna_range - mirna_length) / 2)
around_nt_left = int((targetrna_range - mirna_length) / 2)
else: #Odd number
around_nt_right = int((targetrna_range - mirna_length - 1) / 2)
around_nt_left = int((targetrna_range - mirna_length + 1) / 2)
#miRNA_region
mirna_seed = mirna_seq[0:8] #miRNA_seed_region
mature_mirna = mirna_seq #mature_miRNA
thermo_targetseq = '' #TargetRNA sequence for thermo calc.
for x in list(tmp_dict.keys()):
#print(x)
mirna_infor = x
mirna_data = mirna_infor.split('||')
mirna_name = mirna_data[0]
#TargetRNA_st_ed
targetrna_ed = int(mirna_data[3]) #1nt - seed_region / end_site for miRNA-binding
targetrna_st = targetrna_ed - mirna_length + 1 #8nt - seed_region / start_site for miRNA-binding
#if (targetrna_st - around_nt_right) <= 0:
# print ('WARNINGS: ' + x)
# continue
#if (targetrna_ed + around_nt_left) > targetrna_length:
# print ('WARNINGS: ' + x)
# continue
#thermo_targetseq_st = targetrna_st - around_nt_right - 1
#thermo_targetseq_ed = targetrna_ed + around_nt_left
#Targetrna_region
candidate_target_site = ''
if not targetrna_st-1 < 0:
candidate_target_site = targetrna_seq[targetrna_st-1:targetrna_ed]
else:
candidate_target_site = 'NA'
#print(targetrna_st-1)
targetrna_seed_region = targetrna_seq[targetrna_ed-8:targetrna_ed]
#local_targetrna_region = targetrna_seq[thermo_targetseq_st:thermo_targetseq_ed] #TargetRNA sequence for thermo calc.
#Calculated pairs
test_seq1 = '&'.join([mirna_seed,targetrna_seed_region])
test_seq2 = '&'.join([mature_mirna,candidate_target_site])
#test_seq3 = '&'.join([mature_mirna,local_targetrna_region])
#constraints
c_miseq = ''
c_targetseq = ''
seed_match = (tmp_dict[x])[4] #NEED TO CHECK
reside_miseq_targetseq = mirna_length - 8 #miseq - seed_region
seed_match_miseq = make_constraints(seed_match,'miseq')
c_miseq_seed = seed_match_miseq
c_miseq = seed_match_miseq + reside_miseq_targetseq * '.'
seed_match_targetseq = make_constraints(seed_match,'targetseq')
c_targetseq_seed = seed_match_targetseq
c_targetseq_site = reside_miseq_targetseq * '.' + seed_match_targetseq
#c_targetseq = around_nt_right * '.' + reside_miseq_targetseq * '.' + seed_match_targetseq + around_nt_left * '.'
test_constraints1 = '&'.join([c_miseq_seed,c_targetseq_seed])
test_constraints2 = '&'.join([c_miseq,c_targetseq_site])
#test_constraints3 = '&'.join([c_miseq,c_targetseq])
#debug
#print (test_seq1)
#print (test_constraints1)
#print (test_seq2)
#print (test_constraints2)
#print (test_seq3)
#print (test_constraints3)
#RNAcofold_command
stdout1, stderr1 = viennaRNA_RNAcofold(test_seq1, test_constraints1) #test1
stdout2 = ''
stderr2 = ''
if not candidate_target_site == 'NA':
stdout2, stderr2 = viennaRNA_RNAcofold(test_seq2, test_constraints2) #test2
else:
stdout2 = 'NA'
#stdout3, stderr3 = viennaRNA_RNAcofold(test_seq3, test_constraints3) #Test3
#print (stdout1)
#print (stdout2)
#print (stdout3)
#print (stderr)
#Seed_matching
str_mfe_seed, mfe_seed, str_ens_seed, ens_seed, delta_G_seed = regex_RNAcofold(stdout1)
mfe_seed = mfe_seed.strip()
ens_seed = ens_seed.strip()
delta_G_seed = delta_G_seed.strip()
out1_list = [str_mfe_seed, mfe_seed, str_ens_seed, ens_seed, delta_G_seed]
tmp_dict[x].extend(out1_list)
#miRNA-target_site matching
if not stdout2 == 'NA':
str_mfe, mfe, str_ens, ens, delta_G = regex_RNAcofold(stdout2)
mfe = mfe.strip()
ens = ens.strip()
delta_G = delta_G.strip()
out2_list = [str_mfe, mfe, str_ens, ens, delta_G]
tmp_dict[x].extend(out2_list)
#3'pairing contribution
diff_mfe = float(mfe) - float(mfe_seed)
diff_ens = float(ens) - float(ens_seed)
diff_delta_G = float(delta_G) - float(delta_G_seed)
out3_list = [diff_mfe, diff_ens, diff_delta_G]
tmp_dict[x].extend(out3_list)
else:
tmp_dict[x].extend(['near_stop_codon','NA','NA','NA','NA'])
tmp_dict[x].extend(['NA','NA','NA'])
#print ('str_mfe: ' + str_mfe)
#print ('mfe: ' + mfe)
#print ('str_ens: ' + str_ens)
#print ('ens: ' + ens)
#print ('delta_G: ' + delta_G)
return tmp_dict
| Naoto-Imamachi/MIRAGE | scripts/module/analysis/thermo_calc.py | Python | mit | 8,126 |
"""The :mod:`pyts.multivariate.utils` module includes utility tools."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
from sklearn.utils import check_array
def check_3d_array(X):
"""Check that the input is a three-dimensional array.
Parameters
----------
X : array-like
Input data.
Returns
-------
X_new : array
Input data as an array.
"""
X = check_array(X, ensure_2d=False, allow_nd=True)
if X.ndim != 3:
raise ValueError("X must be 3-dimensional (got {0}).".format(X.ndim))
return X
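# A minimal usage sketch: a 3D array of shape (n_samples, n_features,
# n_timestamps) passes through unchanged, while anything else raises
# ValueError.
def _example_check_3d_array():
    import numpy as np
    X = np.zeros((2, 3, 4))           # 2 samples, 3 features, 4 timestamps
    return check_3d_array(X).shape    # -> (2, 3, 4)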
| johannfaouzi/pyts | pyts/multivariate/utils/utils.py | Python | bsd-3-clause | 589 |
"""A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'CP_fileobject',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import os
try:
import queue
except:
import Queue as queue
import re
import rfc822
import socket
import sys
if 'win' in sys.platform and hasattr(socket, "AF_INET6"):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
try:
import cStringIO as StringIO
except ImportError:
import StringIO
DEFAULT_BUFFER_SIZE = -1
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
import threading
import time
import traceback
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
import operator
from urllib import unquote
import warnings
if sys.version_info >= (3, 0):
bytestr = bytes
unicodestr = str
basestring = (bytes, str)
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given encoding."""
# In Python 3, the native string type is unicode
return n.encode(encoding)
else:
bytestr = str
unicodestr = unicode
basestring = basestring
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given encoding."""
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')
quoted_slash = re.compile(ntob("(?i)%2F"))
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys())
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = [ntob(h) for h in
['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']]
import logging
if not hasattr(logging, 'statistics'): logging.statistics = {}
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in (SPACE, TAB):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v))
hdict[hname] = v
return hdict
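# A minimal sketch of the comma-folding behaviour described above (Python 2
# semantics, where bytes and the native str are the same type): two "Accept"
# header lines are folded into a single comma-separated value.
def _example_fold_headers():
    import io
    rfile = io.BytesIO(b"Accept: text/html\r\nAccept: text/plain\r\n\r\n")
    return read_headers(rfile)  # -> {'Accept': 'text/html, text/plain'}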
class MaxSizeExceeded(Exception):
pass
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
def read(self, size=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See https://bitbucket.org/cherrypy/cherrypy/issue/421
if len(data) < 256 or data[-1:] == LF:
return EMPTY.join(res)
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.bytes_read += len(data)
self._check_length()
return data
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
self._check_length()
return data
class KnownLengthRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted."""
def __init__(self, rfile, content_length):
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.remaining -= len(data)
return data
class ChunkedRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
"""
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
line = line.strip().split(SEMICOLON, 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
if chunk_size <= 0:
self.closed = True
return
## if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError("Request Entity Too Large")
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
"got " + repr(crlf) + ")")
def read(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
                data += self.buffer
                self.buffer = EMPTY
def readline(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find(LF)
if size:
if newline_pos == -1:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
remaining = min(size - len(data), newline_pos)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
            else:
                if newline_pos == -1:
                    data += self.buffer
                    self.buffer = EMPTY
                else:
                    # include the LF terminator, as file.readline() would
                    data += self.buffer[:newline_pos + 1]
                    self.buffer = self.buffer[newline_pos + 1:]
                    return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
if not self.closed:
raise ValueError(
"Cannot read trailers until the request body has been read.")
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError("Request Entity Too Large")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
yield line
def close(self):
self.rfile.close()
def __iter__(self):
# Shamelessly stolen from StringIO
        total = 0
        sizehint = 0  # iterate line by line; no size hint
        line = self.readline(sizehint)
while line:
yield line
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
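# A minimal sketch of decoding a chunked request body with ChunkedRFile
# (Python 2 semantics): one 5-byte chunk followed by the terminating
# zero-length chunk. maxlen=0 disables the size limit.
def _example_chunked_read():
    import io
    body = io.BytesIO(b"5\r\nhello\r\n0\r\n\r\n")
    return ChunkedRFile(body, maxlen=0).read(5)  # -> 'hello'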
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
def __init__(self, server, conn):
self.server= server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = ntob("http")
if self.server.ssl_adapter is not None:
self.scheme = ntob("https")
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
success = self.read_request_line()
except MaxSizeExceeded:
self.simple_response("414 Request-URI Too Long",
"The Request-URI sent with the request exceeds the maximum "
"allowed bytes.")
return
else:
if not success:
return
try:
success = self.read_request_headers()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large",
"The headers sent with the request exceed the maximum "
"allowed bytes.")
return
else:
if not success:
return
self.ready = True
def read_request_line(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
return False
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
return False
if not request_line.endswith(CRLF):
self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
return False
try:
method, uri, req_protocol = request_line.strip().split(SPACE, 2)
rp = int(req_protocol[5]), int(req_protocol[7])
except (ValueError, IndexError):
self.simple_response("400 Bad Request", "Malformed Request-Line")
return False
self.uri = uri
self.method = method
# uri may be an abs_path (including "http://host.domain.tld");
scheme, authority, path = self.parse_request_uri(uri)
if NUMBER_SIGN in path:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return False
if scheme:
self.scheme = scheme
qs = EMPTY
if QUESTION_MARK in path:
path, qs = path.split(QUESTION_MARK, 1)
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
try:
atoms = [unquote(x) for x in quoted_slash.split(path)]
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
path = "%2F".join(atoms)
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return False
self.request_protocol = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
return True
def read_request_headers(self):
"""Read self.rfile into self.inheaders. Return success."""
# then all the http headers
try:
read_headers(self.rfile, self.inheaders)
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
mrbs = self.server.max_request_body_size
if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return False
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if self.inheaders.get("Connection", "") == "close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get("Connection", "") != "Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = self.inheaders.get("Transfer-Encoding")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == "chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get("Expect", "") == "100-continue":
# Don't use simple_response here, because it emits headers
# we don't want. See https://bitbucket.org/cherrypy/cherrypy/issue/951
msg = self.server.protocol + " 100 Continue\r\n\r\n"
try:
self.conn.wfile.sendall(msg)
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return True
def parse_request_uri(self, uri):
"""Parse a Request-URI into (scheme, authority, path).
Note that Request-URI's must be one of::
Request-URI = "*" | absoluteURI | abs_path | authority
Therefore, a Request-URI which starts with a double forward-slash
cannot be a "net_path"::
net_path = "//" authority [ abs_path ]
Instead, it must be interpreted as an "abs_path" with an empty first
path segment::
abs_path = "/" path_segments
path_segments = segment *( "/" segment )
segment = *pchar *( ";" param )
param = *pchar
"""
if uri == ASTERISK:
return None, None, uri
i = uri.find('://')
if i > 0 and QUESTION_MARK not in uri[:i]:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
scheme, remainder = uri[:i].lower(), uri[i + 3:]
authority, path = remainder.split(FORWARD_SLASH, 1)
path = FORWARD_SLASH + path
return scheme, authority, path
if uri.startswith(FORWARD_SLASH):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None
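    # Examples of the three Request-URI forms handled above (hostnames are
    # hypothetical):
    #   parse_request_uri('http://example.test/a/b?x=1')
    #       -> ('http', 'example.test', '/a/b?x=1')
    #   parse_request_uri('/a/b')            -> (None, None, '/a/b')
    #   parse_request_uri('example.test:80') -> (None, 'example.test:80', None)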
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get("Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.sendall("0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = [self.server.protocol + SPACE +
status + CRLF,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n"]
if status[:3] in ("413", "414"):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append("Connection: close\r\n")
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = "400 Bad Request"
buf.append(CRLF)
if msg:
if isinstance(msg, unicodestr):
msg = msg.encode("ISO-8859-1")
buf.append(msg)
try:
self.conn.wfile.sendall("".join(buf))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
self.conn.wfile.sendall(EMPTY.join(buf))
else:
self.conn.wfile.sendall(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
            # requirement is not to be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if "date" not in hkeys:
self.outheaders.append(("Date", rfc822.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.server.server_name))
buf = [self.server.protocol + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.sendall(EMPTY.join(buf))
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
pass
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
pass
class CP_fileobject(socket._fileobject):
"""Faux file object attached to a socket object."""
def __init__(self, *args, **kwargs):
self.bytes_read = 0
self.bytes_written = 0
socket._fileobject.__init__(self, *args, **kwargs)
def sendall(self, data):
"""Sendall for non-blocking sockets."""
while data:
try:
bytes_sent = self.send(data)
data = data[bytes_sent:]
except socket.error, e:
if e.args[0] not in socket_errors_nonblocking:
raise
def send(self, data):
bytes_sent = self._sock.send(data)
self.bytes_written += bytes_sent
return bytes_sent
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self.sendall(buffer)
def recv(self, size):
while True:
try:
data = self._sock.recv(size)
self.bytes_read += len(data)
return data
except socket.error, e:
if (e.args[0] not in socket_errors_nonblocking
and e.args[0] not in socket_error_eintr):
raise
if not _fileobject_uses_str_type:
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(rbufsize)
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
data = self.recv(left)
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
else:
def read(self, size=-1):
if size < 0:
# Read until EOF
buffers = [self._rbuf]
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
data = self._rbuf
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
while data != "\n":
data = self.recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
class HTTPConnection(object):
"""An HTTP connection (active socket).
server: the Server object which received this connection.
socket: the raw socket object (usually TCP) for this connection.
makefile: a fileobject class for reading from the socket.
"""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = DEFAULT_BUFFER_SIZE
wbufsize = DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
def __init__(self, server, sock, makefile=CP_fileobject):
self.server = server
self.socket = sock
self.rfile = makefile(sock, "rb", self.rbufsize)
self.wfile = makefile(sock, "wb", self.wbufsize)
self.requests_seen = 0
def communicate(self):
"""Read each request and respond appropriately."""
request_seen = False
try:
while True:
# (re)set req to None so that if something goes wrong in
# the RequestHandlerClass constructor, the error doesn't
# get written to the previous request.
req = None
req = self.RequestHandlerClass(self.server, self)
# This order of operations should guarantee correct pipelining.
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error:
e = sys.exc_info()[1]
errnum = e.args[0]
# sadly SSL sockets return a different (longer) time out string
if errnum == 'timed out' or errnum == 'The read operation timed out':
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See https://bitbucket.org/cherrypy/cherrypy/issue/853
if (not request_seen) or (req and req.started_request):
# Don't bother writing the 408 if the response
# has already started being written.
if req and not req.sent_headers:
try:
req.simple_response("408 Request Timeout")
except FatalSSLAlert:
# Close the connection.
return
elif errnum not in socket_errors_to_ignore:
self.server.error_log("socket.error %s" % repr(errnum),
level=logging.WARNING, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
# Close the connection.
return
except NoSSLError:
if req and not req.sent_headers:
# Unwrap our wfile
self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
req.simple_response("400 Bad Request",
"The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
self.linger = True
except Exception:
e = sys.exc_info()[1]
self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
linger = False
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
# Python's socket module does NOT call close on the kernel socket
# when you call socket.close(). We do so manually here because we
# want this server to send a FIN TCP segment immediately. Note this
# must be called *before* calling socket.close(), because the latter
# drops its reference to the kernel socket.
if hasattr(self.socket, '_sock'):
self.socket._sock.close()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
class TrueyZero(object):
"""An object which equals and does math like the integer '0' but evals True."""
def __add__(self, other):
return other
def __radd__(self, other):
return other
trueyzero = TrueyZero()
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
"""Thread which continuously polls a Queue for Connection objects.
Due to the timing issues of polling a Queue, a WorkerThread does not
check its own 'ready' flag after it has started. To stop the thread,
it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
(one for each running WorkerThread).
"""
conn = None
"""The current connection pulled off the Queue, or None."""
server = None
"""The HTTP Server which spawned this thread, and which owns the
Queue and is placing active connections into it."""
ready = False
"""A simple flag for the calling server to know when this thread
has begun polling the Queue."""
def __init__(self, server):
self.ready = False
self.server = server
self.requests_seen = 0
self.bytes_read = 0
self.bytes_written = 0
self.start_time = None
self.work_time = 0
self.stats = {
'Requests': lambda s: self.requests_seen + ((self.start_time is None) and trueyzero or self.conn.requests_seen),
'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and trueyzero or self.conn.rfile.bytes_read),
'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and trueyzero or self.conn.wfile.bytes_written),
'Work Time': lambda s: self.work_time + ((self.start_time is None) and trueyzero or time.time() - self.start_time),
'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
}
threading.Thread.__init__(self)
def run(self):
self.server.stats['Worker Threads'][self.getName()] = self.stats
try:
self.ready = True
while True:
conn = self.server.requests.get()
if conn is _SHUTDOWNREQUEST:
return
self.conn = conn
if self.server.stats['Enabled']:
self.start_time = time.time()
try:
conn.communicate()
finally:
conn.close()
if self.server.stats['Enabled']:
self.requests_seen += self.conn.requests_seen
self.bytes_read += self.conn.rfile.bytes_read
self.bytes_written += self.conn.wfile.bytes_written
self.work_time += time.time() - self.start_time
self.start_time = None
self.conn = None
except (KeyboardInterrupt, SystemExit):
exc = sys.exc_info()[1]
self.server.interrupt = exc
class ThreadPool(object):
"""A Request Queue for an HTTPServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(self, server, min=10, max=-1):
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = queue.Queue()
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName("CP Server " + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
def _get_idle(self):
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
idle = property(_get_idle, doc=_get_idle.__doc__)
def put(self, obj):
self._queue.put(obj)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
if self.max > 0:
budget = max(self.max - len(self._threads), 0)
else:
# self.max <= 0 indicates no maximum
budget = float('inf')
n_new = min(amount, budget)
workers = [self._spawn_worker() for i in range(n_new)]
while not self._all(operator.attrgetter('ready'), workers):
time.sleep(.1)
self._threads.extend(workers)
def _spawn_worker(self):
worker = WorkerThread(self.server)
worker.setName("CP Server " + worker.getName())
worker.start()
return worker
def _all(func, items):
results = [func(item) for item in items]
return reduce(operator.and_, results, True)
_all = staticmethod(_all)
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
# calculate the number of threads above the minimum
n_extra = max(len(self._threads) - self.min, 0)
# don't remove more than amount
n_to_remove = min(amount, n_extra)
# put shutdown requests on the queue equal to the number of threads
# to remove. As each request is processed by a worker, that worker
# will terminate and be culled from the list.
for n in range(n_to_remove):
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (AssertionError,
# Ignore repeated Ctrl-C.
# See https://bitbucket.org/cherrypy/cherrypy/issue/691.
KeyboardInterrupt):
pass
def _get_qsize(self):
return self._queue.qsize()
qsize = property(_get_qsize)
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
import ctypes.wintypes
_SetHandleInformation = windll.kernel32.SetHandleInformation
_SetHandleInformation.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
]
_SetHandleInformation.restype = ctypes.wintypes.BOOL
except ImportError:
def prevent_socket_inheritance(sock):
"""Dummy function, since neither fcntl nor ctypes are available."""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not _SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
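# Illustrative effect (sketch, not from the original source): after
# prevent_socket_inheritance(sock), the listening descriptor carries
# FD_CLOEXEC, so it is closed in any child process the server exec()s
# (e.g. via subprocess) instead of being leaked to it.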
class SSLAdapter(object):
"""Base class for SSL driver library adapters.
Required methods:
* ``wrap(sock) -> (wrapped socket, ssl environ dict)``
* ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
"""
def __init__(self, certificate, private_key, certificate_chain=None):
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def wrap(self, sock):
raise NotImplementedError
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
raise NotImplementedError
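# A minimal concrete adapter might look like the sketch below. This is an
# illustration only, not part of the original module; the ssl.wrap_socket()
# call and the empty environ dict are assumptions about one possible driver.
#
#   import ssl
#
#   class BuiltinSSLSketch(SSLAdapter):
#       def wrap(self, sock):
#           s = ssl.wrap_socket(sock, server_side=True,
#                               keyfile=self.private_key,
#                               certfile=self.certificate)
#           return s, {}
#
#       def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
#           return CP_fileobject(sock, mode, bufsize)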
class HTTPServer(object):
"""An HTTP server."""
_bind_addr = "127.0.0.1"
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create (default -1 = no limit)."""
server_name = None
"""The name of the server; defaults to socket.gethostname()."""
protocol = "HTTP/1.1"
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
shutdown_timeout = 5
"""The total time, in seconds, to wait for worker threads to cleanly exit."""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = "CherryPy/3.2.4"
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``."""
ready = False
"""An internal flag which marks whether the socket is accepting connections."""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of SSLAdapter (or a subclass).
You must have the corresponding SSL driver library installed."""
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
server_name=None):
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.clear_stats()
def clear_stats(self):
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, "qsize", None),
'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
'Threads Idle': lambda s: getattr(self.requests, "idle", None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum([w['Requests'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Read'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Written'](w) for w
in s['Worker Threads'].values()], 0),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum([w['Work Time'](w) for w
in s['Worker Threads'].values()], 0),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Worker Threads': {},
}
logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
def runtime(self):
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
self.bind_addr)
def _get_bind_addr(self):
return self._bind_addr
def _set_bind_addr(self, value):
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError("Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
"to listen on all active interfaces.")
self._bind_addr = value
bind_addr = property(_get_bind_addr, _set_bind_addr,
doc="""The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.""")
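# Illustrative bind_addr values (assumed typical usage, not from the original
# source):
#   server.bind_addr = ('0.0.0.0', 8080)     # all IPv4 interfaces
#   server.bind_addr = ('::', 8080)          # all IPv6 interfaces
#   server.bind_addr = '/tmp/cherrypy.sock'  # AF_UNIX socket path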
def start(self):
"""Run the server forever."""
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
if self.software is None:
self.software = "%s Server" % self.version
# SSL backward compatibility
if (self.ssl_adapter is None and
getattr(self, 'ssl_certificate', None) and
getattr(self, 'ssl_private_key', None)):
warnings.warn(
"SSL attributes are deprecated in CherryPy 3.2, and will "
"be removed in CherryPy 3.3. Use an ssl_adapter attribute "
"instead.",
DeprecationWarning
)
try:
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
except ImportError:
pass
else:
self.ssl_adapter = pyOpenSSLAdapter(
self.ssl_certificate, self.ssl_private_key,
getattr(self, 'ssl_certificate_chain', None))
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try: os.unlink(self.bind_addr)
except: pass
# So everyone can access the socket...
try: os.chmod(self.bind_addr, 511) # 0777
except: pass
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in self.bind_addr[0]:
info = [(socket.AF_INET6, socket.SOCK_STREAM,
0, "", self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM,
0, "", self.bind_addr)]
self.socket = None
msg = "No socket could be created"
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
except socket.error, serr:
msg = "%s -- (%s: %s)" % (msg, sa, serr)
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
while self.ready:
try:
self.tick()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.error_log("Error in HTTPServer.tick", level=logging.ERROR,
traceback=True)
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def error_log(self, msg="", level=20, traceback=False):
# Override this in subclasses as desired
sys.stderr.write(msg + '\n')
sys.stderr.flush()
if traceback:
tblines = format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay and not isinstance(self.bind_addr, str):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_adapter is not None:
self.socket = self.ssl_adapter.bind(self.socket)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See https://bitbucket.org/cherrypy/cherrypy/issue/871.
if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
def tick(self):
"""Accept a new connection and put it on the Queue."""
try:
s, addr = self.socket.accept()
if self.stats['Enabled']:
self.stats['Accepts'] += 1
if not self.ready:
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
makefile = CP_fileobject
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.ssl_adapter is not None:
try:
s, ssl_env = self.ssl_adapter.wrap(s)
except NoSSLError:
msg = ("The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
buf = ["%s 400 Bad Request\r\n" % self.protocol,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n\r\n",
msg]
wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE)
try:
wfile.sendall("".join(buf))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return
if not s:
return
makefile = self.ssl_adapter.makefile
# Re-apply our timeout since we may have a new socket object
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
conn = self.ConnectionClass(self, s, makefile)
if not isinstance(self.bind_addr, basestring):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
self.requests.put(conn)
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error:
x = sys.exc_info()[1]
if self.stats['Enabled']:
self.stats['Socket Errors'] += 1
if x.args[0] in socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See https://bitbucket.org/cherrypy/cherrypy/issue/707.
return
if x.args[0] in socket_errors_nonblocking:
# Just try again. See https://bitbucket.org/cherrypy/cherrypy/issue/479.
return
if x.args[0] in socket_errors_to_ignore:
# Our socket was closed.
# See https://bitbucket.org/cherrypy/cherrypy/issue/686.
return
raise
def _get_interrupt(self):
return self._interrupt
def _set_interrupt(self, interrupt):
self._interrupt = True
self.stop()
self._interrupt = interrupt
interrupt = property(_get_interrupt, _set_interrupt,
doc="Set this to an Exception instance to "
"interrupt the server.")
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, "socket", None)
if sock:
if not isinstance(self.bind_addr, basestring):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
# Changed to use error code and not message
# See https://bitbucket.org/cherrypy/cherrypy/issue/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, "close"):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
class Gateway(object):
"""A base class to interface HTTPServer with other systems, such as WSGI."""
def __init__(self, req):
self.req = req
def respond(self):
"""Process the current request. Must be overridden in a subclass."""
raise NotImplementedError
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='pyopenssl'):
"""Return an SSL adapter class for the given name."""
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
# -------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
"""A subclass of HTTPServer which calls a WSGI application."""
wsgi_version = (1, 0)
"""The version of WSGI to produce."""
def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
self.requests = ThreadPool(self, min=numthreads or 1, max=max)
self.wsgi_app = wsgi_app
self.gateway = wsgi_gateways[self.wsgi_version]
self.bind_addr = bind_addr
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
self.clear_stats()
def _get_numthreads(self):
return self.requests.min
def _set_numthreads(self, value):
self.requests.min = value
numthreads = property(_get_numthreads, _set_numthreads)
class WSGIGateway(Gateway):
"""A base class to interface HTTPServer with WSGI."""
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
raise NotImplementedError
def respond(self):
"""Process the current request."""
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicodestr):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info = None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.req.status = status
for k, v in headers:
if not isinstance(k, str):
raise TypeError("WSGI response header key %r is not of type str." % k)
if not isinstance(v, str):
raise TypeError("WSGI response header value %r is not of type str." % v)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
self.req.outheaders.extend(headers)
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response("500 Internal Server Error",
"The requested resource returned more bytes than the "
"declared Content-Length.")
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
"Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
"""A Gateway class to interface HTTPServer with WSGI 1.0.x."""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': req.path,
'QUERY_STRING': req.qs,
'REMOTE_ADDR': req.conn.remote_addr or '',
'REMOTE_PORT': str(req.conn.remote_port or ''),
'REQUEST_METHOD': req.method,
'REQUEST_URI': req.uri,
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': req.request_protocol,
'SERVER_SOFTWARE': req.server.software,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': req.scheme,
'wsgi.version': (1, 0),
}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env["SERVER_PORT"] = ""
else:
env["SERVER_PORT"] = str(req.server.bind_addr[1])
# Request headers
for k, v in req.inheaders.iteritems():
env["HTTP_" + k.upper().replace("-", "_")] = v
# CONTENT_TYPE/CONTENT_LENGTH
ct = env.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
env["CONTENT_TYPE"] = ct
cl = env.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
env["CONTENT_LENGTH"] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
class WSGIGateway_u0(WSGIGateway_10):
"""A Gateway class to interface HTTPServer with WSGI u.0.
WSGI u.0 is an experimental protocol, which uses unicode for keys and values
in both Python 2 and Python 3.
"""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
env[u'wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault(u'wsgi.url_encoding', u'utf-8')
try:
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env[u'wsgi.url_encoding'] = u'ISO-8859-1'
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
for k, v in sorted(env.items()):
if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
env[k] = v.decode('ISO-8859-1')
return env
wsgi_gateways = {
(1, 0): WSGIGateway_10,
('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
"""A WSGI dispatcher for dispatch based on the PATH_INFO.
apps: a dict or list of (path_prefix, app) pairs.
"""
def __init__(self, apps):
try:
apps = list(apps.items())
except AttributeError:
pass
# Sort the apps by len(path), descending
apps.sort(cmp=lambda x,y: cmp(len(x[0]), len(y[0])))
apps.reverse()
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip("/"), a) for p, a in apps]
def __call__(self, environ, start_response):
path = environ["PATH_INFO"] or "/"
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith(p + "/") or path == p:
environ = environ.copy()
environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
environ["PATH_INFO"] = path[len(p):]
return app(environ, start_response)
start_response('404 Not Found', [('Content-Type', 'text/plain'),
('Content-Length', '0')])
return ['']
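# Hedged usage sketch for the classes above (names, host and port are
# illustrative, not part of the original module):
#
#   def hello_app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return ['Hello world!']
#
#   dispatcher = WSGIPathInfoDispatcher({'/hello': hello_app})
#   server = CherryPyWSGIServer(('0.0.0.0', 8080), dispatcher)
#   try:
#       server.start()
#   except KeyboardInterrupt:
#       server.stop()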
| paolodoz/timesheet | cherrypy/wsgiserver/wsgiserver2.py | Python | gpl-2.0 | 88,589 |
#! /usr/bin/env python3
from ws.interactive import edit_interactive
from ws.client import API
from ws.utils import dmerge
from ws.parser_helpers.title import canonicalize
from ws.ArchWiki import lang
import mwparserfromhell
def page_language(page):
return lang.detect_language(page["title"])[1]
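# lang.detect_language() is assumed to return a (pure_title, language_name)
# tuple, so index [1] picks the language, e.g. "English" for "Main page" and
# "Español" for "Main page (Español)"; the example titles are illustrative.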
def edit(api: API, page, templates):
print(f"Parsing '{page['title']}'...")
text = page["revisions"][0]["slots"]["main"]["*"]
timestamp = page["revisions"][0]["timestamp"]
code = mwparserfromhell.parse(text)
for curTemplate in code.filter_templates():
# skip localized templates
if lang.detect_language(str(curTemplate.name))[1] != "English":
continue
# check if curTemplate matches a feasible template
curName = canonicalize(curTemplate.name)
if f"Template:{curName}" not in templates:
continue
# check if curTemplate has a localized variant
localized = lang.format_title(curName, page_language(page))
if f"Template:{localized}" not in templates:
continue
# localize the template
curTemplate.name = localized
if str(code) != text:
if __name__ == "__main__":
if "bot" in api.user.rights:
edit_interactive(api, page["title"], page["pageid"], text, str(code), timestamp, "localize templates", bot="")
else:
edit_interactive(api, page["title"], page["pageid"], text, str(code), timestamp, "localize templates")
else:
api.edit(page['title'], page['pageid'], str(code), timestamp, "localize templates", bot="")
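# Illustrative effect of edit() (exact titles depend on the wiki's naming
# scheme): on a page detected as "Česky", {{Note|...}} is rewritten to
# {{Note (Česky)|...}} provided "Template:Note (Česky)" exists in templates.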
def main(api: API):
print("Getting page IDs...")
pageids = set()
templates = set()
for template in api.list(list="allpages",
apnamespace="10",
apfilterlanglinks="withlanglinks",
aplimit="max"):
templates.add(template["title"])
if page_language(template) == "English":
# get IDs of the pages using this template
for page in api.generator(generator="embeddedin",
geifilterredir="nonredirects",
geilimit="max",
geititle=template["title"]):
if page_language(page) != "English":
pageids.add(page["pageid"])
print(f"Fetched {len(pageids)} pages.")
print("Getting page contents...")
result = {}
for chunk in api.call_api_autoiter_ids(action="query",
pageids=pageids,
prop="revisions",
rvprop="content|timestamp",
rvslots="main"):
dmerge(chunk, result)
pages = result["pages"]
for page in pages.values():
edit(api, page, templates)
if __name__ == "__main__":
import ws.config
api = ws.config.object_from_argparser(API, description="Replace unlocalised templates in localised pages with the localised templates")
main(api)
| lahwaacz/wiki-scripts | localize-templates.py | Python | gpl-3.0 | 3,177 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
class FleetVehicleCost(models.Model):
_name = 'fleet.vehicle.cost'
_description = 'Cost related to a vehicle'
_order = 'date desc, vehicle_id asc'
name = fields.Char(related='vehicle_id.name', string='Name', store=True)
vehicle_id = fields.Many2one('fleet.vehicle', 'Vehicle', required=True, help='Vehicle concerned by this log')
cost_subtype_id = fields.Many2one('fleet.service.type', 'Type', help='Cost type purchased with this cost')
amount = fields.Float('Total Price')
cost_type = fields.Selection([('contract', 'Contract'), ('services', 'Services'), ('fuel', 'Fuel'), ('other', 'Other')],
'Category of the cost', default="other", help='For internal purpose only', required=True)
parent_id = fields.Many2one('fleet.vehicle.cost', 'Parent', help='Parent cost to this current cost')
cost_ids = fields.One2many('fleet.vehicle.cost', 'parent_id', 'Included Services')
odometer_id = fields.Many2one('fleet.vehicle.odometer', 'Odometer', help='Odometer measure of the vehicle at the moment of this log')
odometer = fields.Float(compute="_get_odometer", inverse='_set_odometer', string='Odometer Value', help='Odometer measure of the vehicle at the moment of this log')
odometer_unit = fields.Selection(related='vehicle_id.odometer_unit', string="Unit", readonly=True)
date = fields.Date(help='Date when the cost has been executed')
contract_id = fields.Many2one('fleet.vehicle.log.contract', 'Contract', help='Contract attached to this cost')
auto_generated = fields.Boolean('Automatically Generated', readonly=True)
def _get_odometer(self):
for record in self:
if record.odometer_id:
record.odometer = record.odometer_id.value
def _set_odometer(self):
for record in self:
if not record.odometer:
raise UserError(_('Emptying the odometer value of a vehicle is not allowed.'))
odometer = self.env['fleet.vehicle.odometer'].create({
'value': record.odometer,
'date': record.date or fields.Date.context_today(record),
'vehicle_id': record.vehicle_id.id
})
record.odometer_id = odometer
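# The compute/inverse pair above lets a user type an odometer value directly
# on the cost form: reads go through odometer_id, while writes create a new
# fleet.vehicle.odometer record for the vehicle (a description of the ORM
# pattern used here, not additional behaviour).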
@api.model
def create(self, data):
#make sure that the data are consistent with values of parent and contract records given
if 'parent_id' in data and data['parent_id']:
parent = self.browse(data['parent_id'])
data['vehicle_id'] = parent.vehicle_id.id
data['date'] = parent.date
data['cost_type'] = parent.cost_type
if 'contract_id' in data and data['contract_id']:
contract = self.env['fleet.vehicle.log.contract'].browse(data['contract_id'])
data['vehicle_id'] = contract.vehicle_id.id
data['cost_subtype_id'] = contract.cost_subtype_id.id
data['cost_type'] = contract.cost_type
if 'odometer' in data and not data['odometer']:
#if received value for odometer is 0, then remove it from the data as it would result to the creation of a
#odometer log with 0, which is to be avoided
del(data['odometer'])
return super(FleetVehicleCost, self).create(data)
class FleetVehicleTag(models.Model):
_name = 'fleet.vehicle.tag'
name = fields.Char(required=True, translate=True)
color = fields.Integer('Color Index')
_sql_constraints = [('name_uniq', 'unique (name)', "Tag name already exists !")]
class FleetVehicleState(models.Model):
_name = 'fleet.vehicle.state'
_order = 'sequence asc'
name = fields.Char(required=True)
sequence = fields.Integer(help="Used to order the note stages")
_sql_constraints = [('fleet_state_name_unique', 'unique(name)', 'State name already exists')]
class FleetVehicleModel(models.Model):
_name = 'fleet.vehicle.model'
_description = 'Model of a vehicle'
_order = 'name asc'
name = fields.Char('Model name', required=True)
brand_id = fields.Many2one('fleet.vehicle.model.brand', 'Make', required=True, help='Make of the vehicle')
vendors = fields.Many2many('res.partner', 'fleet_vehicle_model_vendors', 'model_id', 'partner_id', string='Vendors')
image = fields.Binary(related='brand_id.image', string="Logo")
image_medium = fields.Binary(related='brand_id.image_medium', string="Logo (medium)")
image_small = fields.Binary(related='brand_id.image_small', string="Logo (small)")
@api.multi
@api.depends('name', 'brand_id')
def name_get(self):
res = []
for record in self:
name = record.name
if record.brand_id.name:
name = record.brand_id.name + '/' + name
res.append((record.id, name))
return res
@api.onchange('brand_id')
def _onchange_brand(self):
if self.brand_id:
self.image_medium = self.brand_id.image
else:
self.image_medium = False
class FleetVehicleModelBrand(models.Model):
_name = 'fleet.vehicle.model.brand'
_description = 'Brand model of the vehicle'
_order = 'name asc'
name = fields.Char('Make', required=True)
image = fields.Binary("Logo", attachment=True,
help="This field holds the image used as logo for the brand, limited to 1024x1024px.")
image_medium = fields.Binary("Medium-sized image", attachment=True,
help="Medium-sized logo of the brand. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary("Small-sized image", attachment=True,
help="Small-sized logo of the brand. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(FleetVehicleModelBrand, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(FleetVehicleModelBrand, self).write(vals)
class FleetVehicle(models.Model):
_inherit = 'mail.thread'
_name = 'fleet.vehicle'
_description = 'Information on a vehicle'
_order = 'license_plate asc'
def _get_default_state(self):
state = self.env.ref('fleet.vehicle_state_active', raise_if_not_found=False)
return state and state.id or False
name = fields.Char(compute="_compute_vehicle_name", store=True)
active = fields.Boolean(default=True)
company_id = fields.Many2one('res.company', 'Company')
license_plate = fields.Char(required=True, help='License plate number of the vehicle (i = plate number for a car)')
vin_sn = fields.Char('Chassis Number', help='Unique number written on the vehicle motor (VIN/SN number)', copy=False)
driver_id = fields.Many2one('res.partner', 'Driver', help='Driver of the vehicle')
model_id = fields.Many2one('fleet.vehicle.model', 'Model', required=True, help='Model of the vehicle')
log_fuel = fields.One2many('fleet.vehicle.log.fuel', 'vehicle_id', 'Fuel Logs')
log_services = fields.One2many('fleet.vehicle.log.services', 'vehicle_id', 'Services Logs')
log_contracts = fields.One2many('fleet.vehicle.log.contract', 'vehicle_id', 'Contracts')
cost_count = fields.Integer(compute="_compute_count_all", string="Costs")
contract_count = fields.Integer(compute="_compute_count_all", string='Contracts')
service_count = fields.Integer(compute="_compute_count_all", string='Services')
fuel_logs_count = fields.Integer(compute="_compute_count_all", string='Fuel Logs')
odometer_count = fields.Integer(compute="_compute_count_all", string='Odometer')
acquisition_date = fields.Date('Acquisition Date', required=False, help='Date when the vehicle has been bought')
color = fields.Char(help='Color of the vehicle')
state_id = fields.Many2one('fleet.vehicle.state', 'State', default=_get_default_state, help='Current state of the vehicle', ondelete="set null")
location = fields.Char(help='Location of the vehicle (garage, ...)')
seats = fields.Integer('Seats Number', help='Number of seats of the vehicle')
doors = fields.Integer('Doors Number', help='Number of doors of the vehicle', default=5)
tag_ids = fields.Many2many('fleet.vehicle.tag', 'fleet_vehicle_vehicle_tag_rel', 'vehicle_tag_id', 'tag_id', 'Tags', copy=False)
odometer = fields.Float(compute='_get_odometer', inverse='_set_odometer', string='Last Odometer', help='Odometer measure of the vehicle at the moment of this log')
odometer_unit = fields.Selection([('kilometers', 'Kilometers'), ('miles', 'Miles')],
'Odometer Unit', default='kilometers', help='Unit of the odometer ', required=True)
transmission = fields.Selection([('manual', 'Manual'), ('automatic', 'Automatic')], 'Transmission', help='Transmission Used by the vehicle')
fuel_type = fields.Selection([('gasoline', 'Gasoline'), ('diesel', 'Diesel'), ('electric', 'Electric'), ('hybrid', 'Hybrid')], 'Fuel Type', help='Fuel Used by the vehicle')
horsepower = fields.Integer()
horsepower_tax = fields.Float('Horsepower Taxation')
power = fields.Integer('Power', help='Power in kW of the vehicle')
co2 = fields.Float('CO2 Emissions', help='CO2 emissions of the vehicle')
image = fields.Binary(related='model_id.image', string="Logo")
image_medium = fields.Binary(related='model_id.image_medium', string="Logo (medium)")
image_small = fields.Binary(related='model_id.image_small', string="Logo (small)")
contract_renewal_due_soon = fields.Boolean(compute='_compute_contract_reminder', search='_search_contract_renewal_due_soon', string='Has Contracts to renew', multi='contract_info')
contract_renewal_overdue = fields.Boolean(compute='_compute_contract_reminder', search='_search_get_overdue_contract_reminder', string='Has Contracts Overdue', multi='contract_info')
contract_renewal_name = fields.Text(compute='_compute_contract_reminder', string='Name of contract to renew soon', multi='contract_info')
contract_renewal_total = fields.Text(compute='_compute_contract_reminder', string='Total of contracts due or overdue minus one', multi='contract_info')
car_value = fields.Float(help='Value of the bought vehicle')
@api.depends('model_id', 'license_plate')
def _compute_vehicle_name(self):
for record in self:
record.name = record.model_id.brand_id.name + '/' + record.model_id.name + '/' + record.license_plate
def _get_odometer(self):
FleetVehicalOdometer = self.env['fleet.vehicle.odometer']
for record in self:
vehicle_odometer = FleetVehicalOdometer.search([('vehicle_id', '=', record.id)], limit=1, order='value desc')
if vehicle_odometer:
record.odometer = vehicle_odometer.value
else:
record.odometer = 0
def _set_odometer(self):
for record in self:
if record.odometer:
date = fields.Date.context_today(record)
data = {'value': record.odometer, 'date': date, 'vehicle_id': record.id}
self.env['fleet.vehicle.odometer'].create(data)
def _compute_count_all(self):
Odometer = self.env['fleet.vehicle.odometer']
LogFuel = self.env['fleet.vehicle.log.fuel']
LogService = self.env['fleet.vehicle.log.services']
LogContract = self.env['fleet.vehicle.log.contract']
Cost = self.env['fleet.vehicle.cost']
for record in self:
record.odometer_count = Odometer.search_count([('vehicle_id', '=', record.id)])
record.fuel_logs_count = LogFuel.search_count([('vehicle_id', '=', record.id)])
record.service_count = LogService.search_count([('vehicle_id', '=', record.id)])
record.contract_count = LogContract.search_count([('vehicle_id', '=', record.id)])
record.cost_count = Cost.search_count([('vehicle_id', '=', record.id), ('parent_id', '=', False)])
@api.depends('log_contracts')
def _compute_contract_reminder(self):
for record in self:
overdue = False
due_soon = False
total = 0
name = ''
for element in record.log_contracts:
if element.state in ('open', 'toclose') and element.expiration_date:
current_date_str = fields.Date.context_today(record)
due_time_str = element.expiration_date
current_date = fields.Date.from_string(current_date_str)
due_time = fields.Date.from_string(due_time_str)
diff_time = (due_time - current_date).days
if diff_time < 0:
overdue = True
total += 1
if diff_time < 15 and diff_time >= 0:
due_soon = True
total += 1
if overdue or due_soon:
log_contract = self.env['fleet.vehicle.log.contract'].search([('vehicle_id', '=', record.id), ('state', 'in', ('open', 'toclose'))],
limit=1, order='expiration_date asc')
if log_contract:
#we display only the name of the oldest overdue/due soon contract
name = log_contract.cost_subtype_id.name
record.contract_renewal_overdue = overdue
record.contract_renewal_due_soon = due_soon
record.contract_renewal_total = total - 1 # we remove 1 from the real total for display purposes
record.contract_renewal_name = name
def _search_contract_renewal_due_soon(self, operator, value):
res = []
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value is True) or (operator in ('<>', '!=') and value is False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.Date.context_today(self)
datetime_today = fields.Datetime.from_string(today)
limit_date = fields.Datetime.to_string(datetime_today + relativedelta(days=+15))
self.env.cr.execute("""SELECT cost.vehicle_id,
count(contract.id) AS contract_number
FROM fleet_vehicle_cost cost
LEFT JOIN fleet_vehicle_log_contract contract ON contract.cost_id = cost.id
WHERE contract.expiration_date IS NOT NULL
AND contract.expiration_date > %s
AND contract.expiration_date < %s
AND contract.state IN ('open', 'toclose')
GROUP BY cost.vehicle_id""", (today, limit_date))
res_ids = [x[0] for x in self.env.cr.fetchall()]
res.append(('id', search_operator, res_ids))
return res
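# The search method above returns a one-element domain such as
# [('id', 'in', [3, 7])] (values illustrative), which the ORM then combines
# with the rest of the user's search criteria.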
def _search_get_overdue_contract_reminder(self, operator, value):
res = []
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value is True) or (operator in ('<>', '!=') and value is False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.Date.context_today(self)
self.env.cr.execute('''SELECT cost.vehicle_id,
count(contract.id) AS contract_number
FROM fleet_vehicle_cost cost
LEFT JOIN fleet_vehicle_log_contract contract ON contract.cost_id = cost.id
WHERE contract.expiration_date IS NOT NULL
AND contract.expiration_date < %s
AND contract.state IN ('open', 'toclose')
GROUP BY cost.vehicle_id ''', (today,))
res_ids = [x[0] for x in self.env.cr.fetchall()]
res.append(('id', search_operator, res_ids))
return res
@api.onchange('model_id')
def _onchange_model(self):
if self.model_id:
self.image_medium = self.model_id.image
else:
self.image_medium = False
@api.model
def create(self, data):
vehicle = super(FleetVehicle, self.with_context(mail_create_nolog=True)).create(data)
vehicle.message_post(body=_('%s %s has been added to the fleet!') % (vehicle.model_id.name, vehicle.license_plate))
return vehicle
@api.multi
def write(self, vals):
"""
This function writes an entry in the openchatter whenever we change important information
on the vehicle, such as the model, the driver, the state of the vehicle or its license plate
"""
for vehicle in self:
changes = []
if 'model_id' in vals and vehicle.model_id.id != vals['model_id']:
value = self.env['fleet.vehicle.model'].browse(vals['model_id']).name
oldmodel = vehicle.model_id.name or _('None')
changes.append(_("Model: from '%s' to '%s'") % (oldmodel, value))
if 'driver_id' in vals and vehicle.driver_id.id != vals['driver_id']:
value = self.env['res.partner'].browse(vals['driver_id']).name
olddriver = (vehicle.driver_id.name) or _('None')
changes.append(_("Driver: from '%s' to '%s'") % (olddriver, value))
if 'state_id' in vals and vehicle.state_id.id != vals['state_id']:
value = self.env['fleet.vehicle.state'].browse(vals['state_id']).name
oldstate = vehicle.state_id.name or _('None')
changes.append(_("State: from '%s' to '%s'") % (oldstate, value))
if 'license_plate' in vals and vehicle.license_plate != vals['license_plate']:
old_license_plate = vehicle.license_plate or _('None')
changes.append(_("License Plate: from '%s' to '%s'") % (old_license_plate, vals['license_plate']))
if len(changes) > 0:
self.message_post(body=", ".join(changes))
return super(FleetVehicle, self).write(vals)
@api.multi
def return_action_to_open(self):
""" This opens the xml view specified in xml_id for the current vehicle """
self.ensure_one()
xml_id = self.env.context.get('xml_id')
if xml_id:
res = self.env['ir.actions.act_window'].for_xml_id('fleet', xml_id)
res.update(
context=dict(self.env.context, default_vehicle_id=self.id, group_by=False),
domain=[('vehicle_id', '=', self.id)]
)
return res
return False
@api.multi
def act_show_log_cost(self):
""" This opens log view to view and add new log for this vehicle, groupby default to only show effective costs
@return: the costs log view
"""
self.ensure_one()
res = self.env['ir.actions.act_window'].for_xml_id('fleet', 'fleet_vehicle_costs_action')
res.update(
context=dict(self.env.context, default_vehicle_id=self.id, search_default_parent_false=True),
domain=[('vehicle_id', '=', self.id)]
)
return res
class FleetVehicleOdometer(models.Model):
_name = 'fleet.vehicle.odometer'
_description = 'Odometer log for a vehicle'
_order = 'date desc'
name = fields.Char(compute='_compute_vehicle_log_name', store=True)
date = fields.Date(default=fields.Date.context_today)
value = fields.Float('Odometer Value', group_operator="max")
vehicle_id = fields.Many2one('fleet.vehicle', 'Vehicle', required=True)
unit = fields.Selection(related='vehicle_id.odometer_unit', string="Unit", readonly=True)
@api.depends('vehicle_id', 'date')
def _compute_vehicle_log_name(self):
for record in self:
name = record.vehicle_id.name
if not name:
name = record.date
elif record.date:
name += ' / ' + record.date
record.name = name
@api.onchange('vehicle_id')
def _onchange_vehicle(self):
if self.vehicle_id:
self.unit = self.vehicle_id.odometer_unit
class FleetVehicleLogFuel(models.Model):
_name = 'fleet.vehicle.log.fuel'
_description = 'Fuel log for vehicles'
_inherits = {'fleet.vehicle.cost': 'cost_id'}
@api.model
def default_get(self, default_fields):
res = super(FleetVehicleLogFuel, self).default_get(default_fields)
service = self.env.ref('fleet.type_service_refueling', raise_if_not_found=False)
res.update({
'date': fields.Date.context_today(self),
'cost_subtype_id': service and service.id or False,
'cost_type': 'fuel'
})
return res
liter = fields.Float()
price_per_liter = fields.Float()
purchaser_id = fields.Many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]")
inv_ref = fields.Char('Invoice Reference', size=64)
vendor_id = fields.Many2one('res.partner', 'Vendor', domain="[('supplier','=',True)]")
notes = fields.Text()
cost_id = fields.Many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade')
cost_amount = fields.Float(related='cost_id.amount', string='Amount', store=True) # we need to keep this field as a related with store=True because the graph view doesn't support (1) to address fields from inherited table and (2) fields that aren't stored in database
@api.onchange('vehicle_id')
def _onchange_vehicle(self):
if self.vehicle_id:
self.odometer_unit = self.vehicle_id.odometer_unit
self.purchaser_id = self.vehicle_id.driver_id.id
@api.onchange('liter', 'price_per_liter', 'amount')
def _onchange_liter_price_amount(self):
#need to cast in float because the value received from the web client may be an integer (Javascript and JSON do not
#make any difference between 3.0 and 3). This cause a problem if you encode, for example, 2 liters at 1.5 per
#liter => total is computed as 3.0, then trigger an onchange that recomputes price_per_liter as 3/2=1 (instead
#of 3.0/2=1.5)
#If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
#onchange. And in order to verify that there is no change in the result, we have to limit the precision of the
#computation to 2 decimal
liter = float(self.liter)
price_per_liter = float(self.price_per_liter)
amount = float(self.amount)
if liter > 0 and price_per_liter > 0 and round(liter * price_per_liter, 2) != amount:
self.amount = round(liter * price_per_liter, 2)
elif amount > 0 and liter > 0 and round(amount / liter, 2) != price_per_liter:
self.price_per_liter = round(amount / liter, 2)
elif amount > 0 and price_per_liter > 0 and round(amount / price_per_liter, 2) != liter:
self.liter = round(amount / price_per_liter, 2)
class FleetVehicleLogServices(models.Model):
_name = 'fleet.vehicle.log.services'
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_description = 'Services for vehicles'
@api.model
def default_get(self, default_fields):
res = super(FleetVehicleLogServices, self).default_get(default_fields)
service = self.env.ref('fleet.type_service_service_8', raise_if_not_found=False)
res.update({
'date': fields.Date.context_today(self),
'cost_subtype_id': service and service.id or False,
'cost_type': 'services'
})
return res
purchaser_id = fields.Many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]")
inv_ref = fields.Char('Invoice Reference')
vendor_id = fields.Many2one('res.partner', 'Vendor', domain="[('supplier','=',True)]")
# we need to keep this field as a related with store=True because the graph view doesn't support
# (1) to address fields from inherited table and (2) fields that aren't stored in database
cost_amount = fields.Float(related='cost_id.amount', string='Amount', store=True)
notes = fields.Text()
cost_id = fields.Many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade')
@api.onchange('vehicle_id')
def _onchange_vehicle(self):
if self.vehicle_id:
self.odometer_unit = self.vehicle_id.odometer_unit
self.purchaser_id = self.vehicle_id.driver_id.id
class FleetServiceType(models.Model):
_name = 'fleet.service.type'
_description = 'Type of services available on a vehicle'
name = fields.Char(required=True, translate=True)
category = fields.Selection([('contract', 'Contract'), ('service', 'Service'), ('both', 'Both')], 'Category',
required=True, help='Choose whether the service refers to contracts, vehicle services or both')
class FleetVehicleLogContract(models.Model):
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_name = 'fleet.vehicle.log.contract'
_description = 'Contract information on a vehicle'
_order = 'state desc,expiration_date'
def compute_next_year_date(self, strdate):
oneyear = relativedelta(years=1)
start_date = fields.Date.from_string(strdate)
return fields.Date.to_string(start_date + oneyear)
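# e.g. compute_next_year_date('2018-03-15') -> '2019-03-15'; dates are ISO
# strings as used by fields.Date (example values are illustrative).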
@api.model
def default_get(self, default_fields):
res = super(FleetVehicleLogContract, self).default_get(default_fields)
contract = self.env.ref('fleet.type_contract_leasing', raise_if_not_found=False)
res.update({
'date': fields.Date.context_today(self),
'cost_subtype_id': contract and contract.id or False,
'cost_type': 'contract'
})
return res
name = fields.Text(compute='_compute_contract_name', store=True)
active = fields.Boolean(default=True)
start_date = fields.Date('Contract Start Date', default=fields.Date.context_today, help='Date when the coverage of the contract begins')
expiration_date = fields.Date('Contract Expiration Date', default=lambda self: self.compute_next_year_date(fields.Date.context_today(self)),
help='Date when the coverage of the contract expires (by default, one year after begin date)')
days_left = fields.Integer(compute='_compute_days_left', string='Warning Date')
insurer_id = fields.Many2one('res.partner', 'Vendor')
purchaser_id = fields.Many2one('res.partner', 'Contractor', default=lambda self: self.env.user.partner_id.id, help='Person to which the contract is signed for')
ins_ref = fields.Char('Contract Reference', size=64, copy=False)
state = fields.Selection([('open', 'In Progress'), ('toclose', 'To Close'), ('closed', 'Terminated')],
'Status', default='open', readonly=True, help='Choose whether the contract is still valid or not',
copy=False)
notes = fields.Text('Terms and Conditions', help='Write here all supplementary information relative to this contract', copy=False)
cost_generated = fields.Float('Recurring Cost Amount', help="Costs paid at regular intervals, depending on the cost frequency."
"If the cost frequency is set to unique, the cost will be logged at the start date")
cost_frequency = fields.Selection([('no', 'No'), ('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('yearly', 'Yearly')], 'Recurring Cost Frequency',
default='no', help='Frequency of the recurring cost', required=True)
generated_cost_ids = fields.One2many('fleet.vehicle.cost', 'contract_id', 'Generated Costs')
sum_cost = fields.Float(compute='_compute_sum_cost', string='Indicative Costs Total')
cost_id = fields.Many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade')
cost_amount = fields.Float(related='cost_id.amount', string='Amount', store=True) # we need to keep this field as a related with store=True because the graph view doesn't support (1) to address fields from inherited table and (2) fields that aren't stored in database
@api.depends('vehicle_id', 'cost_subtype_id', 'date')
def _compute_contract_name(self):
for record in self:
name = record.vehicle_id.name
if record.cost_subtype_id.name:
name += ' / ' + record.cost_subtype_id.name
if record.date:
name += ' / ' + record.date
record.name = name
@api.depends('expiration_date', 'state')
def _compute_days_left(self):
"""return a dict with as value for each contract an integer
if contract is in an open state and is overdue, return 0
if contract is in a closed state, return -1
otherwise return the number of days before the contract expires
"""
for record in self:
if (record.expiration_date and (record.state == 'open' or record.state == 'toclose')):
today = fields.Date.from_string(fields.Date.today())
renew_date = fields.Date.from_string(record.expiration_date)
diff_time = (renew_date - today).days
record.days_left = diff_time > 0 and diff_time or 0
else:
record.days_left = -1
@api.depends('cost_ids.amount')
def _compute_sum_cost(self):
for contract in self:
contract.sum_cost = sum(contract.cost_ids.mapped('amount'))
@api.onchange('vehicle_id')
def _onchange_vehicle(self):
if self.vehicle_id:
self.odometer_unit = self.vehicle_id.odometer_unit
@api.multi
def contract_close(self):
for record in self:
record.state = 'closed'
@api.multi
def contract_open(self):
for record in self:
record.state = 'open'
@api.multi
def act_renew_contract(self):
assert len(self.ids) == 1, "This operation should only be done for 1 single contract at a time, as it is supposed to open a window as a result"
for element in self:
#compute end date
startdate = fields.Date.from_string(element.start_date)
enddate = fields.Date.from_string(element.expiration_date)
diffdate = (enddate - startdate)
default = {
'date': fields.Date.context_today(self),
'start_date': fields.Date.to_string(fields.Date.from_string(element.expiration_date) + relativedelta(days=1)),
'expiration_date': fields.Date.to_string(enddate + diffdate),
}
newid = element.copy(default).id
return {
'name': _("Renew Contract"),
'view_mode': 'form',
'view_id': self.env.ref('fleet.fleet_vehicle_log_contract_view_form').id,
'view_type': 'tree,form',
'res_model': 'fleet.vehicle.log.contract',
'type': 'ir.actions.act_window',
'domain': '[]',
'res_id': newid,
'context': {'active_id': newid},
}
@api.model
def scheduler_manage_auto_costs(self):
#This method is called by a cron task
#It creates costs for contracts having the "recurring cost" field set, depending on their frequency
#For example, if a contract has a recurring cost of 200 with a weekly frequency, this method creates a cost of 200 on the first day of each week, from the date of the last recurring cost in the database to today
#If the contract has not yet any recurring costs in the database, the method generates the recurring costs from the start_date to today
#The created costs are associated to a contract thanks to the many2one field contract_id
#If the contract has no start_date, no cost will be created, even if the contract has recurring costs
VehicleCost = self.env['fleet.vehicle.cost']
deltas = {'yearly': relativedelta(years=+1), 'monthly': relativedelta(months=+1), 'weekly': relativedelta(weeks=+1), 'daily': relativedelta(days=+1)}
contracts = self.env['fleet.vehicle.log.contract'].search([('state', '!=', 'closed')], offset=0, limit=None, order=None)
for contract in contracts:
if not contract.start_date or contract.cost_frequency == 'no':
continue
found = False
last_cost_date = contract.start_date
if contract.generated_cost_ids:
last_autogenerated_cost = VehicleCost.search([('contract_id', '=', contract.id), ('auto_generated', '=', True)], offset=0, limit=1, order='date desc')
if last_autogenerated_cost:
found = True
last_cost_date = last_autogenerated_cost.date
startdate = fields.Date.from_string(last_cost_date)
if found:
startdate += deltas.get(contract.cost_frequency)
today = fields.Date.from_string(fields.Date.context_today(self))
while (startdate <= today) & (startdate <= fields.Date.from_string(contract.expiration_date)):
data = {
'amount': contract.cost_generated,
'date': fields.Date.context_today(self),
'vehicle_id': contract.vehicle_id.id,
'cost_subtype_id': contract.cost_subtype_id.id,
'contract_id': contract.id,
'auto_generated': True
}
self.env['fleet.vehicle.cost'].create(data)
startdate += deltas.get(contract.cost_frequency)
return True
@api.model
def scheduler_manage_contract_expiration(self):
#This method is called by a cron task
#It manages the state of a contract, possibly by posting a message on the vehicle concerned and updating its status
date_today = fields.Date.from_string(fields.Date.context_today(self))
limit_date = fields.Date.to_string(date_today + relativedelta(days=+15))
contracts = self.search([('state', '=', 'open'), ('expiration_date', '<', limit_date)])
res = {}
for contract in contracts:
if contract.vehicle_id.id in res:
res[contract.vehicle_id.id] += 1
else:
res[contract.vehicle_id.id] = 1
Vehicle = self.env['fleet.vehicle']
for vehicle, value in res.items():
Vehicle.browse(vehicle).message_post(body=_('%s contract(s) need(s) to be renewed and/or closed!') % value)
return contracts.write({'state': 'toclose'})
@api.model
def run_scheduler(self):
self.scheduler_manage_auto_costs()
self.scheduler_manage_contract_expiration()
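# --- Illustrative sketch (not part of the original module) ---
# The comments in scheduler_manage_auto_costs() above describe how recurring
# costs are generated: start from the last auto-generated cost date (or the
# contract start date if none exists yet), then step forward by the contract's
# frequency, emitting one cost per step, up to today and never past the
# expiration date. The standalone helper below is a hypothetical restatement
# of just that date-stepping logic; its name and signature are not part of
# the fleet module.
def _sketch_recurring_cost_dates(last_cost_date, frequency, today, expiration_date, already_generated):
    from dateutil.relativedelta import relativedelta
    deltas = {'yearly': relativedelta(years=+1), 'monthly': relativedelta(months=+1),
              'weekly': relativedelta(weeks=+1), 'daily': relativedelta(days=+1)}
    # If a cost was already generated, the next one is due one period later;
    # otherwise the first cost is due on last_cost_date itself (the start date).
    current = last_cost_date + deltas[frequency] if already_generated else last_cost_date
    dates = []
    while current <= today and current <= expiration_date:
        dates.append(current)
        current += deltas[frequency]
    return dates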
| dfang/odoo | addons/fleet/models/fleet.py | Python | agpl-3.0 | 35,182 |
# -*- coding: utf-8 -*-
from setuptools import setup, Command
name = "suitable"
description = "Suitable is a thin wrapper around the Ansible API."
def get_long_description():
with open('README.rst') as readme_file:
for line in readme_file.readlines():
if description in line:
continue
yield line.replace('\n', '')
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys
import subprocess
errno = subprocess.call([sys.executable, 'runtests.py'])
raise SystemExit(errno)
setup(
name='suitable',
version='0.17.2',
url='http://github.com/seantis/suitable/',
license='GPLv3',
author='Denis Krienbühl',
author_email='[email protected]',
description=description,
long_description='\n'.join(get_long_description()),
packages=['suitable'],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'ansible>=2.8.0.0'
],
# Ansible does not support Python 3.0 through 3.4, so neither do we
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
extras_require={
'tests': [
'mitogen>=0.2.8',
'paramiko',
'port-for',
'pytest',
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| seantis/suitable | setup.py | Python | gpl-3.0 | 1,752 |
# -*- coding: utf-8 -*-
import re
import os
import shutil
from expects import expect
from mamba import describe, context, before, after
from spec.ui._ipod_helpers import *
from spec.ui._fixture import update_environment
with describe('ipodio pull') as _:
@before.all
def setup_all():
update_environment(_)
bootstrap_ipod(_.mountpoint_path)
populate_ipod(_.mountpoint_path, _.songs)
_.execution = _.env.run(*_.cmd + ['pull'])
@after.all
def cleanup():
shutil.rmtree(_.env_path)
def should_copy_selected_songs_to_the_current_directory():
copied_songs = [path for path in _.execution.files_created if path.endswith('.mp3')]
expect(copied_songs).to.have.length(2)
def should_name_copied_songs_using_number_title_album_artist():
pattern = re.compile('^(\d+)?_([\s\w]+)?_([\s\w]+)?_([\s\w]+)?.mp3$')
copied_songs = [path for path in _.execution.files_created if pattern.match(os.path.basename(path))]
expect(copied_songs).to.have.length(2)
def should_create_a_hierarchy_of_directories_using_artist_and_album():
created_directories = [path for path in _.execution.files_created if not path.endswith('.mp3')]
expect(created_directories).to.have(
'Jono Bacon',
'Jono Bacon/Released as a single',
'Richard Stallman',
)
def should_avoid_overwriting_song_files():
execution = _.env.run(*_.cmd + ['pull'])
expect(execution.files_created).to.be.empty
with context('with filesystem errors'):
def should_directory_creation_failures():
execution = _.env.run(*_.cmd + ['pull', '--dest', _.unwritable_dir])
expect(execution.stdout).to.have('Could not create directory')
def should_file_copy_failures():
execution = _.env.run(*_.cmd + ['pull', '--plain', '--dest', _.unwritable_dir])
expect(execution.stdout).to.have('Could not copy')
@before.each
def setup_unwritable_destination():
_.unwritable_dir = 'unwritable'
_.unwritable_dir_path = os.path.join(_.env_path, _.unwritable_dir)
with open(_.unwritable_dir_path, 'w') as fakedir:
fakedir.write('foo\n')
with context('with --force option'):
def should_not_mind_overwriting_song_files():
_.env.run(*_.cmd + ['pull'])
execution = _.env.run(*_.cmd + ['--force', 'pull'])
expect(execution.files_updated).to.have.length(2)
with context('with --dest <destination> option'):
def should_copy_the_songs_to_the_destination_directory():
execution = _.env.run(*_.cmd + ['--dest', 'pulled', 'pull'])
copied_songs = [path for path in execution.files_created
if 'pulled' in path and path.endswith('.mp3')]
expect(copied_songs).to.have.length(2)
with context('with --plain option'):
def should_copy_all_files_without_hierarchy():
execution = _.env.run(*_.cmd + ['--plain', 'pull'])
expect(execution.files_created).to.have.length(2)
| jvrsantacruz/ipodio | spec/ui/pull_spec.py | Python | bsd-3-clause | 3,160 |
#!/usr/bin/python
# coding= utf-8
class Repubblica:
def __init__(self, file_frequenze):
self.massima_frequenza=0
self.dizionario_frequenze={}
self.dizionario_cumulato={}
self.calcola_frequenze (file_frequenze)
def calcola_frequenze(self, file_frequenze):
with open(file_frequenze, 'r') as f:
for line in f:
l=line.split("\t")
tupla=tuple([l[0]]+[l[1]])
self.dizionario_frequenze[tupla]=int(l[2])
self.dizionario_cumulato[l[0]]=int(l[2]) if not l[0] in self.dizionario_cumulato else self.dizionario_cumulato[l[0]]+int(l[2])
if self.dizionario_cumulato[l[0]]>self.massima_frequenza:
self.massima_frequenza=self.dizionario_cumulato[l[0]]
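# --- Illustrative sketch (not part of the original module) ---
# calcola_frequenze() above assumes a tab-separated frequency file with one
# record per line: two tokens followed by an integer count
# ("token<TAB>tag<TAB>count"). dizionario_frequenze maps each (token, tag)
# pair to its count, dizionario_cumulato sums counts per first token, and
# massima_frequenza tracks the largest cumulative count. The demo below is
# hypothetical and only shows the assumed input format.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as tmp:
        tmp.write("casa\tNOUN\t10\n")
        tmp.write("casa\tVERB\t2\n")
        tmp.write("cane\tNOUN\t5\n")
        percorso = tmp.name
    rep = Repubblica(percorso)
    print(rep.massima_frequenza)                       # 12 (10 + 2 for "casa")
    print(rep.dizionario_frequenze[("casa", "NOUN")])  # 10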
| ellepannitto/Tesi | Repubblica.py | Python | gpl-3.0 | 691 |
"""AutoComplete.py - An IDLE extension for automatically completing names.
This extension can complete either attribute names or file names. It can pop
a window with all available names, for the user to select from.
"""
import os
import sys
import string
from idlelib.configHandler import idleConf
# This string includes all chars that may be in a file name (without a path
# separator)
FILENAME_CHARS = string.ascii_letters + string.digits + os.curdir + "._~#$:-"
# This string includes all chars that may be in an identifier
ID_CHARS = string.ascii_letters + string.digits + "_"
# These constants represent the two different types of completions
COMPLETE_ATTRIBUTES, COMPLETE_FILES = range(1, 2+1)
from idlelib import AutoCompleteWindow
from idlelib.HyperParser import HyperParser
import __main__
SEPS = os.sep
if os.altsep: # e.g. '/' on Windows...
SEPS += os.altsep
class AutoComplete:
menudefs = [
('edit', [
("Show Completions", "<<force-open-completions>>"),
])
]
popupwait = idleConf.GetOption("extensions", "AutoComplete",
"popupwait", type="int", default=0)
def __init__(self, editwin=None):
self.editwin = editwin
if editwin is None: # subprocess and test
return
self.text = editwin.text
self.autocompletewindow = None
# id of delayed call, and the index of the text insert when the delayed
# call was issued. If _delayed_completion_id is None, there is no
# delayed call.
self._delayed_completion_id = None
self._delayed_completion_index = None
def _make_autocomplete_window(self):
return AutoCompleteWindow.AutoCompleteWindow(self.text)
def _remove_autocomplete_window(self, event=None):
if self.autocompletewindow:
self.autocompletewindow.hide_window()
self.autocompletewindow = None
def force_open_completions_event(self, event):
"""Happens when the user really wants to open a completion list, even
if a function call is needed.
"""
self.open_completions(True, False, True)
def try_open_completions_event(self, event):
"""Happens when it would be nice to open a completion list, but not
really necessary, for example after a dot, so function
calls won't be made.
"""
lastchar = self.text.get("insert-1c")
if lastchar == ".":
self._open_completions_later(False, False, False,
COMPLETE_ATTRIBUTES)
elif lastchar in SEPS:
self._open_completions_later(False, False, False,
COMPLETE_FILES)
def autocomplete_event(self, event):
"""Happens when the user wants to complete his word, and if necessary,
open a completion list after that (if there is more than one
completion)
"""
if hasattr(event, "mc_state") and event.mc_state:
# A modifier was pressed along with the tab, continue as usual.
return
if self.autocompletewindow and self.autocompletewindow.is_active():
self.autocompletewindow.complete()
return "break"
else:
opened = self.open_completions(False, True, True)
if opened:
return "break"
def _open_completions_later(self, *args):
self._delayed_completion_index = self.text.index("insert")
if self._delayed_completion_id is not None:
self.text.after_cancel(self._delayed_completion_id)
self._delayed_completion_id = \
self.text.after(self.popupwait, self._delayed_open_completions,
*args)
def _delayed_open_completions(self, *args):
self._delayed_completion_id = None
if self.text.index("insert") != self._delayed_completion_index:
return
self.open_completions(*args)
def open_completions(self, evalfuncs, complete, userWantsWin, mode=None):
"""Find the completions and create the AutoCompleteWindow.
Return True if successful (no syntax error or so found).
if complete is True, then if there's nothing to complete and no
start of completion, won't open completions and return False.
If mode is given, will open a completion list only in this mode.
"""
# Cancel another delayed call, if it exists.
if self._delayed_completion_id is not None:
self.text.after_cancel(self._delayed_completion_id)
self._delayed_completion_id = None
hp = HyperParser(self.editwin, "insert")
curline = self.text.get("insert linestart", "insert")
i = j = len(curline)
if hp.is_in_string() and (not mode or mode==COMPLETE_FILES):
self._remove_autocomplete_window()
mode = COMPLETE_FILES
while i and curline[i-1] in FILENAME_CHARS:
i -= 1
comp_start = curline[i:j]
j = i
while i and curline[i-1] in FILENAME_CHARS + SEPS:
i -= 1
comp_what = curline[i:j]
elif hp.is_in_code() and (not mode or mode==COMPLETE_ATTRIBUTES):
self._remove_autocomplete_window()
mode = COMPLETE_ATTRIBUTES
while i and curline[i-1] in ID_CHARS:
i -= 1
comp_start = curline[i:j]
if i and curline[i-1] == '.':
hp.set_index("insert-%dc" % (len(curline)-(i-1)))
comp_what = hp.get_expression()
if not comp_what or \
(not evalfuncs and comp_what.find('(') != -1):
return
else:
comp_what = ""
else:
return
if complete and not comp_what and not comp_start:
return
comp_lists = self.fetch_completions(comp_what, mode)
if not comp_lists[0]:
return
self.autocompletewindow = self._make_autocomplete_window()
return not self.autocompletewindow.show_window(
comp_lists, "insert-%dc" % len(comp_start),
complete, mode, userWantsWin)
def fetch_completions(self, what, mode):
"""Return a pair of lists of completions for something. The first list
is a sublist of the second. Both are sorted.
If there is a Python subprocess, get the comp. list there. Otherwise,
either fetch_completions() is running in the subprocess itself or it
was called in an IDLE EditorWindow before any script had been run.
The subprocess environment is that of the most recently run script. If
two unrelated modules are being edited some calltips in the current
module may be inoperative if the module was not the last to run.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_completion_list",
(what, mode), {})
else:
if mode == COMPLETE_ATTRIBUTES:
if what == "":
namespace = __main__.__dict__.copy()
namespace.update(__main__.__builtins__.__dict__)
bigl = eval("dir()", namespace)
bigl.sort()
if "__all__" in bigl:
smalll = sorted(eval("__all__", namespace))
else:
smalll = [s for s in bigl if s[:1] != '_']
else:
try:
entity = self.get_entity(what)
bigl = dir(entity)
bigl.sort()
if "__all__" in bigl:
smalll = sorted(entity.__all__)
else:
smalll = [s for s in bigl if s[:1] != '_']
except:
return [], []
elif mode == COMPLETE_FILES:
if what == "":
what = "."
try:
expandedpath = os.path.expanduser(what)
bigl = os.listdir(expandedpath)
bigl.sort()
smalll = [s for s in bigl if s[:1] != '.']
except OSError:
return [], []
if not smalll:
smalll = bigl
return smalll, bigl
def get_entity(self, name):
"""Lookup name in a namespace spanning sys.modules and __main.dict__"""
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
return eval(name, namespace)
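# --- Illustrative sketch (not part of the original module) ---
# fetch_completions() above returns a pair (smalll, bigl): bigl is the full
# sorted list of candidate names, and smalll is the subset offered first
# (the entity's __all__ if it defines one, otherwise every name that does not
# start with an underscore). The helper below is a hypothetical, standalone
# restatement of that filtering rule for the attribute-completion case only.
def _sketch_attribute_completion_lists(entity):
    bigl = sorted(dir(entity))
    if "__all__" in bigl:
        smalll = sorted(entity.__all__)
    else:
        smalll = [name for name in bigl if name[:1] != '_']
    return smalll, bigl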
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_autocomplete', verbosity=2)
| Jeff-Tian/mybnb | Python27/Lib/idlelib/AutoComplete.py | Python | apache-2.0 | 9,194 |
import json
import oauth2 as oauth
import os
import urllib
import config
from resource import SharedResourceHandler
class TwitterAPI(object):
def __init__(self, env_settings):
self.rest_api = TwitterRestAPI(env_settings)
def get_resource(self, resource, params):
"""
Calls the Twitter REST endpoint specified by resource['url'], with REST parameters
specified in params.
Also creates a summary object based on values described in resource.
:param resource: the twitter api resource, eg "followers/list"
:param params: resource params, eg { "screen_name": "foo", "cursor": "-1"}
:return: a tuple of summary_data, rest_response_data
"""
resource_handler = ResourceHandler(resource, params)
url = resource_handler.get_url()
results = self.rest_api.get_response(url)
if not results:
return None, None
key_extractor = KeyExtractor(results)
TwitterAPI.save_nested_values_as_params(resource_handler, key_extractor)
filename = resource_handler.get_results_filename()
TwitterAPI.save_results(results, filename)
summary = TwitterAPI.get_summary(resource_handler, results)
filename = resource_handler.get_summary_filename()
TwitterAPI.save_results(summary, filename)
return summary, results
@staticmethod
def save_nested_values_as_params(resource_handler, key_extractor):
nested_keys = resource_handler.get_nested_keys_to_extract_from_results()
for nested_key in nested_keys:
value = key_extractor.get_nested_value(nested_key)
if value:
resource_handler.put_field(nested_key, value)
else:
resource_handler.remove_field(nested_key)
@staticmethod
def get_summary(resource_handler, data):
summary_fields = resource_handler.get_summary_fields()
num_items, data = resource_handler.get_object_containing_summary_data(data)
if type(data) is dict:
summary_result = {field: data[field] for field in summary_fields if field in data}
else:
summary_result = {}
summary_result['num_items'] = num_items
return {
'params': resource_handler.get_summary_params(),
'result': summary_result
}
@staticmethod
def save_results(results, filename):
full_path = os.path.join(config.PARENT_DATA_FOLDER, filename)
TwitterAPI.make_path_if_not_exists(full_path)
with open(full_path, 'w') as f:
f.write(json.dumps(results, indent=2))
@staticmethod
def make_path_if_not_exists(full_path):
parent_directory = os.path.dirname(full_path)
if not os.path.exists(parent_directory):
os.makedirs(parent_directory)
# --------- helper classes for TwitterAPI ---------
class ResourceHandler(SharedResourceHandler):
def __init__(self, resource, params):
super(ResourceHandler, self).__init__(resource, params)
def get_url(self):
url_params = {p: self.params[p] for p in self.params if self.params[p]}
return config.TWITTER_BASE_URL.format(resource=self.resource['url']) + urllib.urlencode(url_params)
def put_field(self, field, value):
self.params[field] = value
def remove_field(self, field):
self.resource['filename_fields'].remove(field)
def get_results_filename(self):
return '.'.join(self.filename_parts) + '.json'
def get_summary_filename(self):
return '.'.join(self.filename_parts[:-2]) + '.last.json'
def get_nested_keys_to_extract_from_results(self):
if 'summarize_filename_prefix' in self.resource:
return self.resource['summarize_filename_prefix']
return []
def get_summary_fields(self):
if 'raw_summary_fields' in self.resource:
return self.resource['raw_summary_fields']
return []
def get_summary_params(self):
return {f: self.params[f] for f in self.resource['filename_fields'] if f in self.params}
def get_object_containing_summary_data(self, data):
num_items = None
if 'raw_data_field' in self.resource and self.resource['raw_data_field']:
data = data[self.resource['raw_data_field']]
if type(data) is list and data:
num_items = len(data)
data = data[-1]
return num_items, data
class KeyExtractor(object):
def __init__(self, response):
if type(response) is list:
self.data = response[0]
else:
self.data = response
def get_nested_value(self, nested_key):
return KeyExtractor._traverse_data_for_value(self.data, nested_key.split("|"))
@staticmethod
def _traverse_data_for_value(value, keys):
if not keys:
return None
if type(value) is list:
results = [KeyExtractor._traverse_data_for_value(v, keys) for v in value]
results = [r for r in results if r is not None]
return '|'.join(results)
if keys[0] not in value:
return None
if len(keys) == 1:
return value[keys[0]]
return KeyExtractor._traverse_data_for_value(value[keys[0]], keys[1:])
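# --- Illustrative sketch (not part of the original module) ---
# KeyExtractor above resolves a '|'-separated key path against nested
# JSON-like data, and flattens lists by joining the values found in each
# element with '|'. The helper below is hypothetical and only demonstrates
# that traversal on made-up data shaped loosely like a Twitter API response.
def _sketch_key_extractor_usage():
    payload = {'user': {'urls': [{'expanded_url': 'http://a'},
                                 {'expanded_url': 'http://b'}]}}
    # Returns 'http://a|http://b': the list under 'urls' is flattened.
    return KeyExtractor(payload).get_nested_value('user|urls|expanded_url')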
class TwitterRestAPI(object):
HTTP_STATUS_OKAY = '200'
HTTP_RATE_LIMIT_EXCEEDED = '429'
def __init__(self, env_settings):
consumer_key = env_settings['CONSUMER_KEY']
consumer_secret = env_settings['CONSUMER_SECRET']
access_token = env_settings['ACCESS_TOKEN']
access_secret = env_settings['ACCESS_SECRET']
consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
token = oauth.Token(key=access_token, secret=access_secret)
self.client = oauth.Client(consumer, token)
def get_response(self, url):
header, response = self.client.request(url, method="GET")
if header['status'] != TwitterRestAPI.HTTP_STATUS_OKAY:
print header['status'], response
if header['status'] == TwitterRestAPI.HTTP_RATE_LIMIT_EXCEEDED:
exit('Rate limit exceeded')
return None
else:
return json.loads(response)
| celiala/twitter | client/twitter.py | Python | mit | 6,272 |
from env_checker_error import EnvCheckerError
class WritablePathError(Exception):
def __init__(self, path):
self.path = path
def resolve(self):
# TODO: Actually attempt to resolve this?
raise EnvCheckerError(
"Ice requires write access too `%s` to run." %
self.path)
| rdoyle1978/Ice | src/ice/error/writable_path_error.py | Python | mit | 303 |
#!/usr/bin/python
__author__="Paulo Victor Maluf"
__date__ ="$27/10/2014 13:35:12$"
from connection import Connect;
class Postgres(object):
def __init__(self, dsn=None):
self.dsn = dsn
self.connect()
def connect(self):
conn = Connect(self.dsn)
self.cursor = conn.cur()
def exec_sql(self, sql):
self.cursor.execute(sql)
return self.cursor.fetchall()
def __del__(self):
del self
| pmaluf/pgzabbix | classes/postgres.py | Python | mit | 461 |
"""
Example 3: Saving all minima found to an xyz file
"""
from pygmin.systems import LJCluster
from pygmin.utils.xyz import write_xyz
natoms = 12
niter = 100
system = LJCluster(natoms)
db = system.create_database()
bh = system.get_basinhopping(database=db)
bh.run(niter)
with open("lowest", "w") as fout:
for minimum in db.minima():
title = "energy = ", str(minimum.energy)
write_xyz(fout, minimum.coords, title)
############################################################
# some visualization
############################################################
try:
import pygmin.utils.pymolwrapper as pym
pym.start()
frame=1
for minimum in db.minima():
pym.draw_spheres(minimum.coords.reshape(-1, 3), "A", frame)
frame=frame+1
except:
print "Could not draw using pymol, skipping this step"
| js850/PyGMIN | examples/basinhopping/3_savelowest.py | Python | gpl-3.0 | 869 |
from collections import namedtuple
PurchasedItem = namedtuple('PurchasedItem',
'name, quantity, price, currency, sku')
class RedirectNeeded(Exception):
pass
class PaymentError(Exception):
pass
class ExternalPostNeeded(Exception):
pass
| artursmet/django-payments | payments/__init__.py | Python | bsd-3-clause | 281 |
controller = {
'leds': 16,
'neopixel_gpio_pin': 18,
'neopixel_frequency': 800000,
'neopixel_dma': 5,
'neopixel_invert': False,
'neopixel_brightness': 255,
'neopixel_channel': 0,
'neopixel_strip': 0x00081000
}
| bradcornford/Neopixel-Controller | neopixelcontroller/example.config.py | Python | mit | 241 |
'''
Module written by "mmrdns_write_python_slm_norm_fit_eqns.m". Fitting functions for separation constants of gravitational perturbations of Kerr.
'''
# Import useful things
from numpy import log,exp
# Domain map jf --> kappa( x =def [jf,l,m] ).
# NOTE that the function below MUST be consistent with the domain_map of "fit_slm_norms.m"
kappa = lambda x: ( log(2.0-x[0])/log(3.0) )**( 1 / (2.0+x[1]-abs(x[2])) )
# Fit Equations for QNM complex frequencies (CW). Note that while these are the zero-damped modes in the extremal kerr limit, these are the effectively unique solutions throughout non-extremal kerr.
CC = {
(2,2,0): lambda jf: 7.86366171 - 3.61447483*kappa([jf,2,2]) + 3.48996689*kappa([jf,2,2])**2 - 2.29347705*kappa([jf,2,2])**3 + 0.74425069*kappa([jf,2,2])**4 ,
(2,-2,0): lambda jf: CC[(2,2,0)](jf).conj(),
(2,2,1): lambda jf: 7.86298703 - 3.59872285*kappa([jf,2,2]) + 2.88459437*kappa([jf,2,2])**2 - 0.92740734*kappa([jf,2,2])**3 - 0.04445478*kappa([jf,2,2])**4 ,
(2,-2,1): lambda jf: CC[(2,2,1)](jf).conj(),
(3,3,0): lambda jf: 3.51631915 + 0.16499714*kappa([jf,3,3]) + 1.30114387*kappa([jf,3,3])**2 - 0.83622153*kappa([jf,3,3])**3 + 0.82020713*kappa([jf,3,3])**4 ,
(3,-3,0): lambda jf: CC[(3,3,0)](jf).conj(),
(3,3,1): lambda jf: 3.51530809 + 0.19285707*kappa([jf,3,3]) + 0.96814190*kappa([jf,3,3])**2 - 0.00547882*kappa([jf,3,3])**3 + 0.24982172*kappa([jf,3,3])**4 ,
(3,-3,1): lambda jf: CC[(3,3,1)](jf).conj(),
(4,4,0): lambda jf: 1.75389888 + 1.00111258*kappa([jf,4,4]) + 1.55498487*kappa([jf,4,4])**2 - 1.22344804*kappa([jf,4,4])**3 + 1.64621074*kappa([jf,4,4])**4 ,
(4,-4,0): lambda jf: CC[(4,4,0)](jf).conj(),
(5,5,0): lambda jf: 0.91349889 + 0.89568178*kappa([jf,5,5]) + 2.54404526*kappa([jf,5,5])**2 - 2.82437113*kappa([jf,5,5])**3 + 3.28143852*kappa([jf,5,5])**4 ,
(5,-5,0): lambda jf: CC[(5,5,0)](jf).conj(),
(2,1,0): lambda jf: 3.04393302 - 0.06877527*kappa([jf,2,1]) + 0.87671129*kappa([jf,2,1])**2 - 3.92206769*kappa([jf,2,1])**3 + 8.59631959*kappa([jf,2,1])**4 - 8.52199526*kappa([jf,2,1])**5 + 3.31150324*kappa([jf,2,1])**6 ,
(2,-1,0): lambda jf: CC[(2,1,0)](jf).conj(),
(3,2,0): lambda jf: 0.74845717 - 0.08157463*kappa([jf,3,2]) + 1.03748092*kappa([jf,3,2])**2 - 3.27926931*kappa([jf,3,2])**3 + 7.24584503*kappa([jf,3,2])**4 - 7.41316799*kappa([jf,3,2])**5 + 3.06056035*kappa([jf,3,2])**6 ,
(3,-2,0): lambda jf: CC[(3,2,0)](jf).conj(),
(4,3,0): lambda jf: 0.39542385 - 0.09918352*kappa([jf,4,3]) + 1.52850262*kappa([jf,4,3])**2 - 5.09932727*kappa([jf,4,3])**3 + 10.95647104*kappa([jf,4,3])**4 - 10.99914124*kappa([jf,4,3])**5 + 4.52212985*kappa([jf,4,3])**6 ,
(4,-3,0): lambda jf: CC[(4,3,0)](jf).conj()
}
# Cleaning up
# del log,exp
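# --- Illustrative sketch (not part of the original module) ---
# Each entry of CC above is a polynomial in kappa(jf, l, |m|), where jf is the
# dimensionless black-hole spin mapped into the fitting domain by kappa. The
# helper below is hypothetical and simply shows how one of the positive-m fits
# would be evaluated at a sample spin value.
def _sketch_evaluate_220_fit(jf=0.68):
    k = kappa([jf, 2, 2])        # map the spin into the fitting domain
    return k, CC[(2, 2, 0)](jf)  # the stored fit re-applies the same mapping internally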
| llondon6/kerr_public | kerr/formula/ksm2_slm_norm.py | Python | mit | 2,772 |
# coding: utf-8
# maposmatic, the web front-end of the MapOSMatic city map generation system
# Copyright (C) 2009 David Decotigny
# Copyright (C) 2009 Frédéric Lehobey
# Copyright (C) 2009 David Mentré
# Copyright (C) 2009 Maxime Petazzoni
# Copyright (C) 2009 Thomas Petazzoni
# Copyright (C) 2009 Gaël Utard
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
import feedparser
from models import MapRenderingJob
import www.settings
def get_latest_blog_posts():
f = feedparser.parse("http://news.maposmatic.org/?feed=rss2")
return f.entries[:5]
def all(request):
# Do not add the useless overhead of parsing blog entries when generating
# the rss feed
if request.path == reverse('rss-feed', args=['maps']):
return {}
return {
'randommap': MapRenderingJob.objects.get_random_with_thumbnail(),
'blogposts': get_latest_blog_posts(),
'MAPOSMATIC_DAEMON_RUNNING': www.settings.is_daemon_running(),
}
| wonderchook/MapOSMatic | www/maposmatic/context_processors.py | Python | agpl-3.0 | 1,602 |
# Copyright 2013 University of Chicago
class EEAgentParameterException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class EEAgentUnauthorizedException(Exception):
pass
class EEAgentSupDException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
| ooici/eeagent | eeagent/eeagent_exceptions.py | Python | apache-2.0 | 337 |
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import hashlib
import os
import time
import uuid
import mock
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_utils import timeutils
import paramiko
import six
from six.moves import range
import cinder
from cinder import exception
from cinder import ssh_utils
from cinder import test
from cinder import utils
CONF = cfg.CONF
class ExecuteTestCase(test.TestCase):
@mock.patch('cinder.utils.processutils.execute')
def test_execute(self, mock_putils_exe):
output = utils.execute('a', 1, foo='bar')
self.assertEqual(mock_putils_exe.return_value, output)
mock_putils_exe.assert_called_once_with('a', 1, foo='bar')
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.utils.processutils.execute')
def test_execute_root(self, mock_putils_exe, mock_get_helper):
output = utils.execute('a', 1, foo='bar', run_as_root=True)
self.assertEqual(mock_putils_exe.return_value, output)
mock_helper = mock_get_helper.return_value
mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
run_as_root=True,
root_helper=mock_helper)
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.utils.processutils.execute')
def test_execute_root_and_helper(self, mock_putils_exe, mock_get_helper):
mock_helper = mock.Mock()
output = utils.execute('a', 1, foo='bar', run_as_root=True,
root_helper=mock_helper)
self.assertEqual(mock_putils_exe.return_value, output)
self.assertFalse(mock_get_helper.called)
mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
run_as_root=True,
root_helper=mock_helper)
class GetFromPathTestCase(test.TestCase):
def test_tolerates_nones(self):
f = utils.get_from_path
input = []
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [None]
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': None}]
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': None}}]
self.assertEqual([{'b': None}], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}]
self.assertEqual([{'b': {'c': None}}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': None}]
self.assertEqual([{'b': {'c': None}}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
self.assertEqual([{'b': {'c': None}}, {'b': None}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
def test_does_select(self):
f = utils.get_from_path
input = [{'a': 'a_1'}]
self.assertEqual(['a_1'], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': 'b_1'}}]
self.assertEqual([{'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}]
self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': None}}]
self.assertEqual([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': {'c': 'c_2'}}}]
self.assertEqual([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
f(input, "a"))
self.assertEqual([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
self.assertEqual(['c_1', 'c_2'], f(input, "a/b/c"))
self.assertEqual([], f(input, "a/b/c/d"))
self.assertEqual([], f(input, "c/a/b/d"))
self.assertEqual([], f(input, "i/r/t"))
def test_flattens_lists(self):
f = utils.get_from_path
input = [{'a': [1, 2, 3]}]
self.assertEqual([1, 2, 3], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}]
self.assertEqual([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEqual([1, 2, 3], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': [1, 2, {'b': 'b_1'}]}]
self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
def test_bad_xpath(self):
f = utils.get_from_path
self.assertRaises(exception.Error, f, [], None)
self.assertRaises(exception.Error, f, [], "")
self.assertRaises(exception.Error, f, [], "/")
self.assertRaises(exception.Error, f, [], "/a")
self.assertRaises(exception.Error, f, [], "/a/")
self.assertRaises(exception.Error, f, [], "//")
self.assertRaises(exception.Error, f, [], "//a")
self.assertRaises(exception.Error, f, [], "a//a")
self.assertRaises(exception.Error, f, [], "a//a/")
self.assertRaises(exception.Error, f, [], "a/a/")
def test_real_failure1(self):
# Real world failure case...
# We weren't coping when the input was a Dictionary instead of a List
# This led to test_accepts_dictionaries
f = utils.get_from_path
inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
'address': '192.168.0.3'},
'hostname': ''}
private_ips = f(inst, 'fixed_ip/address')
public_ips = f(inst, 'fixed_ip/floating_ips/address')
self.assertEqual(['192.168.0.3'], private_ips)
self.assertEqual(['1.2.3.4'], public_ips)
def test_accepts_dictionaries(self):
f = utils.get_from_path
input = {'a': [1, 2, 3]}
self.assertEqual([1, 2, 3], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': {'b': [1, 2, 3]}}
self.assertEqual([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEqual([1, 2, 3], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': [1, 2, {'b': 'b_1'}]}
self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
class GenericUtilsTestCase(test.TestCase):
@mock.patch('os.path.exists', return_value=True)
def test_find_config(self, mock_exists):
path = '/etc/cinder/cinder.conf'
cfgpath = utils.find_config(path)
self.assertEqual(path, cfgpath)
mock_exists.return_value = False
self.assertRaises(exception.ConfigNotFound,
utils.find_config,
path)
def test_as_int(self):
test_obj_int = '2'
test_obj_float = '2.2'
for obj in [test_obj_int, test_obj_float]:
self.assertEqual(2, utils.as_int(obj))
obj = 'not_a_number'
self.assertEqual(obj, utils.as_int(obj))
self.assertRaises(TypeError,
utils.as_int,
obj,
quiet=False)
def test_is_int_like(self):
self.assertTrue(utils.is_int_like(1))
self.assertTrue(utils.is_int_like(-1))
self.assertTrue(utils.is_int_like(0b1))
self.assertTrue(utils.is_int_like(0o1))
self.assertTrue(utils.is_int_like(0x1))
self.assertTrue(utils.is_int_like('1'))
self.assertFalse(utils.is_int_like(1.0))
self.assertFalse(utils.is_int_like('abc'))
def test_check_exclusive_options(self):
utils.check_exclusive_options()
utils.check_exclusive_options(something=None,
pretty_keys=True,
unit_test=True)
self.assertRaises(exception.InvalidInput,
utils.check_exclusive_options,
test=True,
unit=False,
pretty_keys=True)
self.assertRaises(exception.InvalidInput,
utils.check_exclusive_options,
test=True,
unit=False,
pretty_keys=False)
def test_require_driver_initialized(self):
driver = mock.Mock()
driver.initialized = True
utils.require_driver_initialized(driver)
driver.initialized = False
self.assertRaises(exception.DriverNotInitialized,
utils.require_driver_initialized,
driver)
def test_hostname_unicode_sanitization(self):
hostname = u"\u7684.test.example.com"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_periods(self):
hostname = "....test.example.com..."
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_dashes(self):
hostname = "----test.example.com---"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_characters(self):
hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
self.assertEqual("91----test-host.example.com-0",
utils.sanitize_hostname(hostname))
def test_hostname_translate(self):
hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_is_valid_boolstr(self):
self.assertTrue(utils.is_valid_boolstr(True))
self.assertTrue(utils.is_valid_boolstr('trUe'))
self.assertTrue(utils.is_valid_boolstr(False))
self.assertTrue(utils.is_valid_boolstr('faLse'))
self.assertTrue(utils.is_valid_boolstr('yeS'))
self.assertTrue(utils.is_valid_boolstr('nO'))
self.assertTrue(utils.is_valid_boolstr('y'))
self.assertTrue(utils.is_valid_boolstr('N'))
self.assertTrue(utils.is_valid_boolstr(1))
self.assertTrue(utils.is_valid_boolstr('1'))
self.assertTrue(utils.is_valid_boolstr(0))
self.assertTrue(utils.is_valid_boolstr('0'))
@mock.patch('os.path.join', side_effect=lambda x, y: '/'.join((x, y)))
def test_make_dev_path(self, mock_join):
self.assertEqual('/dev/xvda', utils.make_dev_path('xvda'))
self.assertEqual('/dev/xvdb1', utils.make_dev_path('xvdb', 1))
self.assertEqual('/foo/xvdc1', utils.make_dev_path('xvdc', 1, '/foo'))
@mock.patch('cinder.utils.execute')
def test_read_file_as_root(self, mock_exec):
out = mock.Mock()
err = mock.Mock()
mock_exec.return_value = (out, err)
test_filepath = '/some/random/path'
output = utils.read_file_as_root(test_filepath)
mock_exec.assert_called_once_with('cat', test_filepath,
run_as_root=True)
self.assertEqual(out, output)
@mock.patch('cinder.utils.execute',
side_effect=putils.ProcessExecutionError)
def test_read_file_as_root_fails(self, mock_exec):
test_filepath = '/some/random/path'
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root,
test_filepath)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_service_is_up(self, mock_utcnow):
fts_func = datetime.datetime.fromtimestamp
fake_now = 1000
down_time = 5
self.flags(service_down_time=down_time)
mock_utcnow.return_value = fts_func(fake_now)
# Up (equal)
service = {'updated_at': fts_func(fake_now - down_time),
'created_at': fts_func(fake_now - down_time)}
result = utils.service_is_up(service)
self.assertTrue(result)
# Up
service = {'updated_at': fts_func(fake_now - down_time + 1),
'created_at': fts_func(fake_now - down_time + 1)}
result = utils.service_is_up(service)
self.assertTrue(result)
# Down
service = {'updated_at': fts_func(fake_now - down_time - 1),
'created_at': fts_func(fake_now - down_time - 1)}
result = utils.service_is_up(service)
self.assertFalse(result)
def test_safe_parse_xml(self):
normal_body = ('<?xml version="1.0" ?>'
'<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
def killer_body():
return (("""<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>""") % {
'a': 'A' * 10,
'b': '&a;' * 10,
'c': '&b;' * 10,
'd': '&c;' * 9999,
}).strip()
dom = utils.safe_minidom_parse_string(normal_body)
# Some versions of minidom inject extra newlines so we ignore them
result = str(dom.toxml()).replace('\n', '')
self.assertEqual(normal_body, result)
self.assertRaises(ValueError,
utils.safe_minidom_parse_string,
killer_body())
def test_xhtml_escape(self):
self.assertEqual('"foo"', utils.xhtml_escape('"foo"'))
self.assertEqual(''foo'', utils.xhtml_escape("'foo'"))
def test_hash_file(self):
data = b'Mary had a little lamb, its fleece as white as snow'
flo = six.BytesIO(data)
h1 = utils.hash_file(flo)
h2 = hashlib.sha1(data).hexdigest()
self.assertEqual(h1, h2)
def test_check_ssh_injection(self):
cmd_list = ['ssh', '-D', 'my_name@name_of_remote_computer']
self.assertIsNone(utils.check_ssh_injection(cmd_list))
cmd_list = ['echo', '"quoted arg with space"']
self.assertIsNone(utils.check_ssh_injection(cmd_list))
cmd_list = ['echo', "'quoted arg with space'"]
self.assertIsNone(utils.check_ssh_injection(cmd_list))
def test_check_ssh_injection_on_error(self):
with_unquoted_space = ['ssh', 'my_name@ name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_unquoted_space)
with_danger_chars = ['||', 'my_name@name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_danger_chars)
with_danger_char = [';', 'my_name@name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_danger_char)
with_special = ['cmd', 'virus;ls']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_special)
quoted_with_unescaped = ['cmd', '"arg\"withunescaped"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
quoted_with_unescaped)
bad_before_quotes = ['cmd', 'virus;"quoted argument"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_before_quotes)
bad_after_quotes = ['echo', '"quoted argument";rm -rf']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_after_quotes)
bad_within_quotes = ['echo', "'quoted argument `rm -rf`'"]
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_within_quotes)
with_multiple_quotes = ['echo', '"quoted";virus;"quoted"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_multiple_quotes)
with_multiple_quotes = ['echo', '"quoted";virus;\'quoted\'']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_multiple_quotes)
@mock.patch('paramiko.SSHClient')
def test_create_channel(self, mock_client):
test_width = 600
test_height = 800
mock_channel = mock.Mock()
mock_client.invoke_shell.return_value = mock_channel
utils.create_channel(mock_client, test_width, test_height)
mock_client.invoke_shell.assert_called_once_with()
mock_channel.resize_pty.assert_called_once_with(test_width,
test_height)
@mock.patch('os.stat')
def test_get_file_mode(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_gid = 33333
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
mode = utils.get_file_mode(test_file)
self.assertEqual(mode, 0o777)
mock_stat.assert_called_once_with(test_file)
@mock.patch('os.stat')
def test_get_file_gid(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_gid = 33333
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
gid = utils.get_file_gid(test_file)
self.assertEqual(gid, 33333)
mock_stat.assert_called_once_with(test_file)
@mock.patch('cinder.utils.CONF')
def test_get_root_helper(self, mock_conf):
mock_conf.rootwrap_config = '/path/to/conf'
self.assertEqual('sudo cinder-rootwrap /path/to/conf',
utils.get_root_helper())
class TemporaryChownTestCase(test.TestCase):
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=1234)
@mock.patch('cinder.utils.execute')
def test_get_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename):
mock_exec.assert_called_once_with('chown', 1234, test_filename,
run_as_root=True)
mock_getuid.asset_called_once_with()
mock_stat.assert_called_once_with(test_filename)
calls = [mock.call('chown', 1234, test_filename, run_as_root=True),
mock.call('chown', 5678, test_filename, run_as_root=True)]
mock_exec.assert_has_calls(calls)
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=1234)
@mock.patch('cinder.utils.execute')
def test_supplied_owner_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename, owner_uid=9101):
mock_exec.assert_called_once_with('chown', 9101, test_filename,
run_as_root=True)
self.assertFalse(mock_getuid.called)
mock_stat.assert_called_once_with(test_filename)
calls = [mock.call('chown', 9101, test_filename, run_as_root=True),
mock.call('chown', 5678, test_filename, run_as_root=True)]
mock_exec.assert_has_calls(calls)
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=5678)
@mock.patch('cinder.utils.execute')
def test_matching_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename):
pass
mock_getuid.asset_called_once_with()
mock_stat.assert_called_once_with(test_filename)
self.assertFalse(mock_exec.called)
class TempdirTestCase(test.TestCase):
@mock.patch('tempfile.mkdtemp')
@mock.patch('shutil.rmtree')
def test_tempdir(self, mock_rmtree, mock_mkdtemp):
with utils.tempdir(a='1', b=2) as td:
self.assertEqual(mock_mkdtemp.return_value, td)
self.assertFalse(mock_rmtree.called)
mock_mkdtemp.assert_called_once_with(a='1', b=2)
mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value)
@mock.patch('tempfile.mkdtemp')
@mock.patch('shutil.rmtree', side_effect=OSError)
def test_tempdir_error(self, mock_rmtree, mock_mkdtemp):
with utils.tempdir(a='1', b=2) as td:
self.assertEqual(mock_mkdtemp.return_value, td)
self.assertFalse(mock_rmtree.called)
mock_mkdtemp.assert_called_once_with(a='1', b=2)
mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value)
class WalkClassHierarchyTestCase(test.TestCase):
def test_walk_class_hierarchy(self):
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(B):
pass
class E(A):
pass
class_pairs = zip((D, B, E),
utils.walk_class_hierarchy(A, encountered=[C]))
for actual, expected in class_pairs:
self.assertEqual(actual, expected)
class_pairs = zip((D, B, C, E), utils.walk_class_hierarchy(A))
for actual, expected in class_pairs:
self.assertEqual(actual, expected)
class GetDiskOfPartitionTestCase(test.TestCase):
def test_devpath_is_diskpath(self):
devpath = '/some/path'
st_mock = mock.Mock()
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual('/some/path', output[0])
self.assertIs(st_mock, output[1])
with mock.patch('os.stat') as mock_stat:
devpath = '/some/path'
output = utils._get_disk_of_partition(devpath)
mock_stat.assert_called_once_with(devpath)
self.assertEqual(devpath, output[0])
self.assertIs(mock_stat.return_value, output[1])
@mock.patch('os.stat', side_effect=OSError)
def test_stat_oserror(self, mock_stat):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
mock_stat.assert_called_once_with('/some/path')
self.assertEqual(devpath, output[0])
self.assertIs(st_mock, output[1])
@mock.patch('stat.S_ISBLK', return_value=True)
@mock.patch('os.stat')
def test_diskpath_is_block_device(self, mock_stat, mock_isblk):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual('/some/path', output[0])
self.assertEqual(mock_stat.return_value, output[1])
@mock.patch('stat.S_ISBLK', return_value=False)
@mock.patch('os.stat')
def test_diskpath_is_not_block_device(self, mock_stat, mock_isblk):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual(devpath, output[0])
self.assertEqual(st_mock, output[1])
class GetBlkdevMajorMinorTestCase(test.TestCase):
@mock.patch('os.stat')
def test_get_file_size(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_size = 1074253824
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
size = utils.get_file_size(test_file)
self.assertEqual(size, stat_result.st_size)
mock_stat.assert_called_once_with(test_file)
@mock.patch('os.stat')
def test_get_blkdev_major_minor(self, mock_stat):
class stat_result(object):
st_mode = 0o60660
st_rdev = os.makedev(253, 7)
test_device = '/dev/made_up_blkdev'
mock_stat.return_value = stat_result
dev = utils.get_blkdev_major_minor(test_device)
self.assertEqual('253:7', dev)
mock_stat.assert_called_once_with(test_device)
@mock.patch('os.stat')
@mock.patch.object(utils, 'execute')
def _test_get_blkdev_major_minor_file(self, test_partition,
mock_exec, mock_stat):
mock_exec.return_value = (
'Filesystem Size Used Avail Use%% Mounted on\n'
'%s 4096 2048 2048 50%% /tmp\n' % test_partition, None)
test_file = '/tmp/file'
test_disk = '/dev/made_up_disk'
class stat_result_file(object):
st_mode = 0o660
class stat_result_partition(object):
st_mode = 0o60660
st_rdev = os.makedev(8, 65)
class stat_result_disk(object):
st_mode = 0o60660
st_rdev = os.makedev(8, 64)
def fake_stat(path):
try:
return {test_file: stat_result_file,
test_partition: stat_result_partition,
test_disk: stat_result_disk}[path]
except KeyError:
raise OSError
mock_stat.side_effect = fake_stat
dev = utils.get_blkdev_major_minor(test_file)
mock_stat.assert_any_call(test_file)
mock_exec.assert_called_once_with('df', test_file)
if test_partition.startswith('/'):
mock_stat.assert_any_call(test_partition)
mock_stat.assert_any_call(test_disk)
return dev
def test_get_blkdev_major_minor_file(self):
dev = self._test_get_blkdev_major_minor_file('/dev/made_up_disk1')
self.assertEqual('8:64', dev)
def test_get_blkdev_major_minor_file_nfs(self):
dev = self._test_get_blkdev_major_minor_file('nfs-server:/export/path')
self.assertIsNone(dev)
@mock.patch('os.stat')
@mock.patch('stat.S_ISCHR', return_value=False)
@mock.patch('stat.S_ISBLK', return_value=False)
def test_get_blkdev_failure(self, mock_isblk, mock_ischr, mock_stat):
path = '/some/path'
self.assertRaises(exception.Error,
utils.get_blkdev_major_minor,
path, lookup_for_file=False)
mock_stat.assert_called_once_with(path)
mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode)
mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode)
@mock.patch('os.stat')
@mock.patch('stat.S_ISCHR', return_value=True)
@mock.patch('stat.S_ISBLK', return_value=False)
def test_get_blkdev_is_chr(self, mock_isblk, mock_ischr, mock_stat):
path = '/some/path'
output = utils.get_blkdev_major_minor(path, lookup_for_file=False)
mock_stat.assert_called_once_with(path)
mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode)
mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode)
self.assertIs(None, output)
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'cinder.tests.unit.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package
+ 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION = []
from cinder.tests.unit.monkey_patch_example import example_a
from cinder.tests.unit.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(ret_a, 8)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertTrue(
package_a + 'example_function_a'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(
package_a + 'ExampleClassA.example_method'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(
package_a + 'ExampleClassA.example_method_add'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(
package_b + 'example_function_b'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(
package_b + 'ExampleClassB.example_method'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(
package_b + 'ExampleClassB.example_method_add'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
class AuditPeriodTest(test.TestCase):
def setUp(self):
super(AuditPeriodTest, self).setUp()
test_time = datetime.datetime(second=23,
minute=12,
hour=8,
day=5,
month=3,
year=2012)
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = test_time
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEqual(begin,
datetime.datetime(hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEqual(begin, datetime.datetime(minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(minute=10,
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEqual(begin, datetime.datetime(minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(minute=30,
hour=7,
day=5,
month=3,
year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEqual(begin, datetime.datetime(day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(day=5,
month=3,
year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEqual(begin, datetime.datetime(hour=6,
day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(hour=6,
day=5,
month=3,
year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEqual(begin, datetime.datetime(hour=10,
day=3,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(hour=10,
day=4,
month=3,
year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(begin, datetime.datetime(day=1,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(day=1,
month=3,
year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEqual(begin, datetime.datetime(day=2,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(day=2,
month=3,
year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEqual(begin, datetime.datetime(day=15,
month=1,
year=2012))
self.assertEqual(end, datetime.datetime(day=15,
month=2,
year=2012))
@mock.patch('oslo_utils.timeutils.utcnow',
return_value=datetime.datetime(day=1,
month=1,
year=2012))
def test_month_jan_day_first(self, mock_utcnow):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(datetime.datetime(day=1, month=11, year=2011), begin)
self.assertEqual(datetime.datetime(day=1, month=12, year=2011), end)
@mock.patch('oslo_utils.timeutils.utcnow',
return_value=datetime.datetime(day=2,
month=1,
year=2012))
def test_month_jan_day_not_first(self, mock_utcnow):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(datetime.datetime(day=1, month=12, year=2011), begin)
self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end)
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEqual(begin, datetime.datetime(day=1,
month=1,
year=2011))
self.assertEqual(end, datetime.datetime(day=1,
month=1,
year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEqual(begin, datetime.datetime(day=1,
month=2,
year=2011))
self.assertEqual(end, datetime.datetime(day=1,
month=2,
year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEqual(begin, datetime.datetime(day=1,
month=6,
year=2010))
self.assertEqual(end, datetime.datetime(day=1,
month=6,
year=2011))
def test_invalid_unit(self):
self.assertRaises(ValueError,
utils.last_completed_audit_period,
unit='invalid_unit')
@mock.patch('cinder.utils.CONF')
def test_uses_conf_unit(self, mock_conf):
mock_conf.volume_usage_audit_period = 'hour'
begin1, end1 = utils.last_completed_audit_period()
self.assertEqual(60.0 * 60, (end1 - begin1).total_seconds())
mock_conf.volume_usage_audit_period = 'day'
begin2, end2 = utils.last_completed_audit_period()
self.assertEqual(60.0 * 60 * 24, (end2 - begin2).total_seconds())
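# --- Illustrative sketch (not part of the original tests) ---
# AuditPeriodTest above exercises a "unit[@offset]" string format for
# last_completed_audit_period, e.g. 'hour', 'day@6', 'month@15', 'year@2':
# the optional offset shifts the period boundary (minutes for 'hour', hours
# for 'day', day of month for 'month', month for 'year'). The helper below is
# a hypothetical restatement of just the string parsing, not of cinder's
# actual implementation.
def _sketch_parse_audit_unit(unit):
    if '@' in unit:
        unit, offset = unit.split('@', 1)
        offset = int(offset)
    else:
        offset = 0
    return unit, offset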
class FakeSSHClient(object):
def __init__(self):
self.id = uuid.uuid4()
self.transport = FakeTransport()
def set_missing_host_key_policy(self, policy):
self.policy = policy
def load_system_host_keys(self):
self.system_host_keys = 'system_host_keys'
def load_host_keys(self, hosts_key_file):
self.hosts_key_file = hosts_key_file
def connect(self, ip, port=22, username=None, password=None,
pkey=None, timeout=10):
pass
def get_transport(self):
return self.transport
def get_policy(self):
return self.policy
def get_host_keys(self):
return '127.0.0.1 ssh-rsa deadbeef'
def close(self):
pass
def __call__(self, *args, **kwargs):
pass
class FakeSock(object):
def settimeout(self, timeout):
pass
class FakeTransport(object):
def __init__(self):
self.active = True
self.sock = FakeSock()
def set_keepalive(self, timeout):
pass
def is_active(self):
return self.active
class SSHPoolTestCase(test.TestCase):
"""Unit test for SSH Connection Pool."""
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_default_hosts_key_file(self, mock_isfile, mock_sshclient,
mock_open, mock_conf):
mock_ssh = mock.MagicMock()
mock_sshclient.return_value = mock_ssh
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
        # create relying on the default hosts_key_file from config
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
host_key_files = sshpool.hosts_key_file
self.assertEqual('/var/lib/cinder/ssh_known_hosts', host_key_files)
mock_ssh.load_host_keys.assert_called_once_with(
'/var/lib/cinder/ssh_known_hosts')
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_host_key_file_kwargs(self, mock_isfile, mock_sshclient,
mock_open, mock_conf):
mock_ssh = mock.MagicMock()
mock_sshclient.return_value = mock_ssh
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1,
hosts_key_file='dummy_host_keyfile')
host_key_files = sshpool.hosts_key_file
self.assertIn('dummy_host_keyfile', host_key_files)
self.assertIn('/var/lib/cinder/ssh_known_hosts', host_key_files)
expected = [
mock.call.load_host_keys('dummy_host_keyfile'),
mock.call.load_host_keys('/var/lib/cinder/ssh_known_hosts')]
mock_ssh.assert_has_calls(expected, any_order=True)
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.isfile', return_value=True)
@mock.patch('paramiko.RSAKey.from_private_key_file')
@mock.patch('paramiko.SSHClient')
def test_single_ssh_connect(self, mock_sshclient, mock_pkey, mock_isfile,
mock_open, mock_conf):
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
# create with password
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
self.assertEqual(first_id, second_id)
self.assertEqual(1, mock_sshclient.return_value.connect.call_count)
# create with private key
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
privatekey="test",
min_size=1,
max_size=1)
self.assertEqual(2, mock_sshclient.return_value.connect.call_count)
# attempt to create with no password or private key
self.assertRaises(paramiko.SSHException,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
min_size=1,
max_size=1)
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
def test_closed_reopened_ssh_connections(self, mock_sshclient, mock_open):
        mock_sshclient.return_value = FakeSSHClient()
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=4)
with sshpool.item() as ssh:
mock_sshclient.reset_mock()
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
ssh.get_transport().active = False
sshpool.remove(ssh)
self.assertEqual(first_id, second_id)
# create a new client
mock_sshclient.return_value = FakeSSHClient()
with sshpool.item() as ssh:
third_id = ssh.id
self.assertNotEqual(first_id, third_id)
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
def test_missing_ssh_hosts_key_config(self, mock_sshclient, mock_open,
mock_conf):
mock_sshclient.return_value = FakeSSHClient()
mock_conf.ssh_hosts_key_file = None
# create with password
self.assertRaises(exception.ParameterNotFound,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
def test_create_default_known_hosts_file(self, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
CONF.state_path = '/var/lib/cinder'
CONF.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
default_file = '/var/lib/cinder/ssh_known_hosts'
ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with ssh_pool.item() as ssh:
mock_open.assert_called_once_with(default_file, 'a')
ssh_pool.remove(ssh)
@mock.patch('os.path.isfile', return_value=False)
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
def test_ssh_missing_hosts_key_file(self, mock_sshclient, mock_open,
mock_isfile):
mock_sshclient.return_value = FakeSSHClient()
CONF.ssh_hosts_key_file = '/tmp/blah'
self.assertNotIn(CONF.state_path, CONF.ssh_hosts_key_file)
self.assertRaises(exception.InvalidInput,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
@mock.patch.multiple('cinder.ssh_utils.CONF',
strict_ssh_host_key_policy=True,
ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts')
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_strict_host_key_policy(self, mock_isfile, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
self.assertTrue(isinstance(ssh.get_policy(),
paramiko.RejectPolicy))
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_not_strict_host_key_policy(self, mock_isfile, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
CONF.strict_ssh_host_key_policy = False
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
self.assertTrue(isinstance(ssh.get_policy(),
paramiko.AutoAddPolicy))
class BrickUtils(test.TestCase):
"""Unit test to test the brick utility wrapper functions."""
@mock.patch('cinder.utils.CONF')
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinder.utils.get_root_helper')
def test_brick_get_connector_properties(self, mock_helper, mock_get,
mock_conf):
mock_conf.my_ip = '1.2.3.4'
output = utils.brick_get_connector_properties()
mock_helper.assert_called_once_with()
mock_get.assert_called_once_with(mock_helper.return_value, '1.2.3.4',
False, False)
self.assertEqual(mock_get.return_value, output)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
@mock.patch('cinder.utils.get_root_helper')
def test_brick_get_connector(self, mock_helper, mock_factory):
output = utils.brick_get_connector('protocol')
mock_helper.assert_called_once_with()
self.assertEqual(mock_factory.return_value, output)
mock_factory.assert_called_once_with(
'protocol', mock_helper.return_value, driver=None,
execute=putils.execute, use_multipath=False,
device_scan_attempts=3)
class StringLengthTestCase(test.TestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_length=255))
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
11, 'name', max_length=255)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'', 'name', min_length=1)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_length=255)
class AddVisibleAdminMetadataTestCase(test.TestCase):
def test_add_visible_admin_metadata_visible_key_only(self):
admin_metadata = [{"key": "invisible_key", "value": "invisible_value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}]
metadata = [{"key": "key", "value": "value"},
{"key": "readonly", "value": "existing"}]
volume = {'volume_admin_metadata': admin_metadata,
'volume_metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual([{"key": "key", "value": "value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}],
volume['volume_metadata'])
admin_metadata = {"invisible_key": "invisible_value",
"readonly": "visible",
"attached_mode": "visible"}
metadata = {"key": "value", "readonly": "existing"}
volume = {'admin_metadata': admin_metadata,
'metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'key': 'value',
'attached_mode': 'visible',
'readonly': 'visible'},
volume['metadata'])
def test_add_visible_admin_metadata_no_visible_keys(self):
admin_metadata = [
{"key": "invisible_key1", "value": "invisible_value1"},
{"key": "invisible_key2", "value": "invisible_value2"},
{"key": "invisible_key3", "value": "invisible_value3"}]
metadata = [{"key": "key", "value": "value"}]
volume = {'volume_admin_metadata': admin_metadata,
'volume_metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual([{"key": "key", "value": "value"}],
volume['volume_metadata'])
admin_metadata = {"invisible_key1": "invisible_value1",
"invisible_key2": "invisible_value2",
"invisible_key3": "invisible_value3"}
metadata = {"key": "value"}
volume = {'admin_metadata': admin_metadata,
'metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'key': 'value'}, volume['metadata'])
def test_add_visible_admin_metadata_no_existing_metadata(self):
admin_metadata = [{"key": "invisible_key", "value": "invisible_value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}]
volume = {'volume_admin_metadata': admin_metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'},
volume['metadata'])
admin_metadata = {"invisible_key": "invisible_value",
"readonly": "visible",
"attached_mode": "visible"}
volume = {'admin_metadata': admin_metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'},
volume['metadata'])
class InvalidFilterTestCase(test.TestCase):
def test_admin_allows_all_options(self):
ctxt = mock.Mock(name='context')
ctxt.is_admin = True
filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
allowed_search_options = ('allowed1', 'allowed2')
allowed_orig = ('allowed1', 'allowed2')
utils.remove_invalid_filter_options(ctxt, filters,
allowed_search_options)
self.assertEqual(allowed_orig, allowed_search_options)
self.assertEqual(fltrs_orig, filters)
    def test_non_admin_allows_some_options(self):
ctxt = mock.Mock(name='context')
ctxt.is_admin = False
filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
allowed_search_options = ('allowed1', 'allowed2')
allowed_orig = ('allowed1', 'allowed2')
utils.remove_invalid_filter_options(ctxt, filters,
allowed_search_options)
self.assertEqual(allowed_orig, allowed_search_options)
self.assertNotEqual(fltrs_orig, filters)
self.assertEqual(allowed_search_options, tuple(sorted(filters.keys())))
class IsBlkDeviceTestCase(test.TestCase):
@mock.patch('stat.S_ISBLK', return_value=True)
@mock.patch('os.stat')
def test_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'some_device'
self.assertTrue(utils.is_blk_device(dev))
@mock.patch('stat.S_ISBLK', return_value=False)
@mock.patch('os.stat')
def test_not_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'not_some_device'
self.assertFalse(utils.is_blk_device(dev))
@mock.patch('stat.S_ISBLK', side_effect=Exception)
@mock.patch('os.stat')
def test_fail_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'device_exception'
self.assertFalse(utils.is_blk_device(dev))
class WrongException(Exception):
pass
class TestRetryDecorator(test.TestCase):
def setUp(self):
super(TestRetryDecorator, self).setUp()
def test_no_retry_required(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval=2,
retries=3,
backoff_rate=2)
def succeeds():
self.counter += 1
return 'success'
ret = succeeds()
self.assertFalse(mock_sleep.called)
self.assertEqual(ret, 'success')
self.assertEqual(self.counter, 1)
def test_retries_once(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval,
retries,
backoff_rate)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.VolumeBackendAPIException(data='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual(ret, 'success')
self.assertEqual(self.counter, 2)
self.assertEqual(mock_sleep.call_count, 1)
mock_sleep.assert_called_with(interval * backoff_rate)
def test_limit_is_reached(self):
self.counter = 0
retries = 3
interval = 2
backoff_rate = 4
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval,
retries,
backoff_rate)
def always_fails():
self.counter += 1
raise exception.VolumeBackendAPIException(data='fake')
self.assertRaises(exception.VolumeBackendAPIException,
always_fails)
self.assertEqual(retries, self.counter)
expected_sleep_arg = []
for i in range(retries):
if i > 0:
interval *= backoff_rate
expected_sleep_arg.append(float(interval))
mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg))
def test_wrong_exception_no_retry(self):
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException)
def raise_unexpected_error():
raise WrongException("wrong exception")
self.assertRaises(WrongException, raise_unexpected_error)
self.assertFalse(mock_sleep.called)
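# Illustrative sketch only (not part of the original test suite): production
# code typically applies the retry decorator exercised above directly to a
# backend call. The method name and parameter values below are made up for
# the example.
#
#   @utils.retry(exception.VolumeBackendAPIException,
#                interval=2, retries=3, backoff_rate=2)
#   def _call_backend(self):
#       return self.client.do_request()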
class VersionTestCase(test.TestCase):
def test_convert_version_to_int(self):
self.assertEqual(utils.convert_version_to_int('6.2.0'), 6002000)
self.assertEqual(utils.convert_version_to_int((6, 4, 3)), 6004003)
self.assertEqual(utils.convert_version_to_int((5, )), 5)
self.assertRaises(exception.CinderException,
utils.convert_version_to_int, '5a.6b')
def test_convert_version_to_string(self):
self.assertEqual(utils.convert_version_to_str(6007000), '6.7.0')
self.assertEqual(utils.convert_version_to_str(4), '4')
def test_convert_version_to_tuple(self):
self.assertEqual(utils.convert_version_to_tuple('6.7.0'), (6, 7, 0))
class LogTracingTestCase(test.TestCase):
def test_utils_setup_tracing(self):
self.mock_object(utils, 'LOG')
utils.setup_tracing(None)
self.assertFalse(utils.TRACE_API)
self.assertFalse(utils.TRACE_METHOD)
self.assertEqual(0, utils.LOG.warning.call_count)
utils.setup_tracing(['method'])
self.assertFalse(utils.TRACE_API)
self.assertTrue(utils.TRACE_METHOD)
self.assertEqual(0, utils.LOG.warning.call_count)
utils.setup_tracing(['method', 'api'])
self.assertTrue(utils.TRACE_API)
self.assertTrue(utils.TRACE_METHOD)
self.assertEqual(0, utils.LOG.warning.call_count)
def test_utils_setup_tracing_invalid_key(self):
self.mock_object(utils, 'LOG')
utils.setup_tracing(['fake'])
self.assertFalse(utils.TRACE_API)
self.assertFalse(utils.TRACE_METHOD)
self.assertEqual(1, utils.LOG.warning.call_count)
def test_utils_setup_tracing_valid_and_invalid_key(self):
self.mock_object(utils, 'LOG')
utils.setup_tracing(['method', 'fake'])
self.assertFalse(utils.TRACE_API)
self.assertTrue(utils.TRACE_METHOD)
self.assertEqual(1, utils.LOG.warning.call_count)
def test_trace_no_tracing(self):
self.mock_object(utils, 'LOG')
@utils.trace_method
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(None)
result = _trace_test_method()
self.assertEqual('OK', result)
self.assertEqual(0, utils.LOG.debug.call_count)
def test_utils_trace_method(self):
self.mock_object(utils, 'LOG')
@utils.trace_method
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method()
self.assertEqual('OK', result)
self.assertEqual(2, utils.LOG.debug.call_count)
def test_utils_trace_api(self):
self.mock_object(utils, 'LOG')
@utils.trace_api
def _trace_test_api(*args, **kwargs):
return 'OK'
utils.setup_tracing(['api'])
result = _trace_test_api()
self.assertEqual('OK', result)
self.assertEqual(2, utils.LOG.debug.call_count)
def test_utils_trace_method_default_logger(self):
mock_log = self.mock_object(utils, 'LOG')
@utils.trace_method
def _trace_test_method_custom_logger(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method_custom_logger()
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
def test_utils_trace_method_inner_decorator(self):
mock_logging = self.mock_object(utils, 'logging')
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log)
def _test_decorator(f):
def blah(*args, **kwargs):
return f(*args, **kwargs)
return blah
@_test_decorator
@utils.trace_method
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method(self)
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
# Ensure the correct function name was logged
for call in mock_log.debug.call_args_list:
self.assertTrue('_trace_test_method' in str(call))
self.assertFalse('blah' in str(call))
def test_utils_trace_method_outer_decorator(self):
mock_logging = self.mock_object(utils, 'logging')
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log)
def _test_decorator(f):
def blah(*args, **kwargs):
return f(*args, **kwargs)
return blah
@utils.trace_method
@_test_decorator
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method(self)
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
# Ensure the incorrect function name was logged
for call in mock_log.debug.call_args_list:
self.assertFalse('_trace_test_method' in str(call))
self.assertTrue('blah' in str(call))
def test_utils_trace_method_outer_decorator_with_functools(self):
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
self.mock_object(utils.logging, 'getLogger', mock_log)
mock_log = self.mock_object(utils, 'LOG')
def _test_decorator(f):
@functools.wraps(f)
def wraps(*args, **kwargs):
return f(*args, **kwargs)
return wraps
@utils.trace_method
@_test_decorator
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method()
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
        # Ensure the correct function name was logged thanks to functools.wraps
for call in mock_log.debug.call_args_list:
self.assertTrue('_trace_test_method' in str(call))
self.assertFalse('wraps' in str(call))
def test_utils_trace_method_with_exception(self):
self.LOG = self.mock_object(utils, 'LOG')
@utils.trace_method
def _trace_test_method(*args, **kwargs):
raise exception.APITimeout('test message')
utils.setup_tracing(['method'])
self.assertRaises(exception.APITimeout, _trace_test_method)
exception_log = self.LOG.debug.call_args_list[1]
self.assertTrue('exception' in str(exception_log))
self.assertTrue('test message' in str(exception_log))
def test_utils_trace_method_with_time(self):
mock_logging = self.mock_object(utils, 'logging')
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log)
mock_time = mock.Mock(side_effect=[3.1, 6])
self.mock_object(time, 'time', mock_time)
@utils.trace_method
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method(self)
self.assertEqual('OK', result)
return_log = mock_log.debug.call_args_list[1]
self.assertTrue('2900' in str(return_log))
def test_utils_trace_wrapper_class(self):
mock_logging = self.mock_object(utils, 'logging')
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log)
utils.setup_tracing(['method'])
@six.add_metaclass(utils.TraceWrapperMetaclass)
class MyClass(object):
def trace_test_method(self):
return 'OK'
test_class = MyClass()
result = test_class.trace_test_method()
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
| CloudServer/cinder | cinder/tests/unit/test_utils.py | Python | apache-2.0 | 68,624 |
#
#
# Copyright (C) 2007, 2008, 2010, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""HTTP module.
"""
import logging
import mimetools
import OpenSSL
import select
import socket
import errno
from cStringIO import StringIO
from ganeti import constants
from ganeti import utils
HTTP_GANETI_VERSION = "Ganeti %s" % constants.RELEASE_VERSION
HTTP_OK = 200
HTTP_NO_CONTENT = 204
HTTP_NOT_MODIFIED = 304
HTTP_0_9 = "HTTP/0.9"
HTTP_1_0 = "HTTP/1.0"
HTTP_1_1 = "HTTP/1.1"
HTTP_GET = "GET"
HTTP_HEAD = "HEAD"
HTTP_POST = "POST"
HTTP_PUT = "PUT"
HTTP_DELETE = "DELETE"
HTTP_ETAG = "ETag"
HTTP_HOST = "Host"
HTTP_SERVER = "Server"
HTTP_DATE = "Date"
HTTP_USER_AGENT = "User-Agent"
HTTP_CONTENT_TYPE = "Content-Type"
HTTP_CONTENT_LENGTH = "Content-Length"
HTTP_CONNECTION = "Connection"
HTTP_KEEP_ALIVE = "Keep-Alive"
HTTP_WWW_AUTHENTICATE = "WWW-Authenticate"
HTTP_AUTHORIZATION = "Authorization"
HTTP_AUTHENTICATION_INFO = "Authentication-Info"
HTTP_ALLOW = "Allow"
HTTP_APP_OCTET_STREAM = "application/octet-stream"
HTTP_APP_JSON = "application/json"
_SSL_UNEXPECTED_EOF = "Unexpected EOF"
# Socket operations
(SOCKOP_SEND,
SOCKOP_RECV,
SOCKOP_SHUTDOWN,
SOCKOP_HANDSHAKE) = range(4)
# send/receive quantum
SOCK_BUF_SIZE = 32768
class HttpError(Exception):
"""Internal exception for HTTP errors.
This should only be used for internal error reporting.
"""
class HttpConnectionClosed(Exception):
"""Internal exception for a closed connection.
This should only be used for internal error reporting. Only use
it if there's no other way to report this condition.
"""
class HttpSessionHandshakeUnexpectedEOF(HttpError):
"""Internal exception for errors during SSL handshake.
This should only be used for internal error reporting.
"""
class HttpSocketTimeout(Exception):
"""Internal exception for socket timeouts.
This should only be used for internal error reporting.
"""
class HttpException(Exception):
code = None
message = None
def __init__(self, message=None, headers=None):
Exception.__init__(self)
self.message = message
self.headers = headers
class HttpBadRequest(HttpException):
"""400 Bad Request
RFC2616, 10.4.1: The request could not be understood by the server
due to malformed syntax. The client SHOULD NOT repeat the request
without modifications.
"""
code = 400
class HttpUnauthorized(HttpException):
"""401 Unauthorized
RFC2616, section 10.4.2: The request requires user
authentication. The response MUST include a WWW-Authenticate header
field (section 14.47) containing a challenge applicable to the
requested resource.
"""
code = 401
class HttpForbidden(HttpException):
"""403 Forbidden
RFC2616, 10.4.4: The server understood the request, but is refusing
to fulfill it. Authorization will not help and the request SHOULD
NOT be repeated.
"""
code = 403
class HttpNotFound(HttpException):
"""404 Not Found
RFC2616, 10.4.5: The server has not found anything matching the
Request-URI. No indication is given of whether the condition is
temporary or permanent.
"""
code = 404
class HttpMethodNotAllowed(HttpException):
"""405 Method Not Allowed
RFC2616, 10.4.6: The method specified in the Request-Line is not
allowed for the resource identified by the Request-URI. The response
MUST include an Allow header containing a list of valid methods for
the requested resource.
"""
code = 405
class HttpNotAcceptable(HttpException):
"""406 Not Acceptable
RFC2616, 10.4.7: The resource identified by the request is only capable of
generating response entities which have content characteristics not
acceptable according to the accept headers sent in the request.
"""
code = 406
class HttpRequestTimeout(HttpException):
"""408 Request Timeout
RFC2616, 10.4.9: The client did not produce a request within the
time that the server was prepared to wait. The client MAY repeat the
request without modifications at any later time.
"""
code = 408
class HttpConflict(HttpException):
"""409 Conflict
RFC2616, 10.4.10: The request could not be completed due to a
conflict with the current state of the resource. This code is only
allowed in situations where it is expected that the user might be
able to resolve the conflict and resubmit the request.
"""
code = 409
class HttpGone(HttpException):
"""410 Gone
RFC2616, 10.4.11: The requested resource is no longer available at
the server and no forwarding address is known. This condition is
expected to be considered permanent.
"""
code = 410
class HttpLengthRequired(HttpException):
"""411 Length Required
RFC2616, 10.4.12: The server refuses to accept the request without a
defined Content-Length. The client MAY repeat the request if it adds
a valid Content-Length header field containing the length of the
message-body in the request message.
"""
code = 411
class HttpPreconditionFailed(HttpException):
"""412 Precondition Failed
RFC2616, 10.4.13: The precondition given in one or more of the
request-header fields evaluated to false when it was tested on the
server.
"""
code = 412
class HttpUnsupportedMediaType(HttpException):
"""415 Unsupported Media Type
RFC2616, 10.4.16: The server is refusing to service the request because the
entity of the request is in a format not supported by the requested resource
for the requested method.
"""
code = 415
class HttpInternalServerError(HttpException):
"""500 Internal Server Error
RFC2616, 10.5.1: The server encountered an unexpected condition
which prevented it from fulfilling the request.
"""
code = 500
class HttpNotImplemented(HttpException):
"""501 Not Implemented
RFC2616, 10.5.2: The server does not support the functionality
required to fulfill the request.
"""
code = 501
class HttpBadGateway(HttpException):
"""502 Bad Gateway
RFC2616, 10.5.3: The server, while acting as a gateway or proxy,
received an invalid response from the upstream server it accessed in
attempting to fulfill the request.
"""
code = 502
class HttpServiceUnavailable(HttpException):
"""503 Service Unavailable
RFC2616, 10.5.4: The server is currently unable to handle the
request due to a temporary overloading or maintenance of the server.
"""
code = 503
class HttpGatewayTimeout(HttpException):
"""504 Gateway Timeout
RFC2616, 10.5.5: The server, while acting as a gateway or proxy, did
not receive a timely response from the upstream server specified by
the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server
(e.g. DNS) it needed to access in attempting to complete the
request.
"""
code = 504
class HttpVersionNotSupported(HttpException):
"""505 HTTP Version Not Supported
RFC2616, 10.5.6: The server does not support, or refuses to support,
the HTTP protocol version that was used in the request message.
"""
code = 505
def ParseHeaders(buf):
"""Parses HTTP headers.
@note: This is just a trivial wrapper around C{mimetools.Message}
"""
return mimetools.Message(buf, 0)
def SocketOperation(sock, op, arg1, timeout):
"""Wrapper around socket functions.
This function abstracts error handling for socket operations, especially
for the complicated interaction with OpenSSL.
@type sock: socket
@param sock: Socket for the operation
@type op: int
@param op: Operation to execute (SOCKOP_* constants)
@type arg1: any
@param arg1: Parameter for function (if needed)
@type timeout: None or float
@param timeout: Timeout in seconds or None
@return: Return value of socket function
"""
# TODO: event_poll/event_check/override
if op in (SOCKOP_SEND, SOCKOP_HANDSHAKE):
event_poll = select.POLLOUT
elif op == SOCKOP_RECV:
event_poll = select.POLLIN
elif op == SOCKOP_SHUTDOWN:
event_poll = None
# The timeout is only used when OpenSSL requests polling for a condition.
# It is not advisable to have no timeout for shutdown.
assert timeout
else:
raise AssertionError("Invalid socket operation")
# Handshake is only supported by SSL sockets
if (op == SOCKOP_HANDSHAKE and
not isinstance(sock, OpenSSL.SSL.ConnectionType)):
return
# No override by default
event_override = 0
while True:
# Poll only for certain operations and when asked for by an override
if event_override or op in (SOCKOP_SEND, SOCKOP_RECV, SOCKOP_HANDSHAKE):
if event_override:
wait_for_event = event_override
else:
wait_for_event = event_poll
event = utils.WaitForFdCondition(sock, wait_for_event, timeout)
if event is None:
raise HttpSocketTimeout()
if event & (select.POLLNVAL | select.POLLHUP | select.POLLERR):
# Let the socket functions handle these
break
if not event & wait_for_event:
continue
# Reset override
event_override = 0
try:
try:
if op == SOCKOP_SEND:
return sock.send(arg1)
elif op == SOCKOP_RECV:
return sock.recv(arg1)
elif op == SOCKOP_SHUTDOWN:
if isinstance(sock, OpenSSL.SSL.ConnectionType):
# PyOpenSSL's shutdown() doesn't take arguments
return sock.shutdown()
else:
return sock.shutdown(arg1)
elif op == SOCKOP_HANDSHAKE:
return sock.do_handshake()
except OpenSSL.SSL.WantWriteError:
# OpenSSL wants to write, poll for POLLOUT
event_override = select.POLLOUT
continue
except OpenSSL.SSL.WantReadError:
# OpenSSL wants to read, poll for POLLIN
event_override = select.POLLIN | select.POLLPRI
continue
except OpenSSL.SSL.WantX509LookupError:
continue
except OpenSSL.SSL.ZeroReturnError, err:
# SSL Connection has been closed. In SSL 3.0 and TLS 1.0, this only
# occurs if a closure alert has occurred in the protocol, i.e. the
# connection has been closed cleanly. Note that this does not
# necessarily mean that the transport layer (e.g. a socket) has been
# closed.
if op == SOCKOP_SEND:
# Can happen during a renegotiation
raise HttpConnectionClosed(err.args)
elif op == SOCKOP_RECV:
return ""
# SSL_shutdown shouldn't return SSL_ERROR_ZERO_RETURN
raise socket.error(err.args)
except OpenSSL.SSL.SysCallError, err:
if op == SOCKOP_SEND:
# arg1 is the data when writing
if err.args and err.args[0] == -1 and arg1 == "":
# errors when writing empty strings are expected
# and can be ignored
return 0
if err.args == (-1, _SSL_UNEXPECTED_EOF):
if op == SOCKOP_RECV:
return ""
elif op == SOCKOP_HANDSHAKE:
# Can happen if peer disconnects directly after the connection is
# opened.
raise HttpSessionHandshakeUnexpectedEOF(err.args)
raise socket.error(err.args)
except OpenSSL.SSL.Error, err:
raise socket.error(err.args)
except socket.error, err:
if err.args and err.args[0] == errno.EAGAIN:
# Ignore EAGAIN
continue
raise
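# The helper below is an illustrative sketch only (not part of the original
# module): it shows how SocketOperation is typically combined with the
# SOCKOP_* constants defined above. The helper name and its default timeout
# are assumptions made for the example.
def _ExampleReceiveSome(sock, timeout=60.0):
  """Reads up to one buffer worth of data, returning "" on clean EOF."""
  try:
    return SocketOperation(sock, SOCKOP_RECV, SOCK_BUF_SIZE, timeout)
  except HttpSocketTimeout:
    # Callers decide whether a timeout is fatal for their protocol phase
    raise HttpError("Timeout while waiting for data")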
def ShutdownConnection(sock, close_timeout, write_timeout, msgreader, force):
"""Closes the connection.
@type sock: socket
@param sock: Socket to be shut down
@type close_timeout: float
@param close_timeout: How long to wait for the peer to close
the connection
@type write_timeout: float
@param write_timeout: Write timeout for shutdown
@type msgreader: http.HttpMessageReader
@param msgreader: Request message reader, used to determine whether
peer should close connection
@type force: bool
@param force: Whether to forcibly close the connection without
waiting for peer
"""
#print msgreader.peer_will_close, force
if msgreader and msgreader.peer_will_close and not force:
# Wait for peer to close
try:
# Check whether it's actually closed
if not SocketOperation(sock, SOCKOP_RECV, 1, close_timeout):
return
except (socket.error, HttpError, HttpSocketTimeout):
# Ignore errors at this stage
pass
# Close the connection from our side
try:
# We don't care about the return value, see NOTES in SSL_shutdown(3).
SocketOperation(sock, SOCKOP_SHUTDOWN, socket.SHUT_RDWR,
write_timeout)
except HttpSocketTimeout:
raise HttpError("Timeout while shutting down connection")
except socket.error, err:
# Ignore ENOTCONN
if not (err.args and err.args[0] == errno.ENOTCONN):
raise HttpError("Error while shutting down connection: %s" % err)
def Handshake(sock, write_timeout):
"""Shakes peer's hands.
@type sock: socket
  @param sock: Socket on which to perform the SSL handshake
@type write_timeout: float
@param write_timeout: Write timeout for handshake
"""
try:
return SocketOperation(sock, SOCKOP_HANDSHAKE, None, write_timeout)
except HttpSocketTimeout:
raise HttpError("Timeout during SSL handshake")
except socket.error, err:
raise HttpError("Error in SSL handshake: %s" % err)
class HttpSslParams(object):
"""Data class for SSL key and certificate.
"""
def __init__(self, ssl_key_path, ssl_cert_path):
"""Initializes this class.
@type ssl_key_path: string
@param ssl_key_path: Path to file containing SSL key in PEM format
@type ssl_cert_path: string
@param ssl_cert_path: Path to file containing SSL certificate
in PEM format
"""
self.ssl_key_pem = utils.ReadFile(ssl_key_path)
self.ssl_cert_pem = utils.ReadFile(ssl_cert_path)
self.ssl_cert_path = ssl_cert_path
def GetKey(self):
return OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
self.ssl_key_pem)
def GetCertificate(self):
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
self.ssl_cert_pem)
class HttpBase(object):
"""Base class for HTTP server and client.
"""
def __init__(self):
self.using_ssl = None
self._ssl_params = None
self._ssl_key = None
self._ssl_cert = None
def _CreateSocket(self, ssl_params, ssl_verify_peer, family,
ssl_verify_callback):
"""Creates a TCP socket and initializes SSL if needed.
@type ssl_params: HttpSslParams
@param ssl_params: SSL key and certificate
@type ssl_verify_peer: bool
@param ssl_verify_peer: Whether to require client certificate
and compare it with our certificate
@type family: int
    @param family: socket.AF_INET | socket.AF_INET6
    @type ssl_verify_callback: callable
    @param ssl_verify_callback: Callback for SSL certificate verification;
      must not be None when ssl_verify_peer is enabled
    """
assert family in (socket.AF_INET, socket.AF_INET6)
if ssl_verify_peer:
assert ssl_verify_callback is not None
self._ssl_params = ssl_params
sock = socket.socket(family, socket.SOCK_STREAM)
# Should we enable SSL?
self.using_ssl = ssl_params is not None
if not self.using_ssl:
return sock
self._ssl_key = ssl_params.GetKey()
self._ssl_cert = ssl_params.GetCertificate()
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
ctx.set_options(OpenSSL.SSL.OP_NO_SSLv2)
ciphers = self.GetSslCiphers()
logging.debug("Setting SSL cipher string %s", ciphers)
ctx.set_cipher_list(ciphers)
ctx.use_privatekey(self._ssl_key)
ctx.use_certificate(self._ssl_cert)
ctx.check_privatekey()
if ssl_verify_peer:
ctx.set_verify(OpenSSL.SSL.VERIFY_PEER |
OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
ssl_verify_callback)
# Also add our certificate as a trusted CA to be sent to the client.
# This is required at least for GnuTLS clients to work.
try:
# This will fail for PyOpenssl versions before 0.10
ctx.add_client_ca(self._ssl_cert)
except AttributeError:
# Fall back to letting OpenSSL read the certificate file directly.
ctx.load_client_ca(ssl_params.ssl_cert_path)
return OpenSSL.SSL.Connection(ctx, sock)
def GetSslCiphers(self): # pylint: disable=R0201
"""Returns the ciphers string for SSL.
"""
return constants.OPENSSL_CIPHERS
def _SSLVerifyCallback(self, conn, cert, errnum, errdepth, ok):
"""Verify the certificate provided by the peer
We only compare fingerprints. The client must use the same certificate as
we do on our side.
"""
# some parameters are unused, but this is the API
# pylint: disable=W0613
assert self._ssl_params, "SSL not initialized"
return (self._ssl_cert.digest("sha1") == cert.digest("sha1") and
self._ssl_cert.digest("md5") == cert.digest("md5"))
class HttpMessage(object):
"""Data structure for HTTP message.
"""
def __init__(self):
self.start_line = None
self.headers = None
self.body = None
class HttpClientToServerStartLine(object):
"""Data structure for HTTP request start line.
"""
def __init__(self, method, path, version):
self.method = method
self.path = path
self.version = version
def __str__(self):
return "%s %s %s" % (self.method, self.path, self.version)
class HttpServerToClientStartLine(object):
"""Data structure for HTTP response start line.
"""
def __init__(self, version, code, reason):
self.version = version
self.code = code
self.reason = reason
def __str__(self):
return "%s %s %s" % (self.version, self.code, self.reason)
class HttpMessageWriter(object):
"""Writes an HTTP message to a socket.
"""
def __init__(self, sock, msg, write_timeout):
"""Initializes this class and writes an HTTP message to a socket.
@type sock: socket
@param sock: Socket to be written to
@type msg: http.HttpMessage
@param msg: HTTP message to be written
@type write_timeout: float
@param write_timeout: Write timeout for socket
"""
self._msg = msg
self._PrepareMessage()
buf = self._FormatMessage()
pos = 0
end = len(buf)
while pos < end:
# Send only SOCK_BUF_SIZE bytes at a time
data = buf[pos:(pos + SOCK_BUF_SIZE)]
sent = SocketOperation(sock, SOCKOP_SEND, data, write_timeout)
# Remove sent bytes
pos += sent
assert pos == end, "Message wasn't sent completely"
def _PrepareMessage(self):
"""Prepares the HTTP message by setting mandatory headers.
"""
# RFC2616, section 4.3: "The presence of a message-body in a request is
# signaled by the inclusion of a Content-Length or Transfer-Encoding header
# field in the request's message-headers."
if self._msg.body:
self._msg.headers[HTTP_CONTENT_LENGTH] = len(self._msg.body)
def _FormatMessage(self):
"""Serializes the HTTP message into a string.
"""
buf = StringIO()
# Add start line
buf.write(str(self._msg.start_line))
buf.write("\r\n")
# Add headers
if self._msg.start_line.version != HTTP_0_9:
for name, value in self._msg.headers.iteritems():
buf.write("%s: %s\r\n" % (name, value))
buf.write("\r\n")
# Add message body if needed
if self.HasMessageBody():
buf.write(self._msg.body)
elif self._msg.body:
logging.warning("Ignoring message body")
return buf.getvalue()
def HasMessageBody(self):
"""Checks whether the HTTP message contains a body.
Can be overridden by subclasses.
"""
return bool(self._msg.body)
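# Illustrative sketch only (not part of the original module): writing a
# simple GET request with HttpMessageWriter could look like the following.
# The path, header choice and timeout are assumptions for the example.
#
#   msg = HttpMessage()
#   msg.start_line = HttpClientToServerStartLine(HTTP_GET, "/version", HTTP_1_1)
#   msg.headers = {HTTP_USER_AGENT: HTTP_GANETI_VERSION}
#   msg.body = None
#   HttpMessageWriter(sock, msg, write_timeout=60.0)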
class HttpMessageReader(object):
"""Reads HTTP message from socket.
"""
# Length limits
START_LINE_LENGTH_MAX = None
HEADER_LENGTH_MAX = None
# Parser state machine
PS_START_LINE = "start-line"
PS_HEADERS = "headers"
PS_BODY = "entity-body"
PS_COMPLETE = "complete"
def __init__(self, sock, msg, read_timeout):
"""Reads an HTTP message from a socket.
@type sock: socket
@param sock: Socket to be read from
@type msg: http.HttpMessage
@param msg: Object for the read message
@type read_timeout: float
@param read_timeout: Read timeout for socket
"""
self.sock = sock
self.msg = msg
self.start_line_buffer = None
self.header_buffer = StringIO()
self.body_buffer = StringIO()
self.parser_status = self.PS_START_LINE
self.content_length = None
self.peer_will_close = None
buf = ""
eof = False
while self.parser_status != self.PS_COMPLETE:
# TODO: Don't read more than necessary (Content-Length), otherwise
# data might be lost and/or an error could occur
data = SocketOperation(sock, SOCKOP_RECV, SOCK_BUF_SIZE, read_timeout)
if data:
buf += data
else:
eof = True
# Do some parsing and error checking while more data arrives
buf = self._ContinueParsing(buf, eof)
# Must be done only after the buffer has been evaluated
# TODO: Content-Length < len(data read) and connection closed
if (eof and
self.parser_status in (self.PS_START_LINE,
self.PS_HEADERS)):
raise HttpError("Connection closed prematurely")
# Parse rest
buf = self._ContinueParsing(buf, True)
assert self.parser_status == self.PS_COMPLETE
assert not buf, "Parser didn't read full response"
# Body is complete
msg.body = self.body_buffer.getvalue()
def _ContinueParsing(self, buf, eof):
"""Main function for HTTP message state machine.
@type buf: string
@param buf: Receive buffer
@type eof: bool
@param eof: Whether we've reached EOF on the socket
@rtype: string
@return: Updated receive buffer
"""
# TODO: Use offset instead of slicing when possible
if self.parser_status == self.PS_START_LINE:
# Expect start line
while True:
idx = buf.find("\r\n")
# RFC2616, section 4.1: "In the interest of robustness, servers SHOULD
# ignore any empty line(s) received where a Request-Line is expected.
# In other words, if the server is reading the protocol stream at the
# beginning of a message and receives a CRLF first, it should ignore
# the CRLF."
if idx == 0:
# TODO: Limit number of CRLFs/empty lines for safety?
buf = buf[2:]
continue
if idx > 0:
self.start_line_buffer = buf[:idx]
self._CheckStartLineLength(len(self.start_line_buffer))
# Remove status line, including CRLF
buf = buf[idx + 2:]
self.msg.start_line = self.ParseStartLine(self.start_line_buffer)
self.parser_status = self.PS_HEADERS
else:
# Check whether incoming data is getting too large, otherwise we just
# fill our read buffer.
self._CheckStartLineLength(len(buf))
break
# TODO: Handle messages without headers
if self.parser_status == self.PS_HEADERS:
# Wait for header end
idx = buf.find("\r\n\r\n")
if idx >= 0:
self.header_buffer.write(buf[:idx + 2])
self._CheckHeaderLength(self.header_buffer.tell())
# Remove headers, including CRLF
buf = buf[idx + 4:]
self._ParseHeaders()
self.parser_status = self.PS_BODY
else:
# Check whether incoming data is getting too large, otherwise we just
# fill our read buffer.
self._CheckHeaderLength(len(buf))
if self.parser_status == self.PS_BODY:
# TODO: Implement max size for body_buffer
self.body_buffer.write(buf)
buf = ""
# Check whether we've read everything
#
# RFC2616, section 4.4: "When a message-body is included with a message,
# the transfer-length of that body is determined by one of the following
# [...] 5. By the server closing the connection. (Closing the connection
# cannot be used to indicate the end of a request body, since that would
# leave no possibility for the server to send back a response.)"
#
# TODO: Error when buffer length > Content-Length header
if (eof or
self.content_length is None or
(self.content_length is not None and
self.body_buffer.tell() >= self.content_length)):
self.parser_status = self.PS_COMPLETE
return buf
def _CheckStartLineLength(self, length):
"""Limits the start line buffer size.
@type length: int
@param length: Buffer size
"""
if (self.START_LINE_LENGTH_MAX is not None and
length > self.START_LINE_LENGTH_MAX):
raise HttpError("Start line longer than %d chars" %
self.START_LINE_LENGTH_MAX)
def _CheckHeaderLength(self, length):
"""Limits the header buffer size.
@type length: int
@param length: Buffer size
"""
if (self.HEADER_LENGTH_MAX is not None and
length > self.HEADER_LENGTH_MAX):
raise HttpError("Headers longer than %d chars" % self.HEADER_LENGTH_MAX)
def ParseStartLine(self, start_line):
"""Parses the start line of a message.
Must be overridden by subclass.
@type start_line: string
@param start_line: Start line string
"""
raise NotImplementedError()
def _WillPeerCloseConnection(self):
"""Evaluate whether peer will close the connection.
@rtype: bool
@return: Whether peer will close the connection
"""
# RFC2616, section 14.10: "HTTP/1.1 defines the "close" connection option
# for the sender to signal that the connection will be closed after
# completion of the response. For example,
#
# Connection: close
#
# in either the request or the response header fields indicates that the
# connection SHOULD NOT be considered `persistent' (section 8.1) after the
# current request/response is complete."
hdr_connection = self.msg.headers.get(HTTP_CONNECTION, None)
if hdr_connection:
hdr_connection = hdr_connection.lower()
# An HTTP/1.1 server is assumed to stay open unless explicitly closed.
if self.msg.start_line.version == HTTP_1_1:
return (hdr_connection and "close" in hdr_connection)
# Some HTTP/1.0 implementations have support for persistent connections,
# using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.headers.get(HTTP_KEEP_ALIVE):
return False
# At least Akamai returns a "Connection: Keep-Alive" header, which was
# supposed to be sent by the client.
if hdr_connection and "keep-alive" in hdr_connection:
return False
return True
def _ParseHeaders(self):
"""Parses the headers.
This function also adjusts internal variables based on header values.
RFC2616, section 4.3: The presence of a message-body in a request is
signaled by the inclusion of a Content-Length or Transfer-Encoding header
field in the request's message-headers.
"""
# Parse headers
self.header_buffer.seek(0, 0)
self.msg.headers = ParseHeaders(self.header_buffer)
self.peer_will_close = self._WillPeerCloseConnection()
# Do we have a Content-Length header?
hdr_content_length = self.msg.headers.get(HTTP_CONTENT_LENGTH, None)
if hdr_content_length:
try:
self.content_length = int(hdr_content_length)
except (TypeError, ValueError):
self.content_length = None
if self.content_length is not None and self.content_length < 0:
self.content_length = None
# if the connection remains open and a content-length was not provided,
# then assume that the connection WILL close.
if self.content_length is None:
self.peer_will_close = True
| apyrgio/ganeti | lib/http/__init__.py | Python | bsd-2-clause | 28,992 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.cognitiveservices.search.entitysearch import EntitySearchAPI
from msrest.authentication import CognitiveServicesCredentials
from azure_devtools.scenario_tests import ReplayableTest, AzureTestError
from devtools_testutils import mgmt_settings_fake as fake_settings
class EntitySearchTest(ReplayableTest):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key']
def __init__(self, method_name):
self._fake_settings, self._real_settings = self._load_settings()
super(EntitySearchTest, self).__init__(method_name)
@property
def settings(self):
if self.is_live:
if self._real_settings:
return self._real_settings
else:
raise AzureTestError('Need a mgmt_settings_real.py file to run tests live.')
else:
return self._fake_settings
def _load_settings(self):
try:
from devtools_testutils import mgmt_settings_real as real_settings
return fake_settings, real_settings
except ImportError:
return fake_settings, None
def test_search(self):
query = 'seahawks'
market = 'en-us'
credentials = CognitiveServicesCredentials(
self.settings.CS_SUBSCRIPTION_KEY
)
entity_search_api = EntitySearchAPI(credentials)
response = entity_search_api.entities.search(query=query, market=market)
assert response is not None
assert response._type is not None
assert response.query_context is not None
assert response.query_context.original_query == query
assert response.entities is not None
assert response.entities.value is not None
assert len(response.entities.value) == 1
assert response.entities.value[0].contractual_rules is not None
| lmazuel/azure-sdk-for-python | azure-cognitiveservices-search-entitysearch/tests/test_entity_search.py | Python | mit | 2,330 |
#!/usr/bin/python
import server
import config
def main(server_class=server.MultiThreadedHTTPServer, handler_class=server.RequestHandler):
# set up the server socket based on values in config.py
server_address = (config.address, config.port)
# create the server object bound to the server socket
httpd = server_class(server_address, handler_class)
try:
# start the server
httpd.serve_forever()
except:
print "Stopping..."
# stop the server on error or keyboard interrupt
httpd.shutdown()
if __name__ == "__main__":
# if we are running this file directly (not importing)
# then run the main() function
main()
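# A minimal sketch of what config.py might contain (assumed values, for
# illustration only -- the real module may define additional options):
#
# address = "0.0.0.0"
# port = 8080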
| nettux443/pywebframe | start.py | Python | bsd-2-clause | 686 |
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_approx,
assert_equal,
)
class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timeout = 480
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some coins
self.nodes[0].generate(110)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.0001)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.0001)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
WalletGroupTest().main()
| OmniLayer/omnicore | test/functional/wallet_groups.py | Python | mit | 3,669 |
"""
WSGI config for ctlibre project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "ctlibre.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ctlibre.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
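# A minimal sketch of such middleware (the class name and header below are
# invented for illustration; this is not part of the project):
#
# class ExampleHeaderMiddleware(object):
#     def __init__(self, application):
#         self.application = application
#
#     def __call__(self, environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             headers.append(("X-Example-Middleware", "1"))
#             return start_response(status, headers, exc_info)
#         return self.application(environ, _start_response)
#
# application = ExampleHeaderMiddleware(application)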
| dellsystem/ctlibre.com | ctlibre/wsgi.py | Python | agpl-3.0 | 1,422 |
#!/usr/bin/env python3
import gi
gi.require_version('LibvirtGObject', '1.0')
from gi.repository import LibvirtGObject
from gi.repository import Gio
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
LibvirtGObject.init_object_check(None)
conn = LibvirtGObject.Connection(uri="test:///default")
canc = Gio.Cancellable()
def done(conn, result, data):
try:
conn.open_finish(result)
print("Opened " + conn.get_uri())
conn.fetch_domains(None)
print ("Fetched")
doms = conn.get_domains()
print ("Got " + str(doms))
for d in doms:
print ("One dom: " + str(d))
print ("Name " + d.get_name())
conf = d.get_config(0)
print ("Conf " + str(conf))
try:
conf.validate()
print ("Document is valid according to %s" % conf.get_schema())
except Exception as e:
print ("Document is not valid according to %s: %s: %s" % (conf.get_schema(), str(e), str(type(e))))
xml = conf.to_xml()
print ("XML " + xml)
print ("Info " + str(d.get_info().memory))
finally:
Gtk.main_quit()
conn.open_async(canc, done, None)
Gtk.main()
| libvirt/libvirt-glib | examples/conn-test.py | Python | lgpl-2.1 | 1,252 |
##############################################################################
#
# Copyright (C) 2021 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Jonathan Guerne <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models, fields
class DemandPlanningSettings(models.TransientModel):
_inherit = "res.config.settings"
new_donors_user = fields.Many2one(
"res.users", "User to notify on new donors onboarding opt out", readonly=False
)
@api.multi
def set_values(self):
super().set_values()
self.env["ir.config_parameter"].set_param(
"partner_communication_switzerland.new_donors_user",
str(self.new_donors_user.id or 0),
)
@api.model
def get_values(self):
res = super().get_values()
param_obj = self.env["ir.config_parameter"].sudo()
new_donors_user_id = int(
param_obj.get_param("partner_communication_switzerland.new_donors_user", "0")
)
res.update(
{"new_donors_user": new_donors_user_id, }
)
return res
| CompassionCH/compassion-switzerland | partner_communication_switzerland/wizards/onboarding_settings.py | Python | agpl-3.0 | 1,263 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Draft'
db.create_table('draft_draft', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, blank=True)),
('serialized_data', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('draft', ['Draft'])
def backwards(self, orm):
# Deleting model 'Draft'
db.delete_table('draft_draft')
models = {
'draft.draft': {
'Meta': {'object_name': 'Draft'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'serialized_data': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['draft']
| platypus-creation/django-draft | draft/migrations/0001_initial.py | Python | mit | 1,137 |
from typing import Optional
from fastapi import FastAPI
from pydantic import BaseModel, EmailStr
app = FastAPI()
class UserIn(BaseModel):
username: str
password: str
email: EmailStr
full_name: Optional[str] = None
# Don't do this in production!
@app.post("/user/", response_model=UserIn)
async def create_user(user: UserIn):
return user
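# A safer variant (sketch, not part of this example) declares a separate output model
# without the password and uses it as the response_model, so the password is filtered
# out of the response:
#
# class UserOut(BaseModel):
#     username: str
#     email: EmailStr
#     full_name: Optional[str] = None
#
# @app.post("/user/", response_model=UserOut)
# async def create_user(user: UserIn):
#     return user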
| tiangolo/fastapi | docs_src/response_model/tutorial002.py | Python | mit | 363 |
"""
This is an example settings/local.py file.
These settings overrides what's in settings/base.py
"""
import logging
# To extend any settings from settings/base.py here's an example:
#from . import base
#INSTALLED_APPS = base.INSTALLED_APPS + ['debug_toolbar']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db/development.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
#'OPTIONS': {
# 'init_command': 'SET storage_engine=InnoDB',
# 'charset' : 'utf8',
# 'use_unicode' : True,
#},
#'TEST_CHARSET': 'utf8',
#'TEST_COLLATION': 'utf8_general_ci',
},
# 'slave': {
# ...
# },
}
# Uncomment this and set to all slave DBs in use on the site.
# SLAVE_DATABASES = ['slave']
# Recipients of traceback emails and other notifications.
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = True
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = True
# Make this unique, and don't share it with anybody. It cannot be blank.
SECRET_KEY = 'r&3%!i63)=vdhohrd^ht(!y&1j)hpc03w!31q*%!==t_upz8!5'
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False # required to activate celeryd
# BROKER_HOST = 'localhost'
# BROKER_PORT = 5672
# BROKER_USER = 'django'
# BROKER_PASSWORD = 'django'
# BROKER_VHOST = 'django'
# CELERY_RESULT_BACKEND = 'amqp'
## Log settings
LOG_LEVEL = logging.INFO
HAS_SYSLOG = True
SYSLOG_TAG = "http_app_mnms" # Make this unique to your project.
# Remove this configuration variable to use your custom logging configuration
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'loggers': {
'mnms': {
'level': "DEBUG"
}
}
}
# Common Event Format logging parameters
#CEF_PRODUCT = 'mnms'
#CEF_VENDOR = 'Your Company'
#CEF_VERSION = '0'
#CEF_DEVICE_VERSION = '0'
INTERNAL_IPS = ('127.0.0.1',)
# Enable these options for memcached
#CACHE_BACKEND= "memcached://127.0.0.1:11211/"
#CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True
# Set this to true if you use a proxy that sets X-Forwarded-Host
#USE_X_FORWARDED_HOST = False
SERVER_EMAIL = "[email protected]"
DEFAULT_FROM_EMAIL = "[email protected]"
SYSTEM_EMAIL_PREFIX = "[mnms]"
| blorenz/SEO | mnms/settings/local-dist.py | Python | bsd-3-clause | 2,867 |
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Windows shell documentation fragment
    # FIXME: set_module_language doesn't belong here but must be set so it doesn't fail when someone
    # calls get_option('set_module_language') on this plugin
DOCUMENTATION = """
options:
async_dir:
description:
- Directory in which ansible will keep async job information.
- Before Ansible 2.8, this was set to C(remote_tmp + "\\.ansible_async").
default: '%USERPROFILE%\\.ansible_async'
ini:
- section: powershell
key: async_dir
vars:
- name: ansible_async_dir
version_added: '2.8'
remote_tmp:
description:
- Temporary directory to use on targets when copying files to the host.
default: '%TEMP%'
ini:
- section: powershell
key: remote_tmp
vars:
- name: ansible_remote_tmp
set_module_language:
description:
- Controls if we set the locale for modules when executing on the
target.
- Windows only supports C(no) as an option.
type: bool
default: 'no'
choices: ['no', False]
environment:
description:
- List of dictionaries of environment variables and their values to use when
executing commands.
type: list
default: [{}]
"""
| privateip/ansible | lib/ansible/plugins/doc_fragments/shell_windows.py | Python | gpl-3.0 | 1,460 |
import unittest
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from pyramid import testing
from .models import Session
DEFAULT_WAIT = 5
SCREEN_DUMP_LOCATION = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'screendumps'
)
class TestMyViewSuccessCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
Session.remove()
testing.tearDown()
def test_passing_view(self):
pass
class TestMyViewFailureCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
Session.remove()
testing.tearDown()
def test_failing_view(self):
pass
class FunctionalTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(DEFAULT_WAIT)
def tearDown(self):
Session.remove()
testing.tearDown()
self.browser.quit()
class HomePageTest(FunctionalTest):
def zoom_in(self, repeat=1, sleep_time=1):
for _ in range(repeat):
(self.browser.find_element_by_class_name(
"leaflet-control-zoom-in").click()
)
sleep(sleep_time)
def zoom_out(self, repeat=1, sleep_time=1):
for _ in range(repeat):
(self.browser.find_element_by_class_name(
"leaflet-control-zoom-out").click()
)
sleep(sleep_time)
def arrow_down(self, repeat=1, sleep_time=1):
for _ in range(repeat):
self.browser.find_element_by_id("map").send_keys(Keys.ARROW_DOWN)
sleep(sleep_time)
def arrow_right(self, repeat=1, sleep_time=1):
for _ in range(repeat):
self.browser.find_element_by_id("map").send_keys(Keys.ARROW_RIGHT)
sleep(sleep_time)
def arrow_left(self, repeat=1, sleep_time=1):
for _ in range(repeat):
self.browser.find_element_by_id("map").send_keys(Keys.ARROW_LEFT)
sleep(sleep_time)
def arrow_up(self, repeat=1, sleep_time=1):
for _ in range(repeat):
self.browser.find_element_by_id("map").send_keys(Keys.ARROW_UP)
sleep(sleep_time)
# Tests here
def test_home_page_loads(self):
#Billy sees the landsat.club homepage and rejoices. Clicking ensues.
self.browser.get('localhost:8000')
self.zoom_out(repeat=5, sleep_time=.5)
self.arrow_right(repeat=5, sleep_time=.2)
self.arrow_down(repeat=3, sleep_time=.2)
self.browser.find_element_by_class_name(
'leaflet-control-mapbox-geocoder-toggle').click()
map_input_form = '//*[@id="map"]/div[2]/div[1]/div[2]/div[2]/form/input'
(self.browser.find_element_by_xpath(map_input_form)
.send_keys('10010', Keys.RETURN)
)
sleep(.75)
self.zoom_out(repeat=3)
self.assertIn('Snapsat', self.browser.page_source)
| recombinators/snapsat | app/app/tests.py | Python | mit | 3,083 |
#
# Copyright (C) 2009 Aaron C Spike
# 2010 Martin Owens
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""
Python barcode renderer for EAN5 barcodes. Designed for use with Inkscape.
"""
from BaseEan import EanBarcode
FAMS = [ '11000','10100','10010','10001','01100','00110','00011','01010','01001','00101' ]
START = '01011'
class Ean5(EanBarcode):
"""Provide an Ean5 barcode generator"""
name = 'ean5'
length = 5
def _encode(self, number):
self.x += 110.0*self.scale # horiz offset so it does not overlap EAN13
self.y -= (self.height + 5)*self.scale # move the text to the top
self.label = ' '.join(self.space(number))
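        # EAN-5 check: weight the five digits alternately by 3 and 9, then take the sum modulo 10
        # to select the parity family used for encoding.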
family = sum([int(n)*int(m) for n,m in zip(number, '39393')]) % 10
return START + '01'.join(self.encode_interleaved(family, number, FAMS))
| danieljabailey/inkscape_experiments | share/extensions/Barcode/Ean5.py | Python | gpl-2.0 | 1,528 |
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import six
import radiomics
from radiomics import featureextractor, getFeatureClasses
# Get some test data
# Download the test case to temporary files and return its location. If already downloaded, it is not downloaded again,
# but its location is still returned.
imageName, maskName = radiomics.getTestCase('brain1')
# Get the location of the example settings file
paramsFile = os.path.abspath(os.path.join('exampleSettings', 'Params.yaml'))
if imageName is None or maskName is None: # Something went wrong, in this case PyRadiomics will also log an error
print('Error getting testcase!')
exit()
# Regulate verbosity with radiomics.verbosity
# radiomics.setVerbosity(logging.INFO)
# Get the PyRadiomics logger (default log-level = INFO)
logger = radiomics.logger
logger.setLevel(logging.DEBUG) # set level to DEBUG to include debug log messages in log file
# Write out all log entries to a file
handler = logging.FileHandler(filename='testLog.txt', mode='w')
formatter = logging.Formatter("%(levelname)s:%(name)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# Initialize feature extractor using the settings file
extractor = featureextractor.RadiomicsFeatureExtractor(paramsFile)
featureClasses = getFeatureClasses()
print("Active features:")
for cls, features in six.iteritems(extractor.enabledFeatures):
if features is None or len(features) == 0:
features = [f for f, deprecated in six.iteritems(featureClasses[cls].getFeatureNames()) if not deprecated]
for f in features:
print(f)
print(getattr(featureClasses[cls], 'get%sFeatureValue' % f).__doc__)
print("Calculating features")
featureVector = extractor.execute(imageName, maskName)
for featureName in featureVector.keys():
print("Computed %s: %s" % (featureName, featureVector[featureName]))
| Radiomics/pyradiomics | examples/helloRadiomicsWithSettings.py | Python | bsd-3-clause | 1,904 |
#!/usr/bin/env python
"""
Senty Project
Copyright(c) 2017 Senty.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Kamal Heib <[email protected]>
"""
| Kamalheib/senty | senty/packages/rdmacm/__init__.py | Python | gpl-2.0 | 790 |
# website/context_processors.py
from django.conf import settings
def ga_tracking_id(request):
return {'ga_tracking_id': settings.GA_TRACKING_ID}
def use_google_analytics(request):
return {'use_google_analytics': settings.USE_GA}
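# To activate these processors, add their dotted paths (assumed here from this file's
# location in the repository) to TEMPLATES[0]['OPTIONS']['context_processors'] in the
# Django settings, e.g.:
# 'spacelaunchnow.context_processor.ga_tracking_id'
# 'spacelaunchnow.context_processor.use_google_analytics'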
| ItsCalebJones/SpaceLaunchNow-Server | spacelaunchnow/context_processor.py | Python | apache-2.0 | 241 |
import pytest
import sys
from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup
from _pytest.runner import runtestprotocol
class TestEvaluator:
def test_no_marker(self, testdir):
item = testdir.getitem("def test_func(): pass")
evalskipif = MarkEvaluator(item, 'skipif')
assert not evalskipif
assert not evalskipif.istrue()
def test_marked_no_args(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xyz
def test_func():
pass
""")
ev = MarkEvaluator(item, 'xyz')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == ""
assert not ev.get("run", False)
def test_marked_one_arg(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')")
def test_func():
pass
""")
ev = MarkEvaluator(item, 'xyz')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: hasattr(os, 'sep')"
@pytest.mark.skipif('sys.version_info[0] >= 3')
def test_marked_one_arg_unicode(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xyz(u"hasattr(os, 'sep')")
def test_func():
pass
""")
ev = MarkEvaluator(item, 'xyz')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: hasattr(os, 'sep')"
def test_marked_one_arg_with_reason(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
def test_func():
pass
""")
ev = MarkEvaluator(item, 'xyz')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "hello world"
assert ev.get("attr") == 2
def test_marked_one_arg_twice(self, testdir):
lines = [
'''@pytest.mark.skipif("not hasattr(os, 'murks')")''',
'''@pytest.mark.skipif("hasattr(os, 'murks')")'''
]
for i in range(0, 2):
item = testdir.getitem("""
import pytest
%s
%s
def test_func():
pass
""" % (lines[i], lines[(i+1) %2]))
ev = MarkEvaluator(item, 'skipif')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
def test_marked_one_arg_twice2(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.skipif("hasattr(os, 'murks')")
@pytest.mark.skipif("not hasattr(os, 'murks')")
def test_func():
pass
""")
ev = MarkEvaluator(item, 'skipif')
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
def test_marked_skip_with_not_string(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.skipif(False)
def test_func():
pass
""")
ev = MarkEvaluator(item, 'skipif')
exc = pytest.raises(pytest.fail.Exception, ev.istrue)
assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in exc.value.msg
def test_skipif_class(self, testdir):
item, = testdir.getitems("""
import pytest
class TestClass:
pytestmark = pytest.mark.skipif("config._hackxyz")
def test_func(self):
pass
""")
item.config._hackxyz = 3
ev = MarkEvaluator(item, 'skipif')
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: config._hackxyz"
class TestXFail:
@pytest.mark.parametrize('strict', [True, False])
def test_xfail_simple(self, testdir, strict):
item = testdir.getitem("""
import pytest
@pytest.mark.xfail(strict=%s)
def test_func():
assert 0
""" % strict)
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.skipped
assert callreport.wasxfail == ""
def test_xfail_xpassed(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xfail
def test_func():
assert 1
""")
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.failed
assert callreport.wasxfail == ""
def test_xfail_run_anyway(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
def test_func2():
pytest.xfail("hello")
""")
result = testdir.runpytest("--runxfail")
result.stdout.fnmatch_lines([
"*def test_func():*",
"*assert 0*",
"*1 failed*1 pass*",
])
def test_xfail_evalfalse_but_fails(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.xfail('False')
def test_func():
assert 0
""")
reports = runtestprotocol(item, log=False)
callreport = reports[1]
assert callreport.failed
assert not hasattr(callreport, "wasxfail")
assert 'xfail' in callreport.keywords
def test_xfail_not_report_default(self, testdir):
p = testdir.makepyfile(test_one="""
import pytest
@pytest.mark.xfail
def test_this():
assert 0
""")
testdir.runpytest(p, '-v')
#result.stdout.fnmatch_lines([
# "*HINT*use*-r*"
#])
def test_xfail_not_run_xfail_reporting(self, testdir):
p = testdir.makepyfile(test_one="""
import pytest
@pytest.mark.xfail(run=False, reason="noway")
def test_this():
assert 0
@pytest.mark.xfail("True", run=False)
def test_this_true():
assert 0
@pytest.mark.xfail("False", run=False, reason="huh")
def test_this_false():
assert 1
""")
result = testdir.runpytest(p, '--report=xfailed', )
result.stdout.fnmatch_lines([
"*test_one*test_this*",
"*NOTRUN*noway",
"*test_one*test_this_true*",
"*NOTRUN*condition:*True*",
"*1 passed*",
])
def test_xfail_not_run_no_setup_run(self, testdir):
p = testdir.makepyfile(test_one="""
import pytest
@pytest.mark.xfail(run=False, reason="hello")
def test_this():
assert 0
def setup_module(mod):
raise ValueError(42)
""")
result = testdir.runpytest(p, '--report=xfailed', )
result.stdout.fnmatch_lines([
"*test_one*test_this*",
"*NOTRUN*hello",
"*1 xfailed*",
])
def test_xfail_xpass(self, testdir):
p = testdir.makepyfile(test_one="""
import pytest
@pytest.mark.xfail
def test_that():
assert 1
""")
result = testdir.runpytest(p, '-rX')
result.stdout.fnmatch_lines([
"*XPASS*test_that*",
"*1 xpassed*"
])
assert result.ret == 0
def test_xfail_imperative(self, testdir):
p = testdir.makepyfile("""
import pytest
def test_this():
pytest.xfail("hello")
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*1 xfailed*",
])
result = testdir.runpytest(p, "-rx")
result.stdout.fnmatch_lines([
"*XFAIL*test_this*",
"*reason:*hello*",
])
result = testdir.runpytest(p, "--runxfail")
result.stdout.fnmatch_lines("*1 pass*")
def test_xfail_imperative_in_setup_function(self, testdir):
p = testdir.makepyfile("""
import pytest
def setup_function(function):
pytest.xfail("hello")
def test_this():
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*1 xfailed*",
])
result = testdir.runpytest(p, "-rx")
result.stdout.fnmatch_lines([
"*XFAIL*test_this*",
"*reason:*hello*",
])
result = testdir.runpytest(p, "--runxfail")
result.stdout.fnmatch_lines("""
*def test_this*
*1 fail*
""")
def xtest_dynamic_xfail_set_during_setup(self, testdir):
p = testdir.makepyfile("""
import pytest
def setup_function(function):
pytest.mark.xfail(function)
def test_this():
assert 0
def test_that():
assert 1
""")
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines([
"*XFAIL*test_this*",
"*XPASS*test_that*",
])
def test_dynamic_xfail_no_run(self, testdir):
p = testdir.makepyfile("""
import pytest
def pytest_funcarg__arg(request):
request.applymarker(pytest.mark.xfail(run=False))
def test_this(arg):
assert 0
""")
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines([
"*XFAIL*test_this*",
"*NOTRUN*",
])
def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
p = testdir.makepyfile("""
import pytest
def pytest_funcarg__arg(request):
request.applymarker(pytest.mark.xfail)
def test_this2(arg):
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*1 xfailed*",
])
@pytest.mark.parametrize('expected, actual, matchline',
[('TypeError', 'TypeError', "*1 xfailed*"),
('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"),
('TypeError', 'IndexError', "*1 failed*"),
('(AttributeError, TypeError)', 'IndexError', "*1 failed*"),
])
def test_xfail_raises(self, expected, actual, matchline, testdir):
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(raises=%s)
def test_raises():
raise %s()
""" % (expected, actual))
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
matchline,
])
def test_strict_sanity(self, testdir):
"""sanity check for xfail(strict=True): a failing test should behave
exactly like a normal xfail.
"""
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(reason='unsupported feature', strict=True)
def test_foo():
assert 0
""")
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines([
'*XFAIL*',
'*unsupported feature*',
])
assert result.ret == 0
@pytest.mark.parametrize('strict', [True, False])
def test_strict_xfail(self, testdir, strict):
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(reason='unsupported feature', strict=%s)
def test_foo():
with open('foo_executed', 'w'): pass # make sure test executes
""" % strict)
result = testdir.runpytest(p, '-rxX')
if strict:
result.stdout.fnmatch_lines([
'*test_foo*',
'*XPASS(strict)*unsupported feature*',
])
else:
result.stdout.fnmatch_lines([
'*test_strict_xfail*',
'XPASS test_strict_xfail.py::test_foo unsupported feature',
])
assert result.ret == (1 if strict else 0)
assert testdir.tmpdir.join('foo_executed').isfile()
@pytest.mark.parametrize('strict', [True, False])
def test_strict_xfail_condition(self, testdir, strict):
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
def test_foo():
pass
""" % strict)
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines('*1 passed*')
assert result.ret == 0
@pytest.mark.parametrize('strict', [True, False])
def test_xfail_condition_keyword(self, testdir, strict):
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(condition=False, reason='unsupported feature', strict=%s)
def test_foo():
pass
""" % strict)
result = testdir.runpytest(p, '-rxX')
result.stdout.fnmatch_lines('*1 passed*')
assert result.ret == 0
@pytest.mark.parametrize('strict_val', ['true', 'false'])
def test_strict_xfail_default_from_file(self, testdir, strict_val):
testdir.makeini('''
[pytest]
xfail_strict = %s
''' % strict_val)
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail(reason='unsupported feature')
def test_foo():
pass
""")
result = testdir.runpytest(p, '-rxX')
strict = strict_val == 'true'
result.stdout.fnmatch_lines('*1 failed*' if strict else '*1 xpassed*')
assert result.ret == (1 if strict else 0)
class TestXFailwithSetupTeardown:
def test_failing_setup_issue9(self, testdir):
testdir.makepyfile("""
import pytest
def setup_function(func):
assert 0
@pytest.mark.xfail
def test_func():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*1 xfail*",
])
def test_failing_teardown_issue9(self, testdir):
testdir.makepyfile("""
import pytest
def teardown_function(func):
assert 0
@pytest.mark.xfail
def test_func():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*1 xfail*",
])
class TestSkip:
def test_skip_class(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip
class TestSomething(object):
def test_foo(self):
pass
def test_bar(self):
pass
def test_baz():
pass
""")
rec = testdir.inline_run()
rec.assertoutcome(skipped=2, passed=1)
def test_skips_on_false_string(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip('False')
def test_foo():
pass
""")
rec = testdir.inline_run()
rec.assertoutcome(skipped=1)
def test_arg_as_reason(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip('testing stuff')
def test_bar():
pass
""")
result = testdir.runpytest('-rs')
result.stdout.fnmatch_lines([
"*testing stuff*",
"*1 skipped*",
])
def test_skip_no_reason(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip
def test_foo():
pass
""")
result = testdir.runpytest('-rs')
result.stdout.fnmatch_lines([
"*unconditional skip*",
"*1 skipped*",
])
def test_skip_with_reason(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip(reason="for lolz")
def test_bar():
pass
""")
result = testdir.runpytest('-rs')
result.stdout.fnmatch_lines([
"*for lolz*",
"*1 skipped*",
])
def test_only_skips_marked_test(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip
def test_foo():
pass
@pytest.mark.skip(reason="nothing in particular")
def test_bar():
pass
def test_baz():
assert True
""")
result = testdir.runpytest('-rs')
result.stdout.fnmatch_lines([
"*nothing in particular*",
"*1 passed*2 skipped*",
])
def test_strict_and_skip(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skip
def test_hello():
pass
""")
result = testdir.runpytest("-rs --strict")
result.stdout.fnmatch_lines([
"*unconditional skip*",
"*1 skipped*",
])
class TestSkipif:
def test_skipif_conditional(self, testdir):
item = testdir.getitem("""
import pytest
@pytest.mark.skipif("hasattr(os, 'sep')")
def test_func():
pass
""") # noqa
x = pytest.raises(pytest.skip.Exception, lambda:
pytest_runtest_setup(item))
assert x.value.msg == "condition: hasattr(os, 'sep')"
@pytest.mark.parametrize('params', [
'"hasattr(sys, \'platform\')"',
'True, reason="invalid platform"',
])
def test_skipif_reporting(self, testdir, params):
p = testdir.makepyfile(test_foo="""
import pytest
@pytest.mark.skipif(%(params)s)
def test_that():
assert 0
""" % dict(params=params))
result = testdir.runpytest(p, '-s', '-rs')
result.stdout.fnmatch_lines([
"*SKIP*1*test_foo.py*platform*",
"*1 skipped*"
])
assert result.ret == 0
@pytest.mark.parametrize('marker, msg1, msg2', [
('skipif', 'SKIP', 'skipped'),
('xfail', 'XPASS', 'xpassed'),
])
def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2):
testdir.makepyfile(test_foo="""
import pytest
@pytest.mark.{marker}(False, reason='first_condition')
@pytest.mark.{marker}(True, reason='second_condition')
def test_foobar():
assert 1
""".format(marker=marker))
result = testdir.runpytest('-s', '-rsxX')
result.stdout.fnmatch_lines([
"*{msg1}*test_foo.py*second_condition*".format(msg1=msg1),
"*1 {msg2}*".format(msg2=msg2),
])
assert result.ret == 0
def test_skip_not_report_default(testdir):
p = testdir.makepyfile(test_one="""
import pytest
def test_this():
pytest.skip("hello")
""")
result = testdir.runpytest(p, '-v')
result.stdout.fnmatch_lines([
#"*HINT*use*-r*",
"*1 skipped*",
])
def test_skipif_class(testdir):
p = testdir.makepyfile("""
import pytest
class TestClass:
pytestmark = pytest.mark.skipif("True")
def test_that(self):
assert 0
def test_though(self):
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*2 skipped*"
])
def test_skip_reasons_folding():
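    # folded_skips() collapses skip reports that share the same (path, lineno, reason)
    # into a single entry carrying a count, which is what this test asserts below.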
path = 'xyz'
lineno = 3
message = "justso"
longrepr = (path, lineno, message)
class X:
pass
ev1 = X()
ev1.when = "execute"
ev1.skipped = True
ev1.longrepr = longrepr
ev2 = X()
ev2.longrepr = longrepr
ev2.skipped = True
l = folded_skips([ev1, ev2])
assert len(l) == 1
    num, fspath, flineno, reason = l[0]
    assert num == 2
    assert fspath == path
    assert flineno == lineno  # compare the folded lineno against the original instead of shadowing it
    assert reason == message
def test_skipped_reasons_functional(testdir):
testdir.makepyfile(
test_one="""
from conftest import doskip
def setup_function(func):
doskip()
def test_func():
pass
class TestClass:
def test_method(self):
doskip()
""",
test_two = """
from conftest import doskip
doskip()
""",
conftest = """
import pytest
def doskip():
pytest.skip('test')
"""
)
result = testdir.runpytest('--report=skipped')
result.stdout.fnmatch_lines([
"*SKIP*3*conftest.py:3: test",
])
assert result.ret == 0
def test_reportchars(testdir):
testdir.makepyfile("""
import pytest
def test_1():
assert 0
@pytest.mark.xfail
def test_2():
assert 0
@pytest.mark.xfail
def test_3():
pass
def test_4():
pytest.skip("four")
""")
result = testdir.runpytest("-rfxXs")
result.stdout.fnmatch_lines([
"FAIL*test_1*",
"XFAIL*test_2*",
"XPASS*test_3*",
"SKIP*four*",
])
def test_reportchars_error(testdir):
testdir.makepyfile(
conftest="""
def pytest_runtest_teardown():
assert 0
""",
test_simple="""
def test_foo():
pass
""")
result = testdir.runpytest('-rE')
result.stdout.fnmatch_lines([
'ERROR*test_foo*',
])
def test_reportchars_all(testdir):
testdir.makepyfile("""
import pytest
def test_1():
assert 0
@pytest.mark.xfail
def test_2():
assert 0
@pytest.mark.xfail
def test_3():
pass
def test_4():
pytest.skip("four")
""")
result = testdir.runpytest("-ra")
result.stdout.fnmatch_lines([
"FAIL*test_1*",
"SKIP*four*",
"XFAIL*test_2*",
"XPASS*test_3*",
])
def test_reportchars_all_error(testdir):
testdir.makepyfile(
conftest="""
def pytest_runtest_teardown():
assert 0
""",
test_simple="""
def test_foo():
pass
""")
result = testdir.runpytest('-ra')
result.stdout.fnmatch_lines([
'ERROR*test_foo*',
])
@pytest.mark.xfail("hasattr(sys, 'pypy_version_info')")
def test_errors_in_xfail_skip_expressions(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skipif("asd")
def test_nameerror():
pass
@pytest.mark.xfail("syntax error")
def test_syntax():
pass
def test_func():
pass
""")
result = testdir.runpytest()
markline = " ^"
if sys.platform.startswith("java"):
# XXX report this to java
markline = "*" + markline[8:]
result.stdout.fnmatch_lines([
"*ERROR*test_nameerror*",
"*evaluating*skipif*expression*",
"*asd*",
"*ERROR*test_syntax*",
"*evaluating*xfail*expression*",
" syntax error",
markline,
"SyntaxError: invalid syntax",
"*1 pass*2 error*",
])
def test_xfail_skipif_with_globals(testdir):
testdir.makepyfile("""
import pytest
x = 3
@pytest.mark.skipif("x == 3")
def test_skip1():
pass
@pytest.mark.xfail("x == 3")
def test_boolean():
assert 0
""")
result = testdir.runpytest("-rsx")
result.stdout.fnmatch_lines([
"*SKIP*x == 3*",
"*XFAIL*test_boolean*",
"*x == 3*",
])
def test_direct_gives_error(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skipif(True)
def test_skip1():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*1 error*",
])
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines([
"*skipif(*condition)*skip*",
"*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
])
def test_xfail_test_setup_exception(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
0 / 0
""")
p = testdir.makepyfile("""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""")
result = testdir.runpytest(p)
assert result.ret == 0
assert 'xfailed' in result.stdout.str()
assert 'xpassed' not in result.stdout.str()
def test_imperativeskip_on_xfail_test(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.xfail
def test_that_fails():
assert 0
@pytest.mark.skipif("True")
def test_hello():
pass
""")
testdir.makeconftest("""
import pytest
def pytest_runtest_setup(item):
pytest.skip("abc")
""")
result = testdir.runpytest("-rsxX")
result.stdout.fnmatch_lines_random("""
*SKIP*abc*
*SKIP*condition: True*
*2 skipped*
""")
class TestBooleanCondition:
def test_skipif(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skipif(True, reason="True123")
def test_func1():
pass
@pytest.mark.skipif(False, reason="True123")
def test_func2():
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*1 passed*1 skipped*
""")
def test_skipif_noreason(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.skipif(True)
def test_func():
pass
""")
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines("""
*1 error*
""")
def test_xfail(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.xfail(True, reason="True123")
def test_func():
assert 0
""")
result = testdir.runpytest("-rxs")
result.stdout.fnmatch_lines("""
*XFAIL*
*True123*
*1 xfail*
""")
def test_xfail_item(testdir):
# Ensure pytest.xfail works with non-Python Item
testdir.makeconftest("""
import pytest
class MyItem(pytest.Item):
nodeid = 'foo'
def runtest(self):
pytest.xfail("Expected Failure")
def pytest_collect_file(path, parent):
return MyItem("foo", parent)
""")
result = testdir.inline_run()
passed, skipped, failed = result.listoutcomes()
assert not failed
xfailed = [r for r in skipped if hasattr(r, 'wasxfail')]
assert xfailed
| JonathonSonesen/pytest | testing/test_skipping.py | Python | mit | 27,803 |
import statsmodels.api as sm
from load_macrodata import dta
cf_cycles, cf_trend = sm.tsa.filters.cffilter(dta[["infl", "unemp"]])
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
cf_cycles.plot(ax=ax, style=['r--', 'b-'])
| statsmodels/statsmodels.github.io | v0.11.1/plots/cff_plot.py | Python | bsd-3-clause | 233 |
from distutils.core import setup
try:
import sys
doc = ""
if "sdist" in sys.argv:
import threadio
doc = threadio.__doc__
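        # Strip every [HIDE]...[/HIDE] span from the module docstring before it is used
        # as the package description below.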
while "[HIDE]" in doc:
a, _, c = doc.partition("[HIDE]")
doc = a + c.partition("[/HIDE]")[2]
except ImportError:
pass
setup(
name="threadio",
version="0.1",
author="EcmaXp",
author_email="[email protected]",
description=(doc.strip().splitlines() or [""]).pop(0).strip(),
long_description=doc.strip(),
py_modules=["threadio"],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| EcmaXp/threadio | setup.py | Python | mit | 870 |
saturday = True
sunday = False
if saturday or sunday:
print("Ok, you can sleep in")
team = input("Enter your favourite hockey team; ").upper()
sport = input("Enter your favourite sport: ").upper()
if sport == "FOOTBALL" and team == "BARCELONA":
print("putu amo")
elif team == "MADRID" or team =="ATLETICO":
print("FCK")
else:
print("putu looser")
| aesquis/Introduccion-a-la-programacion-con-Python | 07-Decisiones-complejas-con-codigo/examples3or.py | Python | gpl-2.0 | 367 |
# -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.osv import expression
from openerp.tools import float_is_zero
from openerp.tools import float_compare, float_round
from openerp.tools.misc import formatLang
from openerp.exceptions import UserError, ValidationError
import time
import math
class AccountCashboxLine(models.Model):
""" Cash Box Details """
_name = 'account.cashbox.line'
_description = 'CashBox Line'
_rec_name = 'coin_value'
_order = 'coin_value'
@api.one
@api.depends('coin_value', 'number')
def _sub_total(self):
""" Calculates Sub total"""
self.subtotal = self.coin_value * self.number
coin_value = fields.Float(string='Coin/Bill Value', required=True, digits=0)
number = fields.Integer(string='Number of Coins/Bills', help='Opening Unit Numbers')
subtotal = fields.Float(compute='_sub_total', string='Subtotal', digits=0, readonly=True)
cashbox_id = fields.Many2one('account.bank.statement.cashbox', string="Cashbox")
class AccountBankStmtCashWizard(models.Model):
"""
Account Bank Statement popup that allows entering cash details.
"""
_name = 'account.bank.statement.cashbox'
_description = 'Account Bank Statement Cashbox Details'
cashbox_lines_ids = fields.One2many('account.cashbox.line', 'cashbox_id', string='Cashbox Lines')
@api.multi
def validate(self):
bnk_stmt_id = self.env.context.get('bank_statement_id', False) or self.env.context.get('active_id', False)
bnk_stmt = self.env['account.bank.statement'].browse(bnk_stmt_id)
total = 0.0
for lines in self.cashbox_lines_ids:
total += lines.subtotal
if self.env.context.get('balance', False) == 'start':
#starting balance
bnk_stmt.write({'balance_start': total, 'cashbox_start_id': self.id})
else:
#closing balance
bnk_stmt.write({'balance_end_real': total, 'cashbox_end_id': self.id})
return {'type': 'ir.actions.act_window_close'}
class AccountBankStmtCloseCheck(models.TransientModel):
"""
Account Bank Statement wizard that check that closing balance is correct.
"""
_name = 'account.bank.statement.closebalance'
_description = 'Account Bank Statement closing balance'
@api.multi
def validate(self):
bnk_stmt_id = self.env.context.get('active_id', False)
if bnk_stmt_id:
self.env['account.bank.statement'].browse(bnk_stmt_id).button_confirm_bank()
return {'type': 'ir.actions.act_window_close'}
class AccountBankStatement(models.Model):
@api.one
@api.depends('line_ids', 'balance_start', 'line_ids.amount', 'balance_end_real')
def _end_balance(self):
self.total_entry_encoding = sum([line.amount for line in self.line_ids])
self.balance_end = self.balance_start + self.total_entry_encoding
self.difference = self.balance_end_real - self.balance_end
@api.multi
def _is_difference_zero(self):
for bank_stmt in self:
bank_stmt.is_difference_zero = float_is_zero(bank_stmt.difference, precision_digits=bank_stmt.currency_id.decimal_places)
@api.one
@api.depends('journal_id')
def _compute_currency(self):
self.currency_id = self.journal_id.currency_id or self.company_id.currency_id
@api.one
@api.depends('line_ids.journal_entry_ids')
def _check_lines_reconciled(self):
self.all_lines_reconciled = all([line.journal_entry_ids.ids or line.account_id.id for line in self.line_ids])
@api.model
def _default_journal(self):
journal_type = self.env.context.get('journal_type', False)
company_id = self.env['res.company']._company_default_get('account.bank.statement').id
if journal_type:
journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
if journals:
return journals[0]
return False
@api.multi
def _get_opening_balance(self, journal_id):
last_bnk_stmt = self.search([('journal_id', '=', journal_id)], limit=1)
if last_bnk_stmt:
return last_bnk_stmt.balance_end
return 0
@api.multi
def _set_opening_balance(self, journal_id):
self.balance_start = self._get_opening_balance(journal_id)
@api.model
def _default_opening_balance(self):
#Search last bank statement and set current opening balance as closing balance of previous one
journal_id = self._context.get('default_journal_id', False) or self._context.get('journal_id', False)
if journal_id:
return self._get_opening_balance(journal_id)
return 0
_name = "account.bank.statement"
_description = "Bank Statement"
_order = "date desc, id desc"
_inherit = ['mail.thread']
name = fields.Char(string='Reference', states={'open': [('readonly', False)]}, copy=False, readonly=True)
date = fields.Date(required=True, states={'confirm': [('readonly', True)]}, select=True, copy=False, default=fields.Date.context_today)
date_done = fields.Datetime(string="Closed On")
balance_start = fields.Monetary(string='Starting Balance', states={'confirm': [('readonly', True)]}, default=_default_opening_balance)
balance_end_real = fields.Monetary('Ending Balance', states={'confirm': [('readonly', True)]})
state = fields.Selection([('open', 'New'), ('confirm', 'Validated')], string='Status', required=True, readonly=True, copy=False, default='open')
currency_id = fields.Many2one('res.currency', compute='_compute_currency', oldname='currency', string="Currency")
journal_id = fields.Many2one('account.journal', string='Journal', required=True, states={'confirm': [('readonly', True)]}, default=_default_journal)
journal_type = fields.Selection(related='journal_id.type', help="Technical field used for usability purposes")
company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', store=True, readonly=True,
default=lambda self: self.env['res.company']._company_default_get('account.bank.statement'))
total_entry_encoding = fields.Monetary('Transactions Subtotal', compute='_end_balance', store=True, help="Total of transaction lines.")
balance_end = fields.Monetary('Computed Balance', compute='_end_balance', store=True, help='Balance as calculated based on Opening Balance and transaction lines')
difference = fields.Monetary(compute='_end_balance', store=True, help="Difference between the computed ending balance and the specified ending balance.")
line_ids = fields.One2many('account.bank.statement.line', 'statement_id', string='Statement lines', states={'confirm': [('readonly', True)]}, copy=True)
move_line_ids = fields.One2many('account.move.line', 'statement_id', string='Entry lines', states={'confirm': [('readonly', True)]})
all_lines_reconciled = fields.Boolean(compute='_check_lines_reconciled')
user_id = fields.Many2one('res.users', string='Responsible', required=False, default=lambda self: self.env.user)
cashbox_start_id = fields.Many2one('account.bank.statement.cashbox', string="Starting Cashbox")
cashbox_end_id = fields.Many2one('account.bank.statement.cashbox', string="Ending Cashbox")
is_difference_zero = fields.Boolean(compute='_is_difference_zero', string='Is zero', help="Check if difference is zero.")
@api.onchange('journal_id')
def onchange_journal_id(self):
self._set_opening_balance(self.journal_id.id)
@api.multi
def _balance_check(self):
for stmt in self:
if not stmt.currency_id.is_zero(stmt.difference):
if stmt.journal_type == 'cash':
if stmt.difference < 0.0:
account = stmt.journal_id.loss_account_id
name = _('Loss')
else:
# statement.difference > 0.0
account = stmt.journal_id.profit_account_id
name = _('Profit')
if not account:
raise UserError(_('There is no account defined on the journal %s for %s involved in a cash difference.') % (stmt.journal_id.name, name))
values = {
'statement_id': stmt.id,
'account_id': account.id,
'amount': stmt.difference,
'name': _("Cash difference observed during the counting (%s)") % name,
}
self.env['account.bank.statement.line'].create(values)
else:
balance_end_real = formatLang(self.env, stmt.balance_end_real, currency_obj=stmt.currency_id)
balance_end = formatLang(self.env, stmt.balance_end, currency_obj=stmt.currency_id)
raise UserError(_('The ending balance is incorrect !\nThe expected balance (%s) is different from the computed one. (%s)')
% (balance_end_real, balance_end))
return True
@api.model
def create(self, vals):
if not vals.get('name'):
journal_id = vals.get('journal_id', self._context.get('default_journal_id', False))
journal = self.env['account.journal'].browse(journal_id)
vals['name'] = journal.sequence_id.with_context(ir_sequence_date=vals.get('date')).next_by_id()
return super(AccountBankStatement, self).create(vals)
@api.multi
def unlink(self):
for statement in self:
if statement.state != 'open':
raise UserError(_('In order to delete a bank statement, you must first cancel it to delete related journal items.'))
# Explicitly unlink bank statement lines so it will check that the related journal entries have been deleted first
statement.line_ids.unlink()
return super(AccountBankStatement, self).unlink()
@api.multi
def open_cashbox_id(self):
context = dict(self.env.context or {})
if context.get('cashbox_id'):
context['active_id'] = self.id
return {
'name': _('Cash Control'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.bank.statement.cashbox',
'view_id': self.env.ref('account.view_account_bnk_stmt_cashbox').id,
'type': 'ir.actions.act_window',
'res_id': self.env.context.get('cashbox_id'),
'context': context,
'target': 'new'
}
@api.multi
def button_cancel(self):
for statement in self:
if any(line.journal_entry_ids.ids for line in statement.line_ids):
raise UserError(_('A statement cannot be canceled when its lines are reconciled.'))
self.state = 'open'
@api.multi
def check_confirm_bank(self):
if self.journal_type == 'cash' and not self.currency_id.is_zero(self.difference):
action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_view_account_bnk_stmt_check')
if action_rec:
action = action_rec.read([])[0]
return action
return self.button_confirm_bank()
@api.multi
def button_confirm_bank(self):
self._balance_check()
statements = self.filtered(lambda r: r.state == 'open')
for statement in statements:
moves = self.env['account.move']
for st_line in statement.line_ids:
if st_line.account_id and not st_line.journal_entry_ids.ids:
st_line.fast_counterpart_creation()
elif not st_line.journal_entry_ids.ids:
raise UserError(_('All the account entries lines must be processed in order to close the statement.'))
moves = (moves | st_line.journal_entry_ids)
if moves:
moves.post()
statement.message_post(body=_('Statement %s confirmed, journal items were created.') % (statement.name,))
statements.link_bank_to_partner()
statements.write({'state': 'confirm', 'date_done': time.strftime("%Y-%m-%d %H:%M:%S")})
@api.multi
def button_journal_entries(self):
context = dict(self._context or {})
context['journal_id'] = self.journal_id.id
return {
'name': _('Journal Items'),
'view_type': 'form',
'view_mode': 'tree',
'res_model': 'account.move.line',
'view_id': False,
'type': 'ir.actions.act_window',
'domain': [('statement_id', 'in', self.ids)],
'context': context,
}
@api.multi
def button_open(self):
""" Changes statement state to Running."""
for statement in self:
if not statement.name:
                context = {'ir_sequence_date': statement.date}
if statement.journal_id.sequence_id:
st_number = statement.journal_id.sequence_id.with_context(context).next_by_id()
else:
SequenceObj = self.env['ir.sequence']
st_number = SequenceObj.with_context(context).next_by_code('account.bank.statement')
statement.name = st_number
statement.state = 'open'
@api.multi
def reconciliation_widget_preprocess(self):
""" Get statement lines of the specified statements or all unreconciled statement lines and try to automatically reconcile them / find them a partner.
Return ids of statement lines left to reconcile and other data for the reconciliation widget.
"""
statements = self
bsl_obj = self.env['account.bank.statement.line']
# NB : The field account_id can be used at the statement line creation/import to avoid the reconciliation process on it later on,
# this is why we filter out statements lines where account_id is set
st_lines_filter = [('journal_entry_ids', '=', False), ('account_id', '=', False)]
if statements:
st_lines_filter += [('statement_id', 'in', statements.ids)]
# Try to automatically reconcile statement lines
automatic_reconciliation_entries = []
st_lines_left = self.env['account.bank.statement.line']
for st_line in bsl_obj.search(st_lines_filter):
res = st_line.auto_reconcile()
if not res:
st_lines_left = (st_lines_left | st_line)
else:
automatic_reconciliation_entries.append(res.ids)
# Try to set statement line's partner
for st_line in st_lines_left:
if st_line.name and not st_line.partner_id:
additional_domain = [('ref', '=', st_line.name)]
match_recs = st_line.get_move_lines_for_reconciliation(limit=1, additional_domain=additional_domain, overlook_partner=True)
if match_recs and match_recs[0].partner_id:
st_line.write({'partner_id': match_recs[0].partner_id.id})
# Collect various informations for the reconciliation widget
notifications = []
num_auto_reconciled = len(automatic_reconciliation_entries)
if num_auto_reconciled > 0:
auto_reconciled_message = num_auto_reconciled > 1 \
and _("%d transactions were automatically reconciled.") % num_auto_reconciled \
or _("1 transaction was automatically reconciled.")
notifications += [{
'type': 'info',
'message': auto_reconciled_message,
'details': {
'name': _("Automatically reconciled items"),
'model': 'account.move',
'ids': automatic_reconciliation_entries
}
}]
lines = []
for el in statements:
lines.extend(el.line_ids.ids)
lines = list(set(lines))
return {
'st_lines_ids': st_lines_left.ids,
'notifications': notifications,
'statement_name': len(statements) == 1 and statements[0].name or False,
'num_already_reconciled_lines': statements and bsl_obj.search_count([('journal_entry_ids', '!=', False), ('id', 'in', lines)]) or 0,
}
@api.multi
def link_bank_to_partner(self):
for statement in self:
for st_line in statement.line_ids:
if st_line.bank_account_id and st_line.partner_id and st_line.bank_account_id.partner_id != st_line.partner_id:
st_line.bank_account_id.partner_id = st_line.partner_id
class AccountBankStatementLine(models.Model):
_name = "account.bank.statement.line"
_description = "Bank Statement Line"
_order = "statement_id desc, sequence"
_inherit = ['ir.needaction_mixin']
name = fields.Char(string='Memo', required=True)
date = fields.Date(required=True, default=lambda self: self._context.get('date', fields.Date.context_today(self)))
amount = fields.Monetary(digits=0, currency_field='journal_currency_id')
journal_currency_id = fields.Many2one('res.currency', related='statement_id.currency_id',
help='Utility field to express amount currency', readonly=True)
partner_id = fields.Many2one('res.partner', string='Partner')
bank_account_id = fields.Many2one('res.partner.bank', string='Bank Account')
account_id = fields.Many2one('account.account', string='Counterpart Account', domain=[('deprecated', '=', False)],
help="This technical field can be used at the statement line creation/import time in order to avoid the reconciliation"
" process on it later on. The statement line will simply create a counterpart on this account")
statement_id = fields.Many2one('account.bank.statement', string='Statement', index=True, required=True, ondelete='cascade')
journal_id = fields.Many2one('account.journal', related='statement_id.journal_id', string='Journal', store=True, readonly=True)
partner_name = fields.Char(help="This field is used to record the third party name when importing bank statement in electronic format,"
" when the partner doesn't exist yet in the database (or cannot be found).")
ref = fields.Char(string='Reference')
note = fields.Text(string='Notes')
sequence = fields.Integer(index=True, help="Gives the sequence order when displaying a list of bank statement lines.", default=1)
company_id = fields.Many2one('res.company', related='statement_id.company_id', string='Company', store=True, readonly=True)
journal_entry_ids = fields.One2many('account.move', 'statement_line_id', 'Journal Entries', copy=False, readonly=True)
amount_currency = fields.Monetary(help="The amount expressed in an optional other currency if it is a multi-currency entry.")
currency_id = fields.Many2one('res.currency', string='Currency', help="The optional other currency if it is a multi-currency entry.")
@api.one
@api.constrains('amount')
def _check_amount(self):
        # This constraint could possibly reveal flaws in bank statement import (e.g. inability to
        # support hacks such as using dummy transactions to give additional information)
if self.amount == 0:
raise ValidationError(_('A transaction can\'t have a 0 amount.'))
@api.one
@api.constrains('amount', 'amount_currency')
def _check_amount_currency(self):
if self.amount_currency != 0 and self.amount == 0:
raise ValidationError(_('If "Amount Currency" is specified, then "Amount" must be as well.'))
@api.multi
def unlink(self):
for line in self:
if line.journal_entry_ids.ids:
raise UserError(_('In order to delete a bank statement line, you must first cancel it to delete related journal items.'))
return super(AccountBankStatementLine, self).unlink()
@api.model
def _needaction_domain_get(self):
return [('journal_entry_ids', '=', False), ('account_id', '=', False)]
@api.multi
def button_cancel_reconciliation(self):
        # TOCHECK: might not behave as expected in case of reconciliations (match statement line with already
# registered payment) or partial reconciliations : it will completely remove the existing payment.
move_recs = self.env['account.move']
for st_line in self:
move_recs = (move_recs | st_line.journal_entry_ids)
if move_recs:
for move in move_recs:
move.line_ids.remove_move_reconcile()
move_recs.write({'statement_line_id': False})
move_recs.button_cancel()
move_recs.unlink()
####################################################
# Reconciliation interface methods
####################################################
@api.multi
def get_data_for_reconciliation_widget(self, excluded_ids=None):
""" Returns the data required to display a reconciliation widget, for each statement line in self """
excluded_ids = excluded_ids or []
ret = []
for st_line in self:
aml_recs = st_line.get_reconciliation_proposition(excluded_ids=excluded_ids)
target_currency = st_line.currency_id or st_line.journal_id.currency_id or st_line.journal_id.company_id.currency_id
rp = aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=st_line.date)
excluded_ids += [move_line['id'] for move_line in rp]
ret.append({
'st_line': st_line.get_statement_line_for_reconciliation_widget(),
'reconciliation_proposition': rp
})
return ret
def get_statement_line_for_reconciliation_widget(self):
""" Returns the data required by the bank statement reconciliation widget to display a statement line """
statement_currency = self.journal_id.currency_id or self.journal_id.company_id.currency_id
if self.amount_currency and self.currency_id:
amount = self.amount_currency
amount_currency = self.amount
amount_currency_str = amount_currency > 0 and amount_currency or -amount_currency
amount_currency_str = formatLang(self.env, amount_currency_str, currency_obj=statement_currency)
else:
amount = self.amount
amount_currency_str = ""
amount_str = formatLang(self.env, abs(amount), currency_obj=self.currency_id or statement_currency)
data = {
'id': self.id,
'ref': self.ref,
'note': self.note or "",
'name': self.name,
'date': self.date,
'amount': amount,
'amount_str': amount_str, # Amount in the statement line currency
'currency_id': self.currency_id.id or statement_currency.id,
'partner_id': self.partner_id.id,
'journal_id': self.journal_id.id,
'statement_id': self.statement_id.id,
'account_code': self.journal_id.default_debit_account_id.code,
'account_name': self.journal_id.default_debit_account_id.name,
'partner_name': self.partner_id.name,
'communication_partner_name': self.partner_name,
'amount_currency_str': amount_currency_str, # Amount in the statement currency
'has_no_partner': not self.partner_id.id,
}
if self.partner_id:
if amount > 0:
data['open_balance_account_id'] = self.partner_id.property_account_receivable_id.id
else:
data['open_balance_account_id'] = self.partner_id.property_account_payable_id.id
return data
@api.multi
def get_move_lines_for_reconciliation_widget(self, excluded_ids=None, str=False, offset=0, limit=None):
""" Returns move lines for the bank statement reconciliation widget, formatted as a list of dicts
"""
aml_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str, offset=offset, limit=limit)
target_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
return aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=self.date)
####################################################
# Reconciliation methods
####################################################
def get_move_lines_for_reconciliation(self, excluded_ids=None, str=False, offset=0, limit=None, additional_domain=None, overlook_partner=False):
""" Return account.move.line records which can be used for bank statement reconciliation.
:param excluded_ids:
:param str:
:param offset:
:param limit:
:param additional_domain:
:param overlook_partner:
"""
# Domain to fetch registered payments (use case where you encode the payment before you get the bank statement)
reconciliation_aml_accounts = [self.journal_id.default_credit_account_id.id, self.journal_id.default_debit_account_id.id]
domain_reconciliation = ['&', ('statement_id', '=', False), ('account_id', 'in', reconciliation_aml_accounts)]
# Domain to fetch unreconciled payables/receivables (use case where you close invoices/refunds by reconciling your bank statements)
domain_matching = [('reconciled', '=', False)]
if self.partner_id.id or overlook_partner:
domain_matching = expression.AND([domain_matching, [('account_id.internal_type', 'in', ['payable', 'receivable'])]])
else:
# TODO : find out what use case this permits (match a check payment, registered on a journal whose account type is other instead of liquidity)
domain_matching = expression.AND([domain_matching, [('account_id.reconcile', '=', True)]])
# Let's add what applies to both
domain = expression.OR([domain_reconciliation, domain_matching])
if self.partner_id.id and not overlook_partner:
domain = expression.AND([domain, [('partner_id', '=', self.partner_id.id)]])
# Domain factorized for all reconciliation use cases
ctx = dict(self._context or {})
ctx['bank_statement_line'] = self
generic_domain = self.env['account.move.line'].with_context(ctx).domain_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str)
domain = expression.AND([domain, generic_domain])
# Domain from caller
if additional_domain is None:
additional_domain = []
else:
additional_domain = expression.normalize_domain(additional_domain)
domain = expression.AND([domain, additional_domain])
return self.env['account.move.line'].search(domain, offset=offset, limit=limit, order="date_maturity asc, id asc")
def _get_domain_maker_move_line_amount(self):
""" Returns a function that can create the appropriate domain to search on move.line amount based on statement.line currency/amount """
company_currency = self.journal_id.company_id.currency_id
st_line_currency = self.currency_id or self.journal_id.currency_id
currency = (st_line_currency and st_line_currency != company_currency) and st_line_currency.id or False
field = currency and 'amount_residual_currency' or 'amount_residual'
precision = st_line_currency and st_line_currency.decimal_places or company_currency.decimal_places
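        # Illustrative: for a statement line in the company currency, ret('=', 100.0) builds
        # [('amount_residual', '=', 100.0), ('currency_id', '=', False)], with the amount rounded
        # to the relevant currency precision.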
def ret(comparator, amount, p=precision, f=field, c=currency):
if comparator == '<':
if amount < 0:
domain = [(f, '<', 0), (f, '>', amount)]
else:
domain = [(f, '>', 0), (f, '<', amount)]
elif comparator == '=':
domain = [(f, '=', float_round(amount, precision_digits=p))]
else:
raise UserError(_("Programmation error : domain_maker_move_line_amount requires comparator '=' or '<'"))
domain += [('currency_id', '=', c)]
return domain
return ret
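    # Illustrative sketch (not part of the original module): assuming a statement
    # line in the company currency (so no foreign currency applies), the closure
    # returned above builds domains such as the following; the amounts are
    # invented.
    #
    #     ret('=', 100.0) -> [('amount_residual', '=', 100.0), ('currency_id', '=', False)]
    #     ret('<', -50.0) -> [('amount_residual', '<', 0), ('amount_residual', '>', -50.0), ('currency_id', '=', False)]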
def get_reconciliation_proposition(self, excluded_ids=None):
""" Returns move lines that constitute the best guess to reconcile a statement line
Note: it only looks for move lines in the same currency as the statement line.
"""
# Look for structured communication match
if self.name:
overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
domain = [('ref', '=', self.name)]
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=2, additional_domain=domain, overlook_partner=overlook_partner)
if match_recs and len(match_recs) == 1:
return match_recs
elif len(match_recs) == 0:
move = self.env['account.move'].search([('name', '=', self.name)], limit=1)
if move:
domain = [('move_id', '=', move.id)]
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=2, additional_domain=domain, overlook_partner=overlook_partner)
if match_recs and len(match_recs) == 1:
return match_recs
# How to compare statement line amount and move lines amount
amount_domain_maker = self._get_domain_maker_move_line_amount()
amount = self.amount_currency or self.amount
# Look for a single move line with the same amount
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=1, additional_domain=amount_domain_maker('=', amount))
if match_recs:
return match_recs
if not self.partner_id:
return self.env['account.move.line']
# Select move lines until their total amount is greater than the statement line amount
domain = [('reconciled', '=', False)]
domain += [('account_id.user_type_id.type', '=', amount > 0 and 'receivable' or 'payable')] # Make sure we can't mix receivable and payable
domain += amount_domain_maker('<', amount) # Will also enforce > 0
mv_lines = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=5, additional_domain=domain)
st_line_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
ret = self.env['account.move.line']
total = 0
for line in mv_lines:
total += line.currency_id and line.amount_residual_currency or line.amount_residual
if float_compare(total, abs(amount), precision_digits=st_line_currency.rounding) != -1:
break
ret = (ret | line)
return ret
def _get_move_lines_for_auto_reconcile(self):
""" Returns the move lines that the method auto_reconcile can use to try to reconcile the statement line """
pass
@api.multi
def auto_reconcile(self):
""" Try to automatically reconcile the statement.line ; return the counterpart journal entry/ies if the automatic reconciliation succeeded, False otherwise.
TODO : this method could be greatly improved and made extensible
"""
self.ensure_one()
match_recs = self.env['account.move.line']
# How to compare statement line amount and move lines amount
amount_domain_maker = self._get_domain_maker_move_line_amount()
equal_amount_domain = amount_domain_maker('=', self.amount_currency or self.amount)
# Look for structured communication match
if self.name:
overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
domain = equal_amount_domain + [('ref', '=', self.name)]
match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=domain, overlook_partner=overlook_partner)
if match_recs and len(match_recs) != 1:
return False
# Look for a single move line with the same partner, the same amount
if not match_recs:
if self.partner_id:
match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=equal_amount_domain)
if match_recs and len(match_recs) != 1:
return False
if not match_recs:
return False
# Now reconcile
counterpart_aml_dicts = []
payment_aml_rec = self.env['account.move.line']
for aml in match_recs:
if aml.account_id.internal_type == 'liquidity':
payment_aml_rec = (payment_aml_rec | aml)
else:
amount = aml.currency_id and aml.amount_residual_currency or aml.amount_residual
counterpart_aml_dicts.append({
'name': aml.name if aml.name != '/' else aml.move_id.name,
'debit': amount < 0 and -amount or 0,
'credit': amount > 0 and amount or 0,
'move_line': aml
})
try:
with self._cr.savepoint():
counterpart = self.process_reconciliation(counterpart_aml_dicts=counterpart_aml_dicts, payment_aml_rec=payment_aml_rec)
return counterpart
except UserError:
# A configuration / business logic error that makes it impossible to auto-reconcile should not be raised
# since automatic reconciliation is just an amenity and the user will get the same exception when manually
            # reconciling. Other types of exception are (hopefully) programming errors and should cause a stacktrace.
self.invalidate_cache()
self.env['account.move'].invalidate_cache()
self.env['account.move.line'].invalidate_cache()
return False
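    # Illustrative usage sketch (not part of the original module): a statement
    # import could try automatic matching line by line and keep track of the
    # lines that still need manual reconciliation; the record names used here
    # are assumptions.
    #
    #     unmatched = self.env['account.bank.statement.line']
    #     for st_line in statement.line_ids:
    #         if not st_line.journal_entry_ids and not st_line.auto_reconcile():
    #             unmatched |= st_line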
def _prepare_reconciliation_move(self, move_name):
""" Prepare the dict of values to create the move from a statement line. This method may be overridden to adapt domain logic
through model inheritance (make sure to call super() to establish a clean extension chain).
        :param char move_name: will be used as the name of the generated account move
:return: dict of value to create() the account.move
"""
return {
'statement_line_id': self.id,
'journal_id': self.statement_id.journal_id.id,
'date': self.date,
'name': move_name,
'ref': self.ref,
}
def _prepare_reconciliation_move_line(self, move, amount):
""" Prepare the dict of values to create the move line from a statement line.
:param recordset move: the account.move to link the move line
:param float amount: the amount of transaction that wasn't already reconciled
"""
company_currency = self.journal_id.company_id.currency_id
statement_currency = self.journal_id.currency_id or company_currency
st_line_currency = self.currency_id or statement_currency
amount_currency = False
if statement_currency != company_currency or st_line_currency != company_currency:
            # First get the ratio total amount / amount not already reconciled
if statement_currency == company_currency:
total_amount = self.amount
elif st_line_currency == company_currency:
total_amount = self.amount_currency
else:
total_amount = statement_currency.with_context({'date': self.date}).compute(self.amount, company_currency)
ratio = total_amount / amount
            # Then use it to adjust the statement.line field that corresponds to the move.line amount_currency
if statement_currency != company_currency:
amount_currency = self.amount * ratio
elif st_line_currency != company_currency:
amount_currency = self.amount_currency * ratio
return {
'name': self.name,
'date': self.date,
'ref': self.ref,
'move_id': move.id,
'partner_id': self.partner_id and self.partner_id.id or False,
'account_id': amount >= 0 \
and self.statement_id.journal_id.default_credit_account_id.id \
or self.statement_id.journal_id.default_debit_account_id.id,
'credit': amount < 0 and -amount or 0.0,
'debit': amount > 0 and amount or 0.0,
'statement_id': self.statement_id.id,
'journal_id': self.statement_id.journal_id.id,
'currency_id': statement_currency != company_currency and statement_currency.id or (st_line_currency != company_currency and st_line_currency.id or False),
'amount_currency': amount_currency,
}
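    # Illustrative note (not part of the original module): for a positive
    # remaining amount the generated line books a debit on the journal's default
    # credit account, while a negative amount books a credit on the default debit
    # account; e.g. amount = -75.0 yields credit = 75.0 and debit = 0.0 (the
    # figure is invented).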
@api.v7
def process_reconciliations(self, cr, uid, ids, data, context=None):
""" Handles data sent from the bank statement reconciliation widget (and can otherwise serve as an old-API bridge)
:param list of dicts data: must contains the keys 'counterpart_aml_dicts', 'payment_aml_ids' and 'new_aml_dicts',
whose value is the same as described in process_reconciliation except that ids are used instead of recordsets.
"""
aml_obj = self.pool['account.move.line']
for id, datum in zip(ids, data):
st_line = self.browse(cr, uid, id, context)
payment_aml_rec = aml_obj.browse(cr, uid, datum.get('payment_aml_ids', []), context)
for aml_dict in datum.get('counterpart_aml_dicts', []):
aml_dict['move_line'] = aml_obj.browse(cr, uid, aml_dict['counterpart_aml_id'], context)
del aml_dict['counterpart_aml_id']
st_line.process_reconciliation(datum.get('counterpart_aml_dicts', []), payment_aml_rec, datum.get('new_aml_dicts', []))
def fast_counterpart_creation(self):
for st_line in self:
# Technical functionality to automatically reconcile by creating a new move line
vals = {
'name': st_line.name,
'debit': st_line.amount < 0 and -st_line.amount or 0.0,
'credit': st_line.amount > 0 and st_line.amount or 0.0,
'account_id': st_line.account_id.id,
}
st_line.process_reconciliation(new_aml_dicts=[vals])
def process_reconciliation(self, counterpart_aml_dicts=None, payment_aml_rec=None, new_aml_dicts=None):
""" Match statement lines with existing payments (eg. checks) and/or payables/receivables (eg. invoices and refunds) and/or new move lines (eg. write-offs).
If any new journal item needs to be created (via new_aml_dicts or counterpart_aml_dicts), a new journal entry will be created and will contain those
items, as well as a journal item for the bank statement line.
Finally, mark the statement line as reconciled by putting the matched moves ids in the column journal_entry_ids.
:param (list of dicts) counterpart_aml_dicts: move lines to create to reconcile with existing payables/receivables.
The expected keys are :
- 'name'
- 'debit'
- 'credit'
- 'move_line'
# The move line to reconcile (partially if specified debit/credit is lower than move line's credit/debit)
:param (list of recordsets) payment_aml_rec: recordset move lines representing existing payments (which are already fully reconciled)
:param (list of dicts) new_aml_dicts: move lines to create. The expected keys are :
- 'name'
- 'debit'
- 'credit'
- 'account_id'
- (optional) 'tax_ids'
- (optional) Other account.move.line fields like analytic_account_id or analytics_id
:returns: The journal entries with which the transaction was matched. If there was at least an entry in counterpart_aml_dicts or new_aml_dicts, this list contains
the move created by the reconciliation, containing entries for the statement.line (1), the counterpart move lines (0..*) and the new move lines (0..*).
"""
counterpart_aml_dicts = counterpart_aml_dicts or []
payment_aml_rec = payment_aml_rec or self.env['account.move.line']
new_aml_dicts = new_aml_dicts or []
aml_obj = self.env['account.move.line']
company_currency = self.journal_id.company_id.currency_id
statement_currency = self.journal_id.currency_id or company_currency
st_line_currency = self.currency_id or statement_currency
counterpart_moves = self.env['account.move']
# Check and prepare received data
if self.journal_entry_ids.ids:
raise UserError(_('The bank statement line was already reconciled.'))
if any(rec.statement_id for rec in payment_aml_rec):
raise UserError(_('A selected move line was already reconciled.'))
for aml_dict in counterpart_aml_dicts:
if aml_dict['move_line'].reconciled:
raise UserError(_('A selected move line was already reconciled.'))
if isinstance(aml_dict['move_line'], (int, long)):
aml_dict['move_line'] = aml_obj.browse(aml_dict['move_line'])
for aml_dict in (counterpart_aml_dicts + new_aml_dicts):
if aml_dict.get('tax_ids') and aml_dict['tax_ids'] and isinstance(aml_dict['tax_ids'][0], (int, long)):
# Transform the value in the format required for One2many and Many2many fields
aml_dict['tax_ids'] = map(lambda id: (4, id, None), aml_dict['tax_ids'])
# Fully reconciled moves are just linked to the bank statement
for aml_rec in payment_aml_rec:
aml_rec.write({'statement_id': self.statement_id.id})
aml_rec.move_id.write({'statement_line_id': self.id})
counterpart_moves = (counterpart_moves | aml_rec.move_id)
# Create move line(s). Either matching an existing journal entry (eg. invoice), in which
# case we reconcile the existing and the new move lines together, or being a write-off.
if counterpart_aml_dicts or new_aml_dicts:
st_line_currency = self.currency_id or statement_currency
st_line_currency_rate = self.currency_id and (self.amount_currency / self.amount) or False
# Create the move
move_name = (self.statement_id.name or self.name) + "/" + str(self.sequence)
move_vals = self._prepare_reconciliation_move(move_name)
move = self.env['account.move'].create(move_vals)
counterpart_moves = (counterpart_moves | move)
# Complete dicts to create both counterpart move lines and write-offs
to_create = (counterpart_aml_dicts + new_aml_dicts)
ctx = dict(self._context, date=self.date)
for aml_dict in to_create:
aml_dict['move_id'] = move.id
aml_dict['partner_id'] = self.partner_id.id
aml_dict['statement_id'] = self.statement_id.id
if st_line_currency.id != company_currency.id:
aml_dict['amount_currency'] = aml_dict['debit'] - aml_dict['credit']
aml_dict['currency_id'] = st_line_currency.id
if self.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:
# Statement is in company currency but the transaction is in foreign currency
aml_dict['debit'] = company_currency.round(aml_dict['debit'] / st_line_currency_rate)
aml_dict['credit'] = company_currency.round(aml_dict['credit'] / st_line_currency_rate)
elif self.currency_id and st_line_currency_rate:
# Statement is in foreign currency and the transaction is in another one
aml_dict['debit'] = statement_currency.with_context(ctx).compute(aml_dict['debit'] / st_line_currency_rate, company_currency)
aml_dict['credit'] = statement_currency.with_context(ctx).compute(aml_dict['credit'] / st_line_currency_rate, company_currency)
else:
# Statement is in foreign currency and no extra currency is given for the transaction
aml_dict['debit'] = st_line_currency.with_context(ctx).compute(aml_dict['debit'], company_currency)
aml_dict['credit'] = st_line_currency.with_context(ctx).compute(aml_dict['credit'], company_currency)
elif statement_currency.id != company_currency.id:
# Statement is in foreign currency but the transaction is in company currency
prorata_factor = (aml_dict['debit'] - aml_dict['credit']) / self.amount_currency
aml_dict['amount_currency'] = prorata_factor * self.amount
aml_dict['currency_id'] = statement_currency.id
# Create the move line for the statement line using the total credit/debit of the counterpart
# This leaves out the amount already reconciled and avoids rounding errors from currency conversion
st_line_amount = sum(aml_dict['credit'] - aml_dict['debit'] for aml_dict in to_create)
aml_obj.with_context(check_move_validity=False).create(self._prepare_reconciliation_move_line(move, st_line_amount))
# Create write-offs
for aml_dict in new_aml_dicts:
aml_obj.with_context(check_move_validity=False).create(aml_dict)
# Create counterpart move lines and reconcile them
for aml_dict in counterpart_aml_dicts:
if aml_dict['move_line'].partner_id.id:
aml_dict['partner_id'] = aml_dict['move_line'].partner_id.id
aml_dict['account_id'] = aml_dict['move_line'].account_id.id
counterpart_move_line = aml_dict.pop('move_line')
if counterpart_move_line.currency_id and counterpart_move_line.currency_id != company_currency and not aml_dict.get('currency_id'):
aml_dict['currency_id'] = counterpart_move_line.currency_id.id
aml_dict['amount_currency'] = company_currency.with_context(ctx).compute(aml_dict['debit'] - aml_dict['credit'], counterpart_move_line.currency_id)
new_aml = aml_obj.with_context(check_move_validity=False).create(aml_dict)
(new_aml | counterpart_move_line).reconcile()
move.post()
counterpart_moves.assert_balanced()
return counterpart_moves
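    # Illustrative usage sketch (not part of the original module): reconciling a
    # statement line against a single open invoice line could look like the
    # following; the variable names and the amount are invented.
    #
    #     st_line.process_reconciliation(counterpart_aml_dicts=[{
    #         'name': invoice_line.name,
    #         'debit': 0.0,
    #         'credit': 100.0,
    #         'move_line': invoice_line,
    #     }])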
| web30s/odoo-9.0c-20160402 | hello/templates/openerp/addons/account/models/account_bank_statement.py | Python | gpl-3.0 | 47,259 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TrinityX documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 25 14:04:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'rinoh.frontend.sphinx',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'TrinityX'
copyright = '2020, ClusterVision Solutions BV'
author = 'ClusterVision Solutions BV'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '12'
# The full version, including alpha/beta/rc tags.
release = '12.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'none'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
#html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
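# For the sphinx_rtd_theme selected above one could, for example, tweak the
# navigation behaviour (untested sketch; option names as documented by the
# theme):
#
# html_theme_options = {
#     'collapse_navigation': False,
#     'navigation_depth': 3,
# }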
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = 'TrinityX r12'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'trinityxlogo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TrinityXdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TrinityX.tex', 'TrinityX Documentation',
'ClusterVision Solutions BV', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'trinityx', 'TrinityX Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TrinityX', 'TrinityX Documentation',
author, 'TrinityX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| clustervision/trinityX | doc/conf.py | Python | gpl-2.0 | 9,991 |
# Natural Language Toolkit: Third-Party Contributions
# Contributions from the Massachusetts
# Institute of Technology
#
# Copyright (C) 2004 The original contributors
# URL: <http://nltk.sf.net>
#
# $Id: __init__.py,v 1.1 2004/03/09 05:06:28 stevenbird Exp $
"""
Contributions to NLTK made by students at Massachusetts
Institute of Technology
"""
| ronaldahmed/SLAM-for-ugv | neural-navigation-with-lstm/MARCO/nltk_contrib/mit/__init__.py | Python | mit | 349 |
# This file is distributed under the terms of the GNU General Public license.
# Copyright (C) 2019 Erik Ogenvik (See the file COPYING for details).
import server
from atlas import Operation, Entity, Oplist
from physics import Vector3D, Quaternion
from world.utils import Usage
def shoot_in_direction(direction, instance, res):
Usage.set_cooldown_on_attached(instance.tool, instance.actor)
direction.normalize()
# Adjust the start position of the projectile, so it's outside of the actor, at mid height
start_adjust = Vector3D(direction)
start_adjust.y = 0
start_adjust.normalize()
start_adjust.y = instance.actor.location.bbox.high_corner.y * 0.8
new_loc = instance.actor.location.copy()
new_loc.pos += start_adjust
new_loc.orientation = Quaternion(Vector3D(0, 0, 1), direction, Vector3D(1, 0, 0))
mode_data = {"mode": "projectile", "$eid": instance.actor.id}
# TODO: match with animation in client
res.append(instance.actor.start_action("wand/releasing", 1))
res.append(Operation("create",
Entity(parent="fireball", location=new_loc, velocity=direction * 60, mode="projectile",
mode_data=mode_data, damage_explosion=instance.tool.props.damage),
to=instance.tool.id))
def fireball(instance):
res = Oplist()
direction = instance.get_arg("direction", 0)
shoot_in_direction(direction, instance, res)
set_op = Operation("set", Entity(instance.tool.id, {"charges!subtract": 1}), to=instance.tool)
res.append(set_op)
return server.OPERATION_BLOCKED, res
def shoot_poison_in_direction(direction, instance, res):
Usage.set_cooldown_on_attached(instance.tool, instance.actor)
direction.normalize()
# Adjust the start position of the projectile, so it's outside of the actor, at mid height
start_adjust = Vector3D(direction)
start_adjust.y = 0
start_adjust.normalize()
start_adjust.y = instance.actor.location.bbox.high_corner.y * 0.8
new_loc = instance.actor.location.copy()
new_loc.pos += start_adjust
new_loc.orientation = Quaternion(Vector3D(0, 0, 1), direction, Vector3D(1, 0, 0))
mode_data = {"mode": "projectile", "$eid": instance.actor.id}
# TODO: match with animation in client
res.append(instance.actor.start_action("wand/releasing", 1))
res.append(Operation("create",
Entity(parent="poisonball", location=new_loc, velocity=direction * 60, mode="projectile",
mode_data=mode_data, damage_poison=instance.tool.props.damage),
to=instance.tool.id))
def poison(instance):
res = Oplist()
direction = instance.get_arg("direction", 0)
shoot_poison_in_direction(direction, instance, res)
set_op = Operation("set", Entity(instance.tool.id, {"charges!subtract": 1}), to=instance.tool)
res.append(set_op)
return server.OPERATION_BLOCKED, res
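# Illustrative note (not part of the original script): with an actor whose
# bounding box is 1.8 units high and a horizontal firing direction of (1, 0, 0),
# start_adjust above works out to roughly (1, 1.44, 0), i.e. one unit in front
# of the actor at 0.8 of its height, so the projectile spawns outside the
# actor's own geometry. The numbers are assumptions chosen for the example.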
| worldforge/cyphesis | data/rulesets/deeds/scripts/world/objects/tools/Wand.py | Python | gpl-2.0 | 2,978 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, since more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to these primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
from __future__ import absolute_import
import abc
from builtins import object
from builtins import range
from functools import total_ordering
from future.utils import with_metaclass
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from apache_beam.coders import coders
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import standard_window_fns_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
__all__ = [
'TimestampCombiner',
'WindowFn',
'BoundedWindow',
'IntervalWindow',
'TimestampedValue',
'GlobalWindow',
'NonMergingWindowFn',
'GlobalWindows',
'FixedWindows',
'SlidingWindows',
'Sessions',
]
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class TimestampCombiner(object):
"""Determines how output timestamps of grouping operations are assigned."""
OUTPUT_AT_EOW = beam_runner_api_pb2.OutputTime.END_OF_WINDOW
OUTPUT_AT_EARLIEST = beam_runner_api_pb2.OutputTime.EARLIEST_IN_PANE
OUTPUT_AT_LATEST = beam_runner_api_pb2.OutputTime.LATEST_IN_PANE
# TODO(robertwb): Add this to the runner API or remove it.
OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'
@staticmethod
def get_impl(timestamp_combiner, window_fn):
if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW:
return timeutil.OutputAtEndOfWindowImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST:
return timeutil.OutputAtEarliestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_LATEST:
return timeutil.OutputAtLatestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
else:
raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
class WindowFn(with_metaclass(abc.ABCMeta, urns.RunnerApiFn)):
"""An abstract windowing function defining a basic assign and merge."""
class AssignContext(object):
"""Context passed to WindowFn.assign()."""
def __init__(self, timestamp, element=None, window=None):
self.timestamp = Timestamp.of(timestamp)
self.element = element
self.window = window
@abc.abstractmethod
def assign(self, assign_context):
"""Associates windows to an element.
Arguments:
assign_context: Instance of AssignContext.
Returns:
An iterable of BoundedWindow.
"""
raise NotImplementedError
class MergeContext(object):
"""Context passed to WindowFn.merge() to perform merging, if any."""
def __init__(self, windows):
self.windows = list(windows)
def merge(self, to_be_merged, merge_result):
raise NotImplementedError
@abc.abstractmethod
def merge(self, merge_context):
"""Returns a window that is the result of merging a set of windows."""
raise NotImplementedError
def is_merging(self):
"""Returns whether this WindowFn merges windows."""
return True
@abc.abstractmethod
def get_window_coder(self):
raise NotImplementedError
def get_transformed_output_time(self, window, input_timestamp): # pylint: disable=unused-argument
"""Given input time and output window, returns output time for window.
If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
Windowing, the output timestamp for the given window will be the earliest
of the timestamps returned by get_transformed_output_time() for elements
of the window.
Arguments:
window: Output window of element.
input_timestamp: Input timestamp of element as a timeutil.Timestamp
object.
Returns:
Transformed timestamp.
"""
# By default, just return the input timestamp.
return input_timestamp
urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_WINDOWFN)
class BoundedWindow(object):
"""A window for timestamps in range (-infinity, end).
Attributes:
end: End of window.
"""
def __init__(self, end):
self.end = Timestamp.of(end)
def max_timestamp(self):
return self.end.predecessor()
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
# Order first by endpoint, then arbitrarily
return self.end != other.end or hash(self) != hash(other)
def __lt__(self, other):
if self.end != other.end:
return self.end < other.end
return hash(self) < hash(other)
def __le__(self, other):
if self.end != other.end:
return self.end <= other.end
return hash(self) <= hash(other)
def __gt__(self, other):
if self.end != other.end:
return self.end > other.end
return hash(self) > hash(other)
def __ge__(self, other):
if self.end != other.end:
return self.end >= other.end
return hash(self) >= hash(other)
def __hash__(self):
raise NotImplementedError
def __repr__(self):
return '[?, %s)' % float(self.end)
@total_ordering
class IntervalWindow(windowed_value._IntervalWindowBase, BoundedWindow):
"""A window for timestamps in range [start, end).
Attributes:
start: Start of window as seconds since Unix epoch.
end: End of window as seconds since Unix epoch.
"""
def __lt__(self, other):
if self.end != other.end:
return self.end < other.end
return hash(self) < hash(other)
def intersects(self, other):
return other.start < self.end or self.start < other.end
def union(self, other):
return IntervalWindow(
min(self.start, other.start), max(self.end, other.end))
@total_ordering
class TimestampedValue(object):
"""A timestamped value having a value and a timestamp.
Attributes:
value: The underlying value.
timestamp: Timestamp associated with the value as seconds since Unix epoch.
"""
def __init__(self, value, timestamp):
self.value = value
self.timestamp = Timestamp.of(timestamp)
def __eq__(self, other):
return (type(self) == type(other)
and self.value == other.value
and self.timestamp == other.timestamp)
def __hash__(self):
return hash((self.value, self.timestamp))
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if type(self) != type(other):
return type(self).__name__ < type(other).__name__
if self.value != other.value:
return self.value < other.value
return self.timestamp < other.timestamp
class GlobalWindow(BoundedWindow):
"""The default window into which all data is placed (via GlobalWindows)."""
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(GlobalWindow, cls).__new__(cls)
return cls._instance
def __init__(self):
super(GlobalWindow, self).__init__(GlobalWindow._getTimestampFromProto())
self.start = MIN_TIMESTAMP
def __repr__(self):
return 'GlobalWindow'
def __hash__(self):
return hash(type(self))
def __eq__(self, other):
# Global windows are always and only equal to each other.
return self is other or type(self) is type(other)
def __ne__(self, other):
return not self == other
@staticmethod
def _getTimestampFromProto():
ts_millis = int(
common_urns.constants.GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS.constant)
return Timestamp(micros=ts_millis*1000)
class NonMergingWindowFn(WindowFn):
def is_merging(self):
return False
def merge(self, merge_context):
pass # No merging.
class GlobalWindows(NonMergingWindowFn):
"""A windowing function that assigns everything to one global window."""
@classmethod
def windowed_value(cls, value, timestamp=MIN_TIMESTAMP):
return WindowedValue(value, timestamp, (GlobalWindow(),))
def assign(self, assign_context):
return [GlobalWindow()]
def get_window_coder(self):
return coders.GlobalWindowCoder()
def __hash__(self):
return hash(type(self))
def __eq__(self, other):
    # GlobalWindows instances are always and only equal to each other.
return self is other or type(self) is type(other)
def __ne__(self, other):
return not self == other
def to_runner_api_parameter(self, context):
return common_urns.global_windows.urn, None
@urns.RunnerApiFn.register_urn(common_urns.global_windows.urn, None)
def from_runner_api_parameter(unused_fn_parameter, unused_context):
return GlobalWindows()
class FixedWindows(NonMergingWindowFn):
"""A windowing function that assigns each element to one time interval.
The attributes size and offset determine in what time interval a timestamp
will be slotted. The time intervals have the following formula:
[N * size + offset, (N + 1) * size + offset)
Attributes:
size: Size of the window as seconds.
offset: Offset of this window as seconds since Unix epoch. Windows start at
t=N * size + offset where t=0 is the epoch. The offset must be a value
in range [0, size). If it is not it will be normalized to this range.
"""
def __init__(self, size, offset=0):
if size <= 0:
raise ValueError('The size parameter must be strictly positive.')
self.size = Duration.of(size)
self.offset = Timestamp.of(offset) % self.size
def assign(self, context):
timestamp = context.timestamp
start = timestamp - (timestamp - self.offset) % self.size
return [IntervalWindow(start, start + self.size)]
def get_window_coder(self):
return coders.IntervalWindowCoder()
def __eq__(self, other):
if type(self) == type(other) == FixedWindows:
return self.size == other.size and self.offset == other.offset
def __hash__(self):
return hash((self.size, self.offset))
def __ne__(self, other):
return not self == other
def to_runner_api_parameter(self, context):
return (common_urns.fixed_windows.urn,
standard_window_fns_pb2.FixedWindowsPayload(
size=proto_utils.from_micros(
duration_pb2.Duration, self.size.micros),
offset=proto_utils.from_micros(
timestamp_pb2.Timestamp, self.offset.micros)))
@urns.RunnerApiFn.register_urn(
common_urns.fixed_windows.urn,
standard_window_fns_pb2.FixedWindowsPayload)
def from_runner_api_parameter(fn_parameter, unused_context):
return FixedWindows(
size=Duration(micros=fn_parameter.size.ToMicroseconds()),
offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()))
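# A rough, self-contained sketch (not part of the original module) of how the
# FixedWindows formula above plays out; the size, offset and timestamp are
# invented.
def _example_fixed_windows_assignment():
  [window] = FixedWindows(size=60, offset=30).assign(WindowFn.AssignContext(100))
  # (100 - 30) % 60 == 10, so the element at t=100 falls into [90, 150).
  return float(window.start), float(window.end)  # expected: (90.0, 150.0)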
class SlidingWindows(NonMergingWindowFn):
"""A windowing function that assigns each element to a set of sliding windows.
The attributes size and offset determine in what time interval a timestamp
will be slotted. The time intervals have the following formula:
[N * period + offset, N * period + offset + size)
Attributes:
size: Size of the window as seconds.
period: Period of the windows as seconds.
offset: Offset of this window as seconds since Unix epoch. Windows start at
t=N * period + offset where t=0 is the epoch. The offset must be a value
in range [0, period). If it is not it will be normalized to this range.
"""
def __init__(self, size, period, offset=0):
if size <= 0:
raise ValueError('The size parameter must be strictly positive.')
self.size = Duration.of(size)
self.period = Duration.of(period)
self.offset = Timestamp.of(offset) % period
def assign(self, context):
timestamp = context.timestamp
start = timestamp - ((timestamp - self.offset) % self.period)
return [
IntervalWindow(Timestamp(micros=s), Timestamp(micros=s) + self.size)
for s in range(start.micros, timestamp.micros - self.size.micros,
-self.period.micros)]
def get_window_coder(self):
return coders.IntervalWindowCoder()
def __eq__(self, other):
if type(self) == type(other) == SlidingWindows:
return (self.size == other.size
and self.offset == other.offset
and self.period == other.period)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.offset, self.period))
def to_runner_api_parameter(self, context):
return (common_urns.sliding_windows.urn,
standard_window_fns_pb2.SlidingWindowsPayload(
size=proto_utils.from_micros(
duration_pb2.Duration, self.size.micros),
offset=proto_utils.from_micros(
timestamp_pb2.Timestamp, self.offset.micros),
period=proto_utils.from_micros(
duration_pb2.Duration, self.period.micros)))
@urns.RunnerApiFn.register_urn(
common_urns.sliding_windows.urn,
standard_window_fns_pb2.SlidingWindowsPayload)
def from_runner_api_parameter(fn_parameter, unused_context):
return SlidingWindows(
size=Duration(micros=fn_parameter.size.ToMicroseconds()),
offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()),
period=Duration(micros=fn_parameter.period.ToMicroseconds()))
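# A rough, self-contained sketch (not part of the original module) of how
# SlidingWindows assigns one element to several overlapping windows; the
# parameters and the timestamp are invented.
def _example_sliding_windows_assignment():
  windows = SlidingWindows(size=10, period=5).assign(WindowFn.AssignContext(12))
  # The element at t=12 belongs to [10, 20) and [5, 15).
  return [(float(w.start), float(w.end)) for w in windows]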
class Sessions(WindowFn):
"""A windowing function that groups elements into sessions.
A session is defined as a series of consecutive events
separated by a specified gap size.
Attributes:
gap_size: Size of the gap between windows as floating-point seconds.
"""
def __init__(self, gap_size):
if gap_size <= 0:
raise ValueError('The size parameter must be strictly positive.')
self.gap_size = Duration.of(gap_size)
def assign(self, context):
timestamp = context.timestamp
return [IntervalWindow(timestamp, timestamp + self.gap_size)]
def get_window_coder(self):
return coders.IntervalWindowCoder()
def merge(self, merge_context):
to_merge = []
end = MIN_TIMESTAMP
for w in sorted(merge_context.windows, key=lambda w: w.start):
if to_merge:
if end > w.start:
to_merge.append(w)
if w.end > end:
end = w.end
else:
if len(to_merge) > 1:
merge_context.merge(to_merge,
IntervalWindow(to_merge[0].start, end))
to_merge = [w]
end = w.end
else:
to_merge = [w]
end = w.end
if len(to_merge) > 1:
merge_context.merge(to_merge, IntervalWindow(to_merge[0].start, end))
def __eq__(self, other):
if type(self) == type(other) == Sessions:
return self.gap_size == other.gap_size
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.gap_size)
def to_runner_api_parameter(self, context):
return (common_urns.session_windows.urn,
standard_window_fns_pb2.SessionsPayload(
gap_size=proto_utils.from_micros(
duration_pb2.Duration, self.gap_size.micros)))
@urns.RunnerApiFn.register_urn(
common_urns.session_windows.urn,
standard_window_fns_pb2.SessionsPayload)
def from_runner_api_parameter(fn_parameter, unused_context):
return Sessions(
gap_size=Duration(micros=fn_parameter.gap_size.ToMicroseconds()))
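# A rough, self-contained sketch (not part of the original module) of how
# Sessions.merge() folds overlapping proto-session windows together; the gap
# size and the window boundaries are invented.
def _example_sessions_merge():
  class _RecordingMergeContext(WindowFn.MergeContext):
    def __init__(self, windows):
      super(_RecordingMergeContext, self).__init__(windows)
      self.merged = []
    def merge(self, to_be_merged, merge_result):
      self.merged.append((list(to_be_merged), merge_result))
  context = _RecordingMergeContext(
      [IntervalWindow(1, 11), IntervalWindow(5, 15), IntervalWindow(30, 40)])
  Sessions(gap_size=10).merge(context)
  # Expected: one merge call folding [1, 11) and [5, 15) into [1, 15); the
  # non-overlapping [30, 40) window is left untouched.
  return context.merged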
| mxm/incubator-beam | sdks/python/apache_beam/transforms/window.py | Python | apache-2.0 | 17,782 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import arating
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from django_extensions.db.fields import AutoSlugField
from l10n.models import Country
from base.fields import extra
from base.mixins import TimestampedModelMixin, UUIDModelMixin
from phonenumber_field.modelfields import PhoneNumberField
@python_2_unicode_compatible
class Station(TimestampedModelMixin, UUIDModelMixin, models.Model):
TYPE_CHOICES = (("stream", _("Stream")), ("djmon", _("DJ-Monitor")))
type = models.CharField(
verbose_name=_("Type"), max_length=12, default="stream", choices=TYPE_CHOICES
)
name = models.CharField(max_length=256, null=True, blank=True)
slug = AutoSlugField(populate_from="name")
teaser = models.CharField(max_length=512, null=True, blank=True)
main_image = models.ImageField(
verbose_name=_("Image"), upload_to="abcast/station", null=True, blank=True
)
description = extra.MarkdownTextField(blank=True, null=True)
members = models.ManyToManyField(
settings.AUTH_USER_MODEL, through="StationMembers", blank=True
)
website = models.URLField(max_length=256, null=True, blank=True)
phone = PhoneNumberField(_("phone"), blank=True, null=True)
fax = PhoneNumberField(_("fax"), blank=True, null=True)
address1 = models.CharField(_("address"), null=True, blank=True, max_length=100)
address2 = models.CharField(
_("address (secondary)"), null=True, blank=True, max_length=100
)
city = models.CharField(_("city"), null=True, blank=True, max_length=100)
zip = models.CharField(_("zip"), null=True, blank=True, max_length=10)
country = models.ForeignKey(Country, blank=True, null=True)
class Meta:
app_label = "abcast"
verbose_name = _("Station")
verbose_name_plural = _("Stations")
ordering = ("name",)
def __str__(self):
return "%s" % self.name
# @models.permalink
# def get_absolute_url(self):
# return "abcast-station-detail", [self.uuid]
def get_absolute_url(self):
try:
url = reverse("abcast-station-detail", kwargs={"uuid": str(self.uuid)})
except:
url = ""
return url
def get_admin_url(self):
return reverse("admin:abcast_station_change", args=(self.pk,))
arating.enable_voting_on(Station)
@python_2_unicode_compatible
class Role(models.Model):
name = models.CharField(max_length=200)
class Meta:
app_label = "abcast"
verbose_name = _("Role")
verbose_name_plural = _("Roles")
ordering = ("name",)
def __str__(self):
return self.name
class StationMembers(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name="station_membership"
)
station = models.ForeignKey(Station)
roles = models.ManyToManyField(Role, blank=True, related_name="memgership_roles")
class Meta:
app_label = "abcast"
verbose_name = _("Role")
verbose_name_plural = _("Roles")
@python_2_unicode_compatible
class OnAirItem(TimestampedModelMixin, UUIDModelMixin, models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
class Meta:
app_label = "abcast"
verbose_name = _("On Air")
verbose_name_plural = _("On Air")
unique_together = ("content_type", "object_id")
def __str__(self):
return "%s : %s" % (self.channel.pk, self.channel.pk)
@python_2_unicode_compatible
class Channel(TimestampedModelMixin, UUIDModelMixin, models.Model):
name = models.CharField(max_length=256, null=True, blank=True)
teaser = models.CharField(max_length=512, null=True, blank=True)
slug = AutoSlugField(populate_from="name")
TYPE_CHOICES = (("stream", _("Stream")), ("djmon", _("DJ-Monitor")))
type = models.CharField(
verbose_name=_("Type"), max_length=12, default="stream", choices=TYPE_CHOICES
)
stream_url = models.CharField(
max_length=256,
null=True,
blank=True,
help_text=_("setting the stream-url overrides server settings"),
)
description = extra.MarkdownTextField(blank=True, null=True)
station = models.ForeignKey(
"Station", null=True, blank=True, on_delete=models.SET_NULL
)
rtmp_app = models.CharField(max_length=256, null=True, blank=True)
rtmp_path = models.CharField(max_length=256, null=True, blank=True)
has_scheduler = models.BooleanField(default=False)
mount = models.CharField(max_length=64, null=True, blank=True)
# credentials for tunein api
tunein_station_id = models.CharField(max_length=16, null=True, blank=True)
tunein_partner_id = models.CharField(max_length=16, null=True, blank=True)
tunein_partner_key = models.CharField(max_length=16, null=True, blank=True)
# credentials for icecast2 metadata
icecast2_server = models.CharField(max_length=256, null=True, blank=True)
icecast2_mountpoint = models.CharField(max_length=128, null=True, blank=True)
icecast2_admin_user = models.CharField(max_length=128, null=True, blank=True)
icecast2_admin_pass = models.CharField(max_length=128, null=True, blank=True)
on_air_type = models.ForeignKey(ContentType, null=True, blank=True)
on_air_id = models.PositiveIntegerField(null=True, blank=True)
on_air = GenericForeignKey("on_air_type", "on_air_id")
class Meta:
app_label = "abcast"
verbose_name = _("Channel")
verbose_name_plural = _("Channels")
ordering = ("name",)
unique_together = ("on_air_type", "on_air_id")
def __str__(self):
return "%s" % self.name
def get_absolute_url(self):
return reverse("abcast-station-detail", kwargs={"uuid": str(self.station.uuid)})
def get_api_url(self):
return (
reverse(
"api_dispatch_detail",
kwargs={
"api_name": "v1",
"resource_name": "abcast/channel",
"pk": self.pk,
},
)
+ ""
)
def get_dayparts(self, day):
dayparts = []
daypart_sets = self.daypartsets.filter(
time_start__lte=day, time_end__gte=day, channel=self
)
daypart_set = None
if daypart_sets.count() > 0:
daypart_set = daypart_sets[0]
if daypart_set:
for dp in daypart_set.daypart_set.all():
dayparts.append(dp)
return dayparts
def get_on_air(self):
"""
merge currently playing item (told by pypo) with estimated scheduler entry for the emission
"""
now = datetime.datetime.now()
emissions = self.scheduler_emissions.filter(
channel__pk=self.pk, time_start__lte=now, time_end__gte=now
)
if emissions.count() > 0:
emission_url = emissions.first().get_api_url()
emission_items = []
"""
for e in emission.get_timestamped_media():
item = e.content_object
emission_items.append({
'pk': item.pk,
'time_start': e.timestamp,
'resource_uri': item.get_api_url()
})
"""
else:
emission_url = None
emission_items = []
try:
item_url = self.on_air.get_api_url()
except:
item_url = None
on_air = {
"item": item_url,
"emission": emission_url,
"emission_items": emission_items,
}
return on_air
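    # Illustrative sketch (not part of the original model): the dict returned by
    # get_on_air() above has the following shape; both URIs fall back to None
    # when nothing is playing or scheduled.
    #
    #     {
    #         'item': '<api uri of the playing item or None>',
    #         'emission': '<api uri of the current emission or None>',
    #         'emission_items': [],
    #     }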
| hzlf/openbroadcast.org | website/apps/abcast/models/basemodels.py | Python | gpl-3.0 | 8,118 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.xs.StringType import StringType
logger = logging.getLogger(__name__)
class NoticeType(StringType):
MODEL_MAP = {
'attributes': {
'id': {'required': True, 'type': 'NCNameType'},
},
'elements': [
{'xmlns': 'http://www.w3.org/1999/xhtml', 'tag_name': '*', 'min': 0, 'max': None},
],
}
| cjaymes/pyscap | src/scap/model/xccdf_1_2/NoticeType.py | Python | gpl-3.0 | 1,060 |
from datetime import datetime
from decimal import Decimal
import os
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.forms.formsets import formset_factory
import commonware.log
import happyforms
from quieter_formset.formset import BaseFormSet
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
from access import acl
import amo
import captcha.fields
from amo.fields import ColorField
from amo.urlresolvers import reverse
from amo.utils import slug_validator, slugify, sorted_groupby, remove_icons
from addons.models import (Addon, AddonCategory, BlacklistedSlug, Category,
Persona)
from addons.tasks import save_theme, save_theme_reupload
from addons.utils import reverse_name_lookup
from addons.widgets import IconWidgetRenderer, CategoriesSelectMultiple
from devhub import tasks as devhub_tasks
from tags.models import Tag
from translations import LOCALES
from translations.fields import TransField, TransTextarea
from translations.forms import TranslationFormMixin
from translations.models import Translation
from translations.utils import transfield_changed
from translations.widgets import TranslationTextInput
from users.models import UserEmailField
from versions.models import Version
log = commonware.log.getLogger('z.addons')
def clean_name(name, instance=None):
if not instance:
log.debug('clean_name called without an instance: %s' % name)
id = reverse_name_lookup(name)
# If we get an id and either there's no instance or the instance.id != id.
if id and (not instance or id != instance.id):
raise forms.ValidationError(_('This name is already in use. Please '
'choose another.'))
return name
def clean_slug(slug, instance):
slug_validator(slug, lower=False)
if slug != instance.slug:
if Addon.objects.filter(slug=slug).exists():
raise forms.ValidationError(
_('This slug is already in use. Please choose another.'))
if BlacklistedSlug.blocked(slug):
raise forms.ValidationError(
_('The slug cannot be "%s". Please choose another.' % slug))
return slug
def clean_tags(request, tags):
target = [slugify(t, spaces=True, lower=True) for t in tags.split(',')]
target = set(filter(None, target))
min_len = amo.MIN_TAG_LENGTH
max_len = Tag._meta.get_field('tag_text').max_length
max_tags = amo.MAX_TAGS
total = len(target)
blacklisted = (Tag.objects.values_list('tag_text', flat=True)
.filter(tag_text__in=target, blacklisted=True))
if blacklisted:
# L10n: {0} is a single tag or a comma-separated list of tags.
msg = ngettext('Invalid tag: {0}', 'Invalid tags: {0}',
len(blacklisted)).format(', '.join(blacklisted))
raise forms.ValidationError(msg)
restricted = (Tag.objects.values_list('tag_text', flat=True)
.filter(tag_text__in=target, restricted=True))
if not acl.action_allowed(request, 'Addons', 'Edit'):
if restricted:
# L10n: {0} is a single tag or a comma-separated list of tags.
msg = ngettext('"{0}" is a reserved tag and cannot be used.',
'"{0}" are reserved tags and cannot be used.',
len(restricted)).format('", "'.join(restricted))
raise forms.ValidationError(msg)
else:
# Admin's restricted tags don't count towards the limit.
total = len(target - set(restricted))
if total > max_tags:
num = total - max_tags
msg = ngettext('You have {0} too many tags.',
'You have {0} too many tags.', num).format(num)
raise forms.ValidationError(msg)
if any(t for t in target if len(t) > max_len):
raise forms.ValidationError(
_('All tags must be %s characters or less after invalid characters'
' are removed.' % max_len))
if any(t for t in target if len(t) < min_len):
msg = ngettext("All tags must be at least {0} character.",
"All tags must be at least {0} characters.",
min_len).format(min_len)
raise forms.ValidationError(msg)
return target
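# Illustrative sketch (not part of the original module): clean_tags() splits the
# comma-separated user input, slugifies every entry and enforces the blacklist,
# length and count limits, e.g. (assuming a non-admin request and default
# settings):
#
#     clean_tags(request, 'Video Games, productivity')
#     -> set(['video games', 'productivity'])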
class AddonFormBase(TranslationFormMixin, happyforms.ModelForm):
def __init__(self, *args, **kw):
self.request = kw.pop('request')
super(AddonFormBase, self).__init__(*args, **kw)
class Meta:
models = Addon
fields = ('name', 'slug', 'summary', 'tags')
def clean_slug(self):
return clean_slug(self.cleaned_data['slug'], self.instance)
def clean_tags(self):
return clean_tags(self.request, self.cleaned_data['tags'])
def get_tags(self, addon):
if acl.action_allowed(self.request, 'Addons', 'Edit'):
return list(addon.tags.values_list('tag_text', flat=True))
else:
return list(addon.tags.filter(restricted=False)
.values_list('tag_text', flat=True))
class AddonFormBasic(AddonFormBase):
name = TransField(max_length=50)
slug = forms.CharField(max_length=30)
summary = TransField(widget=TransTextarea(attrs={'rows': 4}),
max_length=250)
tags = forms.CharField(required=False)
class Meta:
model = Addon
fields = ('name', 'slug', 'summary', 'tags')
def __init__(self, *args, **kw):
super(AddonFormBasic, self).__init__(*args, **kw)
self.fields['tags'].initial = ', '.join(self.get_tags(self.instance))
# Do not simply append validators, as validators will persist between
# instances.
validate_name = lambda x: clean_name(x, self.instance)
name_validators = list(self.fields['name'].validators)
name_validators.append(validate_name)
self.fields['name'].validators = name_validators
def save(self, addon, commit=False):
tags_new = self.cleaned_data['tags']
tags_old = [slugify(t, spaces=True) for t in self.get_tags(addon)]
# Add new tags.
for t in set(tags_new) - set(tags_old):
Tag(tag_text=t).save_tag(addon)
# Remove old tags.
for t in set(tags_old) - set(tags_new):
Tag(tag_text=t).remove_tag(addon)
# We ignore `commit`, since we need it to be `False` so we can save
# the ManyToMany fields on our own.
addonform = super(AddonFormBasic, self).save(commit=False)
addonform.save()
return addonform
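# Illustrative usage sketch (not part of the original module): a view would
# typically construct the form with the request passed explicitly and call
# save() with the add-on. The view and variable names are assumptions.
def _example_edit_basic(request, addon):
    form = AddonFormBasic(request.POST, instance=addon, request=request)
    if form.is_valid():
        return form.save(addon)
    return None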
class AppFormBasic(AddonFormBasic):
"""Form to override name length for apps."""
name = TransField(max_length=128)
class CategoryForm(forms.Form):
application = forms.TypedChoiceField(amo.APPS_CHOICES, coerce=int,
widget=forms.HiddenInput,
required=False)
categories = forms.ModelMultipleChoiceField(
queryset=Category.objects.all(), widget=CategoriesSelectMultiple)
def save(self, addon):
application = self.cleaned_data.get('application')
categories_new = self.cleaned_data['categories']
categories_old = [cats for app, cats in addon.app_categories if
(app and application and app.id == application)
or (not app and not application)]
if categories_old:
categories_old = categories_old[0]
# Add new categories.
for c in set(categories_new) - set(categories_old):
AddonCategory(addon=addon, category=c).save()
# Remove old categories.
for c in set(categories_old) - set(categories_new):
AddonCategory.objects.filter(addon=addon, category=c).delete()
def clean_categories(self):
categories = self.cleaned_data['categories']
total = categories.count()
max_cat = amo.MAX_CATEGORIES
if getattr(self, 'disabled', False) and total:
raise forms.ValidationError(
_('Categories cannot be changed while your add-on is featured '
'for this application.'))
if total > max_cat:
# L10n: {0} is the number of categories.
raise forms.ValidationError(ngettext(
'You can have only {0} category.',
'You can have only {0} categories.',
max_cat).format(max_cat))
has_misc = filter(lambda x: x.misc, categories)
if has_misc and total > 1:
raise forms.ValidationError(
_('The miscellaneous category cannot be combined with '
'additional categories.'))
return categories
class BaseCategoryFormSet(BaseFormSet):
def __init__(self, *args, **kw):
self.addon = kw.pop('addon')
self.request = kw.pop('request', None)
super(BaseCategoryFormSet, self).__init__(*args, **kw)
self.initial = []
apps = sorted(self.addon.compatible_apps.keys(),
key=lambda x: x.id)
# Drop any apps that don't have appropriate categories.
qs = Category.objects.filter(type=self.addon.type)
app_cats = dict((k, list(v)) for k, v in
sorted_groupby(qs, 'application'))
for app in list(apps):
if app and not app_cats.get(app.id):
apps.remove(app)
if not app_cats:
apps = []
for app in apps:
cats = dict(self.addon.app_categories).get(app, [])
self.initial.append({'categories': [c.id for c in cats]})
for app, form in zip(apps, self.forms):
key = app.id if app else None
form.request = self.request
form.initial['application'] = key
form.app = app
cats = sorted(app_cats[key], key=lambda x: x.name)
form.fields['categories'].choices = [(c.id, c.name) for c in cats]
# If this add-on is featured for this application, category
# changes are forbidden.
if not acl.action_allowed(self.request, 'Addons', 'Edit'):
form.disabled = (app and self.addon.is_featured(app))
def save(self):
for f in self.forms:
f.save(self.addon)
CategoryFormSet = formset_factory(form=CategoryForm,
formset=BaseCategoryFormSet, extra=0)
def icons():
"""
Generates a list of tuples for the default icons for add-ons,
in the format (pseudo-mime-type, description).
"""
icons = [('image/jpeg', 'jpeg'), ('image/png', 'png'), ('', 'default')]
dirs, files = storage.listdir(settings.ADDON_ICONS_DEFAULT_PATH)
for fname in files:
if '32' in fname and 'default' not in fname:
icon_name = fname.split('-')[0]
icons.append(('icon/%s' % icon_name, icon_name))
return icons
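# Note (not part of the original module): icons() returns (pseudo-mime-type,
# description) tuples such as ('image/png', 'png') or ('icon/firefox',
# 'firefox'), depending on what ADDON_ICONS_DEFAULT_PATH actually contains;
# these feed the RadioSelect choices of AddonFormMedia below.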
class AddonFormMedia(AddonFormBase):
icon_type = forms.CharField(widget=forms.RadioSelect(
renderer=IconWidgetRenderer, choices=[]), required=False)
icon_upload_hash = forms.CharField(required=False)
class Meta:
model = Addon
fields = ('icon_upload_hash', 'icon_type')
def __init__(self, *args, **kwargs):
super(AddonFormMedia, self).__init__(*args, **kwargs)
# Add icons here so we only read the directory when
# AddonFormMedia is actually being used.
self.fields['icon_type'].widget.choices = icons()
def save(self, addon, commit=True):
if self.cleaned_data['icon_upload_hash']:
upload_hash = self.cleaned_data['icon_upload_hash']
upload_path = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
dirname = addon.get_icon_dir()
destination = os.path.join(dirname, '%s' % addon.id)
remove_icons(destination)
devhub_tasks.resize_icon.delay(upload_path, destination,
amo.ADDON_ICON_SIZES,
set_modified_on=[addon])
return super(AddonFormMedia, self).save(commit)
class AddonFormDetails(AddonFormBase):
default_locale = forms.TypedChoiceField(choices=LOCALES)
class Meta:
model = Addon
fields = ('description', 'default_locale', 'homepage')
def clean(self):
# Make sure we have the required translations in the new locale.
required = 'name', 'summary', 'description'
data = self.cleaned_data
if not self.errors and 'default_locale' in self.changed_data:
fields = dict((k, getattr(self.instance, k + '_id'))
for k in required)
locale = self.cleaned_data['default_locale']
ids = filter(None, fields.values())
qs = (Translation.objects.filter(locale=locale, id__in=ids,
localized_string__isnull=False)
.values_list('id', flat=True))
missing = [k for k, v in fields.items() if v not in qs]
# They might be setting description right now.
if 'description' in missing and locale in data['description']:
missing.remove('description')
if missing:
raise forms.ValidationError(
_('Before changing your default locale you must have a '
'name, summary, and description in that locale. '
'You are missing %s.') % ', '.join(map(repr, missing)))
return data
class AddonFormSupport(AddonFormBase):
support_url = TransField.adapt(forms.URLField)(required=False)
support_email = TransField.adapt(forms.EmailField)(required=False)
class Meta:
model = Addon
fields = ('support_email', 'support_url')
def __init__(self, *args, **kw):
super(AddonFormSupport, self).__init__(*args, **kw)
def save(self, addon, commit=True):
return super(AddonFormSupport, self).save(commit)
class AddonFormTechnical(AddonFormBase):
developer_comments = TransField(widget=TransTextarea, required=False)
class Meta:
model = Addon
fields = ('developer_comments', 'view_source', 'site_specific',
'external_software', 'auto_repackage', 'public_stats',
'whiteboard')
class AddonForm(happyforms.ModelForm):
name = forms.CharField(widget=TranslationTextInput,)
homepage = forms.CharField(widget=TranslationTextInput, required=False)
eula = forms.CharField(widget=TranslationTextInput,)
description = forms.CharField(widget=TranslationTextInput,)
developer_comments = forms.CharField(widget=TranslationTextInput,)
privacy_policy = forms.CharField(widget=TranslationTextInput,)
the_future = forms.CharField(widget=TranslationTextInput,)
the_reason = forms.CharField(widget=TranslationTextInput,)
support_email = forms.CharField(widget=TranslationTextInput,)
class Meta:
model = Addon
fields = ('name', 'homepage', 'default_locale', 'support_email',
'support_url', 'description', 'summary',
'developer_comments', 'eula', 'privacy_policy', 'the_reason',
'the_future', 'view_source', 'prerelease', 'site_specific',)
exclude = ('status', )
def clean_name(self):
return clean_name(self.cleaned_data['name'])
def save(self):
desc = self.data.get('description')
if desc and desc != unicode(self.instance.description):
amo.log(amo.LOG.EDIT_DESCRIPTIONS, self.instance)
if self.changed_data:
amo.log(amo.LOG.EDIT_PROPERTIES, self.instance)
super(AddonForm, self).save()
class AbuseForm(happyforms.Form):
recaptcha = captcha.fields.ReCaptchaField(label='')
text = forms.CharField(required=True,
label='',
widget=forms.Textarea())
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
super(AbuseForm, self).__init__(*args, **kwargs)
if (not self.request.user.is_anonymous() or
not settings.RECAPTCHA_PRIVATE_KEY):
del self.fields['recaptcha']
class ThemeFormBase(AddonFormBase):
def __init__(self, *args, **kwargs):
super(ThemeFormBase, self).__init__(*args, **kwargs)
cats = Category.objects.filter(type=amo.ADDON_PERSONA, weight__gte=0)
cats = sorted(cats, key=lambda x: x.name)
self.fields['category'].choices = [(c.id, c.name) for c in cats]
for field in ('header', 'footer'):
self.fields[field].widget.attrs = {
'data-upload-url': reverse('devhub.personas.upload_persona',
args=['persona_%s' % field]),
'data-allowed-types': 'image/jpeg|image/png'
}
def clean_name(self):
return clean_name(self.cleaned_data['name'])
def clean_slug(self):
return clean_slug(self.cleaned_data['slug'], self.instance)
class ThemeForm(ThemeFormBase):
name = forms.CharField(max_length=50)
slug = forms.CharField(max_length=30)
category = forms.ModelChoiceField(queryset=Category.objects.all(),
widget=forms.widgets.RadioSelect)
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 4}),
max_length=500, required=False)
tags = forms.CharField(required=False)
license = forms.TypedChoiceField(
choices=amo.PERSONA_LICENSES_CHOICES,
coerce=int, empty_value=None, widget=forms.HiddenInput,
error_messages={'required': _lazy(u'A license must be selected.')})
header = forms.FileField(required=False)
header_hash = forms.CharField(widget=forms.HiddenInput)
footer = forms.FileField(required=False)
footer_hash = forms.CharField(widget=forms.HiddenInput, required=False)
# Native color picker doesn't allow real time tracking of user input
# and empty values, thus force the JavaScript color picker for now.
# See bugs 1005206 and 1003575.
accentcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
textcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
agreed = forms.BooleanField()
# This lets us POST the data URIs of the unsaved previews so we can still
# show them if there were form errors. It's really clever.
unsaved_data = forms.CharField(required=False, widget=forms.HiddenInput)
class Meta:
model = Addon
fields = ('name', 'slug', 'description', 'tags')
def save(self, commit=False):
data = self.cleaned_data
addon = Addon.objects.create(
slug=data.get('slug'),
status=amo.STATUS_PENDING, type=amo.ADDON_PERSONA)
addon.name = {'en-US': data['name']}
if data.get('description'):
addon.description = data['description']
addon._current_version = Version.objects.create(addon=addon,
version='0')
addon.save()
# Create Persona instance.
p = Persona()
p.persona_id = 0
p.addon = addon
p.header = 'header.png'
if data['footer_hash']:
p.footer = 'footer.png'
if data['accentcolor']:
p.accentcolor = data['accentcolor'].lstrip('#')
if data['textcolor']:
p.textcolor = data['textcolor'].lstrip('#')
p.license = data['license']
p.submit = datetime.now()
user = self.request.amo_user
p.author = user.username
p.display_username = user.name
p.save()
# Save header, footer, and preview images.
save_theme.delay(data['header_hash'], data['footer_hash'], addon)
# Save user info.
addon.addonuser_set.create(user=user, role=amo.AUTHOR_ROLE_OWNER)
# Save tags.
for t in data['tags']:
Tag(tag_text=t).save_tag(addon)
# Save categories.
AddonCategory(addon=addon, category=data['category']).save()
return addon
class EditThemeForm(AddonFormBase):
name = TransField(max_length=50, label=_lazy('Give Your Theme a Name.'))
slug = forms.CharField(max_length=30)
category = forms.ModelChoiceField(queryset=Category.objects.all(),
widget=forms.widgets.RadioSelect)
description = TransField(
widget=TransTextarea(attrs={'rows': 4}),
max_length=500, required=False, label=_lazy('Describe your Theme.'))
tags = forms.CharField(required=False)
accentcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
textcolor = ColorField(
required=False,
widget=forms.TextInput(attrs={'class': 'color-picker'}),
)
license = forms.TypedChoiceField(
choices=amo.PERSONA_LICENSES_CHOICES, coerce=int, empty_value=None,
widget=forms.HiddenInput,
error_messages={'required': _lazy(u'A license must be selected.')})
# Theme re-upload.
header = forms.FileField(required=False)
header_hash = forms.CharField(widget=forms.HiddenInput, required=False)
footer = forms.FileField(required=False)
footer_hash = forms.CharField(widget=forms.HiddenInput, required=False)
class Meta:
model = Addon
fields = ('name', 'slug', 'description', 'tags')
def __init__(self, *args, **kw):
self.request = kw.pop('request')
super(AddonFormBase, self).__init__(*args, **kw)
addon = Addon.objects.no_cache().get(id=self.instance.id)
persona = addon.persona
# Do not simply append validators, as validators will persist between
# instances.
self.fields['name'].validators = list(self.fields['name'].validators)
self.fields['name'].validators.append(lambda x: clean_name(x, addon))
# Allow theme artists to localize Name and Description.
for trans in Translation.objects.filter(id=self.initial['name']):
self.initial['name_' + trans.locale.lower()] = trans
for trans in Translation.objects.filter(
id=self.initial['description']):
self.initial['description_' + trans.locale.lower()] = trans
self.old_tags = self.get_tags(addon)
self.initial['tags'] = ', '.join(self.old_tags)
if persona.accentcolor:
self.initial['accentcolor'] = '#' + persona.accentcolor
if persona.textcolor:
self.initial['textcolor'] = '#' + persona.textcolor
self.initial['license'] = persona.license
cats = sorted(Category.objects.filter(type=amo.ADDON_PERSONA,
weight__gte=0),
key=lambda x: x.name)
self.fields['category'].choices = [(c.id, c.name) for c in cats]
try:
self.initial['category'] = addon.categories.values_list(
'id', flat=True)[0]
except IndexError:
pass
for field in ('header', 'footer'):
self.fields[field].widget.attrs = {
'data-upload-url': reverse('devhub.personas.reupload_persona',
args=[addon.slug,
'persona_%s' % field]),
'data-allowed-types': 'image/jpeg|image/png'
}
def save(self):
addon = self.instance
persona = addon.persona
data = self.cleaned_data
# Update Persona-specific data.
persona_data = {
'license': int(data['license']),
'accentcolor': data['accentcolor'].lstrip('#'),
'textcolor': data['textcolor'].lstrip('#'),
'author': self.request.amo_user.username,
'display_username': self.request.amo_user.name
}
changed = False
for k, v in persona_data.iteritems():
if v != getattr(persona, k):
changed = True
setattr(persona, k, v)
if changed:
persona.save()
if self.changed_data:
amo.log(amo.LOG.EDIT_PROPERTIES, addon)
self.instance.modified = datetime.now()
# Update Addon-specific data.
changed = (
set(self.old_tags) != data['tags'] or # Check if tags changed.
self.initial['slug'] != data['slug'] or # Check if slug changed.
transfield_changed('description', self.initial, data) or
transfield_changed('name', self.initial, data))
if changed:
# Only save if addon data changed.
super(EditThemeForm, self).save()
# Update tags.
tags_new = data['tags']
tags_old = [slugify(t, spaces=True) for t in self.old_tags]
# Add new tags.
for t in set(tags_new) - set(tags_old):
Tag(tag_text=t).save_tag(addon)
# Remove old tags.
for t in set(tags_old) - set(tags_new):
Tag(tag_text=t).remove_tag(addon)
# Update category.
if data['category'].id != self.initial['category']:
addon_cat = addon.addoncategory_set.all()[0]
addon_cat.category = data['category']
addon_cat.save()
# Theme reupload.
if not addon.is_pending():
if data['header_hash'] or data['footer_hash']:
save_theme_reupload.delay(
data['header_hash'], data['footer_hash'], addon)
return data
class EditThemeOwnerForm(happyforms.Form):
owner = UserEmailField()
def __init__(self, *args, **kw):
self.instance = kw.pop('instance')
super(EditThemeOwnerForm, self).__init__(*args, **kw)
addon = self.instance
self.fields['owner'].widget.attrs['placeholder'] = _(
"Enter a new author's email address")
try:
self.instance_addonuser = addon.addonuser_set.all()[0]
self.initial['owner'] = self.instance_addonuser.user.email
except IndexError:
# If there was never an author before, then don't require one now.
self.instance_addonuser = None
self.fields['owner'].required = False
def save(self):
data = self.cleaned_data
if data.get('owner'):
changed = (not self.instance_addonuser or
self.instance_addonuser != data['owner'])
if changed:
# Update Persona-specific data.
persona = self.instance.persona
persona.author = data['owner'].username
persona.display_username = data['owner'].name
persona.save()
if not self.instance_addonuser:
                # If there was previously no owner, create one.
self.instance.addonuser_set.create(user=data['owner'],
role=amo.AUTHOR_ROLE_OWNER)
elif self.instance_addonuser != data['owner']:
# If the owner has changed, update the `AddonUser` object.
self.instance_addonuser.user = data['owner']
self.instance_addonuser.role = amo.AUTHOR_ROLE_OWNER
self.instance_addonuser.save()
self.instance.modified = datetime.now()
self.instance.save()
return data
class ContributionForm(happyforms.Form):
amount = forms.DecimalField(required=True, min_value=Decimal('0.01'))
| SuriyaaKudoIsc/olympia | apps/addons/forms.py | Python | bsd-3-clause | 27,845 |
#!/usr/bin/env python
from functools import wraps
import sqlite3

from flask import g, render_template

# NOTE: assumed import location for the Flask application object used below;
# adjust to wherever `app` is actually created.
from app import app
# DATABASE INTERACTION FUNCTIONS
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_db()
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
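# Illustrative usage sketch (not part of the original module): query_db() is
# meant to run inside a request/application context with parameterised SQL;
# the table and column names below are made up for the example.
def example_get_player(player_id):
    return query_db('SELECT * FROM players WHERE id = ?', (player_id,), one=True)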
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
# Wrapper func to return different last page of standings
def is_last_round(func):
    @wraps(func)
    def decorated_function(*args, **kwargs):
        # Check whether this is the final round and render the matching page.
        # NOTE: the original snippet referenced round_num/final_round without
        # defining them; assuming here that the wrapped view receives them as
        # keyword arguments.
        round_num = kwargs.get('round_num')
        final_round = kwargs.get('final_round')
        if round_num == final_round:
            return render_template('final_results.html')
        else:
            return render_template('standings.html', round_num=round_num)
    return decorated_function
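# Illustrative usage sketch (not part of the original module): the decorator
# above would typically wrap a standings view; the route and the way
# round_num/final_round reach the wrapper are assumptions for the example.
#
#     @app.route('/standings/<int:round_num>')
#     @is_last_round
#     def standings(*args, **kwargs):
#         pass  # the wrapper decides which template gets rendered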
| petr-tik/chess_app | app/database_funcs.py | Python | mit | 1,195 |
""" DIRAC DirectoryTree base class """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
import time
import threading
import os
import stat
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.Utilities import getIDSelectString
DEBUG = 0
#############################################################################
class DirectoryTreeBase(object):
def __init__(self, database=None):
self.db = database
self.lock = threading.Lock()
self.treeTable = ""
############################################################################
#
    # THE FOLLOWING METHODS NEED TO BE IMPLEMENTED IN THE DERIVED CLASS
#
############################################################################
def findDir(self, path, connection=False):
"""Find directory ID for the given path"""
return S_ERROR("To be implemented on derived class")
def findDirs(self, paths, connection=False):
"""Find DirIDs for the given path list"""
return S_ERROR("To be implemented on derived class")
def makeDir(self, path):
return S_ERROR("To be implemented on derived class")
def removeDir(self, path):
return S_ERROR("To be implemented on derived class")
def getChildren(self, path, connection=False):
return S_ERROR("To be implemented on derived class")
def getDirectoryPath(self, dirID):
"""Get directory name by directory ID"""
return S_ERROR("To be implemented on derived class")
def countSubdirectories(self, dirId, includeParent=True):
return S_ERROR("To be implemented on derived class")
def getSubdirectoriesByID(self, dirID, requestString=False, includeParent=False):
"""Get all the subdirectories of the given directory at a given level"""
return S_ERROR("To be implemented on derived class")
##########################################################################
def _getConnection(self, connection):
if connection:
return connection
res = self.db._getConnection()
if res["OK"]:
return res["Value"]
gLogger.warn("Failed to get MySQL connection", res["Message"])
return connection
def getTreeTable(self):
"""Get the string of the Directory Tree type"""
return self.treeTable
def setDatabase(self, database):
self.db = database
def makeDirectory(self, path, credDict, status=0):
"""Create a new directory. The return value is the dictionary
containing all the parameters of the newly created directory
"""
if path[0] != "/":
return S_ERROR("Not an absolute path")
# Strip off the trailing slash if necessary
if len(path) > 1 and path[-1] == "/":
path = path[:-1]
if path == "/":
# Create the root directory
l_uid = 0
l_gid = 0
else:
result = self.db.ugManager.getUserAndGroupID(credDict)
if not result["OK"]:
return result
(l_uid, l_gid) = result["Value"]
dirDict = {}
result = self.makeDir(path)
if not result["OK"]:
return result
dirID = result["Value"]
if result["NewDirectory"]:
req = "INSERT INTO FC_DirectoryInfo (DirID,UID,GID,CreationDate,ModificationDate,Mode,Status) Values "
req = req + "(%d,%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP(),%d,%d)" % (
dirID,
l_uid,
l_gid,
self.db.umask,
status,
)
result = self.db._update(req)
if result["OK"]:
resGet = self.getDirectoryParameters(dirID)
if resGet["OK"]:
dirDict = resGet["Value"]
else:
return S_OK(dirID)
if not dirDict:
self.removeDir(path)
return S_ERROR("Failed to create directory %s" % path)
return S_OK(dirID)
#####################################################################
def makeDirectories(self, path, credDict):
"""Make all the directories recursively in the path. The return value
is the dictionary containing all the parameters of the newly created
directory
"""
if not path or path[0] != "/":
return S_ERROR("Not an absolute path")
result = self.existsDir(path)
if not result["OK"]:
return result
result = result["Value"]
if result["Exists"]:
return S_OK(result["DirID"])
if path == "/":
result = self.makeDirectory(path, credDict)
return result
parentDir = os.path.dirname(path)
result = self.existsDir(parentDir)
if not result["OK"]:
return result
result = result["Value"]
if result["Exists"]:
result = self.makeDirectory(path, credDict)
else:
result = self.makeDirectories(parentDir, credDict)
if not result["OK"]:
return result
result = self.makeDirectory(path, credDict)
return result
#####################################################################
def exists(self, lfns):
successful = {}
failed = {}
for lfn in lfns:
res = self.findDir(lfn)
if not res["OK"]:
failed[lfn] = res["Message"]
if not res["Value"]:
successful[lfn] = False
else:
successful[lfn] = lfn
return S_OK({"Successful": successful, "Failed": failed})
def existsDir(self, path):
"""Check the existence of the directory path"""
result = self.findDir(path)
if not result["OK"]:
return result
if result["Value"]:
result = S_OK(int(result["Value"]))
result["Exists"] = True
result["DirID"] = result["Value"]
else:
result = S_OK(0)
result["Exists"] = False
return result
#####################################################################
def isDirectory(self, paths):
"""Checking for existence of directories"""
successful = {}
failed = {}
for dir in paths:
result = self.existsDir(dir)
if not result["OK"]:
failed[dir] = result["Message"]
elif result["Value"]["Exists"]:
successful[dir] = True
else:
successful[dir] = False
return S_OK({"Successful": successful, "Failed": failed})
#####################################################################
def createDirectory(self, dirs, credDict):
"""Checking for existence of directories"""
successful = {}
failed = {}
for dir in dirs:
result = self.makeDirectories(dir, credDict)
if not result["OK"]:
failed[dir] = result["Message"]
else:
successful[dir] = True
return S_OK({"Successful": successful, "Failed": failed})
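    # Illustrative usage sketch (not part of the original class): the bulk
    # methods above follow DIRAC's S_OK({"Successful": ..., "Failed": ...})
    # convention; the paths and credential dictionary are assumptions.
    def _exampleCreateDirectories(self, credDict):
        result = self.createDirectory(["/vo/user/a", "/vo/user/b"], credDict)
        if not result["OK"]:
            return result
        for path, reason in result["Value"]["Failed"].items():
            gLogger.error("Failed to create directory", "%s: %s" % (path, reason))
        return S_OK(sorted(result["Value"]["Successful"]))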
#####################################################################
def isEmpty(self, path):
"""Find out if the given directory is empty"""
# Check if there are subdirectories
result = self.getChildren(path)
if not result["OK"]:
return result
childIDs = result["Value"]
if childIDs:
return S_OK(False)
# Check if there are files
result = self.__getDirID(path)
if not result["OK"]:
return result
dirID = result["Value"]
result = self.db.fileManager.getFilesInDirectory(dirID)
if not result["OK"]:
return result
files = result["Value"]
if files:
return S_OK(False)
return S_OK(True)
#####################################################################
def removeDirectory(self, dirs, force=False):
"""Remove an empty directory from the catalog"""
successful = {}
failed = {}
# Check if requested directories exist in the catalog
result = self.findDirs(dirs)
if not result["OK"]:
return result
dirDict = result["Value"]
for d in dirs:
if d not in dirDict:
successful[d] = "Directory does not exist"
for dir in dirDict:
result = self.isEmpty(dir)
if not result["OK"]:
return result
if not result["Value"]:
failed[dir] = "Failed to remove non-empty directory"
continue
result = self.removeDir(dir)
if not result["OK"]:
failed[dir] = result["Message"]
else:
successful[dir] = result
return S_OK({"Successful": successful, "Failed": failed})
#####################################################################
def __getDirID(self, path):
"""Get directory ID from the given path or already evaluated ID"""
if isinstance(path, six.string_types):
result = self.findDir(path)
if not result["OK"]:
return result
dirID = result["Value"]
if not dirID:
return S_ERROR("%s: not found" % str(path))
return S_OK(dirID)
else:
return S_OK(path)
#####################################################################
def getDirectoryParameters(self, path):
"""Get the given directory parameters"""
result = self.__getDirID(path)
if not result["OK"]:
return result
dirID = result["Value"]
query = "SELECT DirID,UID,GID,Status,Mode,CreationDate,ModificationDate from FC_DirectoryInfo"
query = query + " WHERE DirID=%d" % dirID
resQuery = self.db._query(query)
if not resQuery["OK"]:
return resQuery
if not resQuery["Value"]:
return S_ERROR("Directory not found")
dirDict = {}
dirDict["DirID"] = int(resQuery["Value"][0][0])
uid = int(resQuery["Value"][0][1])
dirDict["UID"] = uid
owner = "unknown"
result = self.db.ugManager.getUserName(uid)
if result["OK"]:
owner = result["Value"]
dirDict["Owner"] = owner
gid = int(resQuery["Value"][0][2])
dirDict["GID"] = int(resQuery["Value"][0][2])
group = "unknown"
result = self.db.ugManager.getGroupName(gid)
if result["OK"]:
group = result["Value"]
dirDict["OwnerGroup"] = group
dirDict["Status"] = int(resQuery["Value"][0][3])
dirDict["Mode"] = int(resQuery["Value"][0][4])
dirDict["CreationDate"] = resQuery["Value"][0][5]
dirDict["ModificationDate"] = resQuery["Value"][0][6]
return S_OK(dirDict)
#####################################################################
def _setDirectoryParameter(self, path, pname, pvalue):
"""Set a numerical directory parameter
:param mixed path: Directory path or paths as a string or directory ID as int,
list/tuple of ints or a string to select directory IDs
:param str pname: parameter name
:param int pvalue: parameter value
"""
result = getIDSelectString(path)
if not result["OK"] and isinstance(path, six.string_types):
result = self.__getDirID(path)
if not result["OK"]:
return result
dirID = result["Value"]
result = getIDSelectString(dirID)
if not result["OK"]:
return result
dirIDString = result["Value"]
req = "UPDATE FC_DirectoryInfo SET %s=%d, " "ModificationDate=UTC_TIMESTAMP() WHERE DirID IN ( %s )" % (
pname,
pvalue,
dirIDString,
)
result = self.db._update(req)
return result
#####################################################################
def _setDirectoryGroup(self, path, gname):
"""Set the directory group
:param mixed path: directory path as a string or int or list of ints or select statement
        :param mixed gname: new group as a string or int gid
"""
result = self.db.ugManager.findGroup(gname)
if not result["OK"]:
return result
gid = result["Value"]
return self._setDirectoryParameter(path, "GID", gid)
#####################################################################
def _setDirectoryOwner(self, path, owner):
"""Set the directory owner
:param mixed path: directory path as a string or int or list of ints or select statement
        :param mixed owner: new user as a string or int uid
"""
result = self.db.ugManager.findUser(owner)
if not result["OK"]:
return result
uid = result["Value"]
return self._setDirectoryParameter(path, "UID", uid)
#####################################################################
def changeDirectoryOwner(self, paths, recursive=False):
"""Bulk setting of the directory owner
:param dictionary paths: dictionary < lfn : owner >
"""
return self._changeDirectoryParameter(
paths, self._setDirectoryOwner, self.db.fileManager.setFileOwner, recursive=recursive
)
#####################################################################
def changeDirectoryGroup(self, paths, recursive=False):
"""Bulk setting of the directory group
:param dictionary paths: dictionary < lfn : group >
"""
return self._changeDirectoryParameter(
paths, self._setDirectoryGroup, self.db.fileManager.setFileGroup, recursive=recursive
)
#####################################################################
def _setDirectoryMode(self, path, mode):
"""set the directory mode
:param mixed path: directory path as a string or int or list of ints or select statement
:param int mode: new mode
"""
return self._setDirectoryParameter(path, "Mode", mode)
#####################################################################
def changeDirectoryMode(self, paths, recursive=False):
"""Bulk setting of the directory mode
:param dictionary paths: dictionary < lfn : mode >
"""
return self._changeDirectoryParameter(
paths, self._setDirectoryMode, self.db.fileManager.setFileMode, recursive=recursive
)
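    # Illustrative usage sketch (not part of the original class): bulk mode
    # change for a couple of directories; the paths and mode values are
    # assumptions for the example.
    def _exampleChmodDirectories(self):
        return self.changeDirectoryMode(
            {"/vo/user/a": 0o755, "/vo/user/b": 0o750}, recursive=True
        )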
#####################################################################
def _changeDirectoryParameter(self, paths, directoryFunction, fileFunction, recursive=False):
"""Bulk setting of the directory parameter with recursion for all the subdirectories and files
:param dictionary paths: dictionary < lfn : value >, where value is the value of parameter to be set
:param function directoryFunction: function to change directory(ies) parameter
:param function fileFunction: function to change file(s) parameter
:param bool recursive: flag to apply the operation recursively
"""
arguments = paths
successful = {}
failed = {}
for path, attribute in arguments.items():
result = directoryFunction(path, attribute)
if not result["OK"]:
failed[path] = result["Message"]
continue
if recursive:
result = self.__getDirID(path)
if not result["OK"]:
failed[path] = result["Message"]
continue
dirID = result["Value"]
result = self.getSubdirectoriesByID(dirID, requestString=True, includeParent=True)
if not result["OK"]:
failed[path] = result["Message"]
continue
subDirQuery = result["Value"]
result = self.db.fileManager.getFileIDsInDirectory(subDirQuery, requestString=True)
if not result["OK"]:
failed[path] = result["Message"]
continue
fileQuery = result["Value"]
result = directoryFunction(subDirQuery, attribute)
if not result["OK"]:
failed[path] = result["Message"]
continue
result = fileFunction(fileQuery, attribute)
if not result["OK"]:
failed[path] = result["Message"]
else:
successful[path] = True
else:
successful[path] = True
return S_OK({"Successful": successful, "Failed": failed})
#####################################################################
def setDirectoryStatus(self, path, status):
"""set the directory status"""
return self._setDirectoryParameter(path, "Status", status)
def getPathPermissions(self, lfns, credDict):
"""Get permissions for the given user/group to manipulate the given lfns"""
successful = {}
failed = {}
for path in lfns:
result = self.getDirectoryPermissions(path, credDict)
if not result["OK"]:
failed[path] = result["Message"]
else:
successful[path] = result["Value"]
return S_OK({"Successful": successful, "Failed": failed})
#####################################################################
def getDirectoryPermissions(self, path, credDict):
"""Get permissions for the given user/group to manipulate the given directory"""
result = self.db.ugManager.getUserAndGroupID(credDict)
if not result["OK"]:
return result
uid, gid = result["Value"]
result = self.getDirectoryParameters(path)
if not result["OK"]:
if "not found" in result["Message"] or "not exist" in result["Message"]:
# If the directory does not exist, check the nearest parent for the permissions
if path == "/":
# Nothing yet exists, starting from the scratch
resultDict = {}
resultDict["Write"] = True
resultDict["Read"] = True
resultDict["Execute"] = True
return S_OK(resultDict)
else:
pDir = os.path.dirname(path)
if not pDir:
return S_ERROR("Illegal Path")
if pDir == path:
# If pDir == path, then we're stuck in a loop
# There is probably a "//" in the path
return S_ERROR("Bad Path (double /?)")
result = self.getDirectoryPermissions(pDir, credDict)
return result
else:
return result
dUid = result["Value"]["UID"]
dGid = result["Value"]["GID"]
mode = result["Value"]["Mode"]
owner = uid == dUid
group = gid == dGid
resultDict = {}
if self.db.globalReadAccess:
resultDict["Read"] = True
else:
resultDict["Read"] = (
(owner and mode & stat.S_IRUSR > 0) or (group and mode & stat.S_IRGRP > 0) or mode & stat.S_IROTH > 0
)
resultDict["Write"] = (
(owner and mode & stat.S_IWUSR > 0) or (group and mode & stat.S_IWGRP > 0) or mode & stat.S_IWOTH > 0
)
resultDict["Execute"] = (
(owner and mode & stat.S_IXUSR > 0) or (group and mode & stat.S_IXGRP > 0) or mode & stat.S_IXOTH > 0
)
return S_OK(resultDict)
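    # Illustrative usage sketch (not part of the original class): acting on the
    # permission dictionary returned above; the path and credential dictionary
    # are assumptions for the example.
    def _exampleCheckWriteAccess(self, path, credDict):
        result = self.getDirectoryPermissions(path, credDict)
        if not result["OK"]:
            return result
        if not result["Value"]["Write"]:
            return S_ERROR("Write permission denied for %s" % path)
        return S_OK()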
def getFileIDsInDirectoryWithLimits(self, dirID, credDict, startItem=1, maxItems=25):
"""Get file IDs for the given directory"""
dirs = dirID
if not isinstance(dirID, list):
dirs = [dirID]
if not dirs:
dirs = [-1]
dirListString = ",".join([str(dir) for dir in dirs])
req = "SELECT COUNT( DirID ) FROM FC_Files USE INDEX (DirID) WHERE DirID IN ( %s )" % dirListString
result = self.db._query(req)
if not result["OK"]:
return result
totalRecords = result["Value"][0][0]
if not totalRecords:
result = S_OK([])
result["TotalRecords"] = totalRecords
return result
req = "SELECT FileID FROM FC_Files WHERE DirID IN ( %s ) LIMIT %s, %s " % (dirListString, startItem, maxItems)
result = self.db._query(req)
if not result["OK"]:
return result
result = S_OK([fileId[0] for fileId in result["Value"]])
result["TotalRecords"] = totalRecords
return result
def getFileLFNsInDirectory(self, dirID, credDict):
"""Get file lfns for the given directory or directory list"""
dirs = dirID
if not isinstance(dirID, list):
dirs = [dirID]
dirListString = ",".join([str(dir) for dir in dirs])
treeTable = self.getTreeTable()
req = "SELECT CONCAT(D.DirName,'/',F.FileName) FROM FC_Files as F, %s as D WHERE D.DirID IN ( %s ) and D.DirID=F.DirID"
req = req % (treeTable, dirListString)
result = self.db._query(req)
if not result["OK"]:
return result
lfnList = [x[0] for x in result["Value"]]
return S_OK(lfnList)
def getFileLFNsInDirectoryByDirectory(self, dirIDList, credDict):
"""Get file LFNs and IDs for the given directory or directory list
:param list dirIDList: List of directory IDs
:param dict credDict: dictionary of user credentials
:return: S_OK/S_ERROR with Value dictionary {"DirLFNDict": dirLfnDict, "IDLFNDict": idLfnDict}
where dirLfnDict has the structure <directory_name>:<list of contained file names>,
idLfnDict has structure <fileID>:<LFN>
"""
dirs = dirIDList
if not isinstance(dirIDList, list):
dirs = [dirIDList]
dirListString = ",".join([str(dir_) for dir_ in dirs])
treeTable = self.getTreeTable()
req = "SELECT D.DirName,F.FileName,F.FileID FROM FC_Files as F, %s as D WHERE D.DirID IN ( %s ) and D.DirID=F.DirID"
req = req % (treeTable, dirListString)
result = self.db._query(req)
if not result["OK"]:
return result
dirLfnDict = {}
idLfnDict = {}
for dir_, fname, fileID in result["Value"]:
dirLfnDict.setdefault(dir_, []).append(fname)
idLfnDict[fileID] = dir_ + "/" + fname
return S_OK({"DirLFNDict": dirLfnDict, "IDLFNDict": idLfnDict})
def _getDirectoryContents(self, path, details=False):
"""Get contents of a given directory"""
result = self.findDir(path)
if not result["OK"]:
return result
directoryID = result["Value"]
directories = {}
files = {}
links = {}
result = self.getChildren(path)
if not result["OK"]:
return result
# Get subdirectories
dirIDList = result["Value"]
for dirID in dirIDList:
result = self.getDirectoryPath(dirID)
if not result["OK"]:
return result
dirName = result["Value"]
if details:
result = self.getDirectoryParameters(dirID)
if not result["OK"]:
directories[dirName] = False
else:
directories[dirName] = result["Value"]
else:
directories[dirName] = True
result = self.db.fileManager.getFilesInDirectory(directoryID, verbose=details)
if not result["OK"]:
return result
files = result["Value"]
result = self.db.datasetManager.getDatasetsInDirectory(directoryID, verbose=details)
if not result["OK"]:
return result
datasets = result["Value"]
pathDict = {"Files": files, "SubDirs": directories, "Links": links, "Datasets": datasets}
return S_OK(pathDict)
def listDirectory(self, lfns, verbose=False):
"""Get the directory listing"""
successful = {}
failed = {}
for path in lfns:
result = self._getDirectoryContents(path, details=verbose)
if not result["OK"]:
failed[path] = result["Message"]
else:
successful[path] = result["Value"]
return S_OK({"Successful": successful, "Failed": failed})
def getDirectoryReplicas(self, lfns, allStatus=False):
"""Get replicas for files in the given directories"""
successful = {}
failed = {}
for path in lfns:
result = self.findDir(path)
if not result["OK"]:
failed[path] = result["Message"]
continue
directoryID = result["Value"]
result = self.db.fileManager.getDirectoryReplicas(directoryID, path, allStatus)
if not result["OK"]:
failed[path] = result["Message"]
continue
fileDict = result["Value"]
successful[path] = {}
for fileName in fileDict:
successful[path][fileName] = fileDict[fileName]
return S_OK({"Successful": successful, "Failed": failed})
def getDirectorySize(self, lfns, longOutput=False, rawFileTables=False, recursiveSum=True):
"""
        Get the total size of the requested directories. If the longOutput flag
        is True, also get the physical size per Storage Element
:param bool longOutput: if True, also fetches the physical size per SE
:param bool rawFileTables: if True, uses the File table instead of the pre-computed values
:param bool recursiveSum: if True (default), takes into account subdirectories
"""
start = time.time()
result = self.db._getConnection()
if not result["OK"]:
return result
connection = result["Value"]
if rawFileTables:
resultLogical = self._getDirectoryLogicalSize(lfns, recursiveSum=recursiveSum, connection=connection)
else:
resultLogical = self._getDirectoryLogicalSizeFromUsage(
lfns, recursiveSum=recursiveSum, connection=connection
)
if not resultLogical["OK"]:
connection.close()
return resultLogical
resultDict = resultLogical["Value"]
if not resultDict["Successful"]:
connection.close()
return resultLogical
if longOutput:
# Continue with only successful directories
if rawFileTables:
resultPhysical = self._getDirectoryPhysicalSize(
resultDict["Successful"], recursiveSum=recursiveSum, connection=connection
)
else:
resultPhysical = self._getDirectoryPhysicalSizeFromUsage(
resultDict["Successful"], recursiveSum=recursiveSum, connection=connection
)
if not resultPhysical["OK"]:
resultDict["QueryTime"] = time.time() - start
result = S_OK(resultDict)
result["Message"] = "Failed to get the physical size on storage"
connection.close()
return result
for lfn in resultPhysical["Value"]["Successful"]:
resultDict["Successful"][lfn]["PhysicalSize"] = resultPhysical["Value"]["Successful"][lfn]
connection.close()
resultDict["QueryTime"] = time.time() - start
return S_OK(resultDict)
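    # Illustrative usage sketch (not part of the original class): reading the
    # result of getDirectorySize() with the physical breakdown enabled; the
    # path is an assumption for the example.
    def _exampleReportDirectorySize(self, path):
        result = self.getDirectorySize([path], longOutput=True)
        if not result["OK"]:
            return result
        info = result["Value"]["Successful"].get(path, {})
        gLogger.notice(
            "Directory size",
            "%s: %s bytes in %s files"
            % (path, info.get("LogicalSize", 0), info.get("LogicalFiles", 0)),
        )
        return S_OK(info)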
def _getDirectoryLogicalSizeFromUsage(self, lfns, recursiveSum=True, connection=None):
"""Get the total "logical" size of the requested directories
:param recursiveSum: If false, don't take subdir into account
"""
if not recursiveSum:
return S_ERROR("Not implemented")
successful = {}
failed = {}
for path in lfns:
result = self.findDir(path)
if not result["OK"]:
failed[path] = "Directory not found"
continue
if not result["Value"]:
failed[path] = "Directory not found"
continue
dirID = result["Value"]
req = "SELECT SESize, SEFiles FROM FC_DirectoryUsage WHERE SEID=0 AND DirID=%d" % dirID
result = self.db._query(req, connection)
if not result["OK"]:
failed[path] = result["Message"]
elif not result["Value"]:
successful[path] = {"LogicalSize": 0, "LogicalFiles": 0, "LogicalDirectories": 0}
elif result["Value"][0][0]:
successful[path] = {
"LogicalSize": int(result["Value"][0][0]),
"LogicalFiles": int(result["Value"][0][1]),
}
result = self.countSubdirectories(dirID, includeParent=False)
if result["OK"]:
successful[path]["LogicalDirectories"] = result["Value"]
else:
successful[path]["LogicalDirectories"] = -1
else:
successful[path] = {"LogicalSize": 0, "LogicalFiles": 0, "LogicalDirectories": 0}
return S_OK({"Successful": successful, "Failed": failed})
def _getDirectoryLogicalSize(self, lfns, recursiveSum=True, connection=None):
"""Get the total "logical" size of the requested directories
:param bool recursiveSum: If false, don't take subdir into account
"""
if not recursiveSum:
return S_ERROR("Not implemented")
successful = {}
failed = {}
treeTable = self.getTreeTable()
for path in lfns:
if path == "/":
req = "SELECT SUM(Size),COUNT(*) FROM FC_Files"
reqDir = "SELECT count(*) FROM %s" % treeTable
else:
result = self.findDir(path)
if not result["OK"]:
failed[path] = "Directory not found"
continue
if not result["Value"]:
failed[path] = "Directory not found"
continue
dirID = result["Value"]
result = self.getSubdirectoriesByID(dirID, requestString=True, includeParent=True)
if not result["OK"]:
failed[path] = result["Message"]
continue
else:
dirString = result["Value"]
req = (
"SELECT SUM(F.Size),COUNT(*) FROM FC_Files as F JOIN (%s) as T WHERE F.DirID=T.DirID"
% dirString
)
reqDir = dirString.replace("SELECT DirID FROM", "SELECT count(*) FROM")
result = self.db._query(req, connection)
if not result["OK"]:
failed[path] = result["Message"]
elif not result["Value"]:
successful[path] = {"LogicalSize": 0, "LogicalFiles": 0, "LogicalDirectories": 0}
elif result["Value"][0][0]:
successful[path] = {
"LogicalSize": int(result["Value"][0][0]),
"LogicalFiles": int(result["Value"][0][1]),
}
result = self.db._query(reqDir, connection)
if result["OK"] and result["Value"]:
successful[path]["LogicalDirectories"] = result["Value"][0][0] - 1
else:
successful[path]["LogicalDirectories"] = -1
else:
successful[path] = {"LogicalSize": 0, "LogicalFiles": 0, "LogicalDirectories": 0}
return S_OK({"Successful": successful, "Failed": failed})
def _getDirectoryPhysicalSizeFromUsage(self, lfns, recursiveSum=True, connection=None):
"""Get the total size of the requested directories
:param recursiveSum: If false, don't take subdir into account
"""
if not recursiveSum:
return S_ERROR("Not implemented")
successful = {}
failed = {}
for path in lfns:
result = self.findDir(path)
if not result["OK"]:
failed[path] = "Directory not found"
continue
if not result["Value"]:
failed[path] = "Directory not found"
continue
dirID = result["Value"]
req = "SELECT S.SEID, S.SEName, D.SESize, D.SEFiles FROM FC_DirectoryUsage as D, FC_StorageElements as S"
req += " WHERE S.SEID=D.SEID AND D.DirID=%d" % dirID
result = self.db._query(req, connection)
if not result["OK"]:
failed[path] = result["Message"]
elif not result["Value"]:
successful[path] = {}
elif result["Value"][0][0]:
seDict = {}
totalSize = 0
totalFiles = 0
for seID, seName, seSize, seFiles in result["Value"]:
if seSize or seFiles:
seDict[seName] = {"Size": seSize, "Files": seFiles}
totalSize += seSize
totalFiles += seFiles
else:
req = "DELETE FROM FC_DirectoryUsage WHERE SEID=%d AND DirID=%d" % (seID, dirID)
result = self.db._update(req)
if not result["OK"]:
gLogger.error("Failed to delete entry from FC_DirectoryUsage", result["Message"])
seDict["TotalSize"] = int(totalSize)
seDict["TotalFiles"] = int(totalFiles)
successful[path] = seDict
else:
successful[path] = {}
return S_OK({"Successful": successful, "Failed": failed})
def _getDirectoryPhysicalSizeFromUsage_old(self, lfns, connection):
"""Get the total size of the requested directories"""
successful = {}
failed = {}
for path in lfns:
if path == "/":
req = "SELECT S.SEName, D.SESize, D.SEFiles FROM FC_DirectoryUsage as D, FC_StorageElements as S"
req += " WHERE S.SEID=D.SEID"
else:
result = self.findDir(path)
if not result["OK"]:
failed[path] = "Directory not found"
continue
if not result["Value"]:
failed[path] = "Directory not found"
continue
dirID = result["Value"]
result = self.getSubdirectoriesByID(dirID, requestString=True, includeParent=True)
if not result["OK"]:
return result
subDirString = result["Value"]
req = "SELECT S.SEName, D.SESize, D.SEFiles FROM FC_DirectoryUsage as D, FC_StorageElements as S"
req += " JOIN (%s) AS F" % subDirString
req += " WHERE S.SEID=D.SEID AND D.DirID=F.DirID"
result = self.db._query(req, connection)
if not result["OK"]:
failed[path] = result["Message"]
elif not result["Value"]:
successful[path] = {}
elif result["Value"][0][0]:
seDict = {}
totalSize = 0
totalFiles = 0
for seName, seSize, seFiles in result["Value"]:
sfDict = seDict.get(seName, {"Size": 0, "Files": 0})
sfDict["Size"] += seSize
sfDict["Files"] += seFiles
seDict[seName] = sfDict
totalSize += seSize
totalFiles += seFiles
seDict["TotalSize"] = int(totalSize)
seDict["TotalFiles"] = int(totalFiles)
successful[path] = seDict
else:
successful[path] = {}
return S_OK({"Successful": successful, "Failed": failed})
def _getDirectoryPhysicalSize(self, lfns, recursiveSum=True, connection=None):
"""Get the total size of the requested directories
:param recursiveSum: If false, don't take subdir into account
"""
if not recursiveSum:
return S_ERROR("Not implemented")
successful = {}
failed = {}
for path in lfns:
if path == "/":
req = "SELECT SUM(F.Size),COUNT(F.Size),S.SEName from FC_Files as F, FC_Replicas as R, FC_StorageElements as S "
req += "WHERE R.SEID=S.SEID AND F.FileID=R.FileID "
req += "GROUP BY S.SEID"
else:
result = self.findDir(path)
if not result["OK"]:
failed[path] = "Directory not found"
continue
if not result["Value"]:
failed[path] = "Directory not found"
continue
dirID = result["Value"]
result = self.getSubdirectoriesByID(dirID, requestString=True, includeParent=True)
if not result["OK"]:
failed[path] = result["Message"]
continue
else:
dirString = result["Value"]
req = (
"SELECT SUM(F.Size),COUNT(F.Size),S.SEName from FC_Files as F, FC_Replicas as R, FC_StorageElements as S JOIN (%s) as T "
% dirString
)
req += "WHERE R.SEID=S.SEID AND F.FileID=R.FileID AND F.DirID=T.DirID "
req += "GROUP BY S.SEID"
result = self.db._query(req, connection)
if not result["OK"]:
failed[path] = result["Message"]
elif not result["Value"]:
successful[path] = {}
elif result["Value"][0][0]:
seDict = {}
totalSize = 0
totalFiles = 0
for size, files, seName in result["Value"]:
seDict[seName] = {"Size": int(size), "Files": int(files)}
totalSize += size
totalFiles += files
seDict["TotalSize"] = int(totalSize)
seDict["TotalFiles"] = int(totalFiles)
successful[path] = seDict
else:
successful[path] = {}
return S_OK({"Successful": successful, "Failed": failed})
def _rebuildDirectoryUsage(self):
"""Recreate and replenish the Storage Usage tables"""
req = "DROP TABLE IF EXISTS FC_DirectoryUsage_backup"
result = self.db._update(req)
req = "RENAME TABLE FC_DirectoryUsage TO FC_DirectoryUsage_backup"
result = self.db._update(req)
req = "CREATE TABLE `FC_DirectoryUsage` LIKE `FC_DirectoryUsage_backup`"
result = self.db._update(req)
if not result["OK"]:
return result
result = self.__rebuildDirectoryUsageLeaves()
if not result["OK"]:
return result
result = self.db.dtree.findDir("/")
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Directory / not found")
dirID = result["Value"]
result = self.__rebuildDirectoryUsage(dirID)
gLogger.verbose("Finished rebuilding Directory Usage")
return result
def __rebuildDirectoryUsageLeaves(self):
"""Rebuild DirectoryUsage entries for directories having files"""
req = "SELECT DISTINCT(DirID) FROM FC_Files"
result = self.db._query(req)
if not result["OK"]:
return result
dirIDs = [x[0] for x in result["Value"]]
gLogger.verbose("Starting rebuilding Directory Usage, number of visible directories %d" % len(dirIDs))
insertFields = ["DirID", "SEID", "SESize", "SEFiles", "LastUpdate"]
insertCount = 0
insertValues = []
count = 0
empty = 0
for dirID in dirIDs:
count += 1
# Get the physical size
req = "SELECT SUM(F.Size),COUNT(F.Size),R.SEID from FC_Files as F, FC_Replicas as R "
req += "WHERE F.FileID=R.FileID AND F.DirID=%d GROUP BY R.SEID" % int(dirID)
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
empty += 1
for seSize, seFiles, seID in result["Value"]:
insertValues = [dirID, seID, seSize, seFiles, "UTC_TIMESTAMP()"]
result = self.db.insertFields("FC_DirectoryUsage", insertFields, insertValues)
if not result["OK"]:
if "Duplicate" in result["Message"]:
req = "UPDATE FC_DirectoryUsage SET SESize=%d, SEFiles=%d, LastUpdate=UTC_TIMESTAMP()" % (
seSize,
seFiles,
)
req += " WHERE DirID=%s AND SEID=%s" % (dirID, seID)
result = self.db._update(req)
if not result["OK"]:
return result
return result
# Get the logical size
req = "SELECT SUM(Size),COUNT(Size) from FC_Files WHERE DirID=%d " % int(dirID)
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Empty directory")
seSize, seFiles = result["Value"][0]
insertValues = [dirID, 0, seSize, seFiles, "UTC_TIMESTAMP()"]
result = self.db.insertFields("FC_DirectoryUsage", insertFields, insertValues)
if not result["OK"]:
if "Duplicate" in result["Message"]:
req = "UPDATE FC_DirectoryUsage SET SESize=%d, SEFiles=%d, LastUpdate=UTC_TIMESTAMP()" % (
seSize,
seFiles,
)
req += " WHERE DirID=%s AND SEID=0" % dirID
result = self.db._update(req)
if not result["OK"]:
return result
else:
return result
gLogger.verbose("Processed %d directories, %d empty " % (count, empty))
return S_OK()
def __rebuildDirectoryUsage(self, directoryID):
"""Rebuild DirectoryUsage entries recursively for the given path"""
result = self.getChildren(directoryID)
if not result["OK"]:
return result
dirIDs = result["Value"]
resultDict = {}
for dirID in dirIDs:
result = self.__rebuildDirectoryUsage(dirID)
if not result["OK"]:
return result
dirDict = result["Value"]
for seID in dirDict:
resultDict.setdefault(seID, {"Size": 0, "Files": 0})
resultDict[seID]["Size"] += dirDict[seID]["Size"]
resultDict[seID]["Files"] += dirDict[seID]["Files"]
insertFields = ["DirID", "SEID", "SESize", "SEFiles", "LastUpdate"]
insertValues = []
for seID in resultDict:
size = resultDict[seID]["Size"]
files = resultDict[seID]["Files"]
req = "UPDATE FC_DirectoryUsage SET SESize=SESize+%d, SEFiles=SEFiles+%d WHERE DirID=%d AND SEID=%d"
req = req % (size, files, directoryID, seID)
result = self.db._update(req)
if not result["OK"]:
return result
if not result["Value"]:
insertValues = [directoryID, seID, size, files, "UTC_TIMESTAMP()"]
result = self.db.insertFields("FC_DirectoryUsage", insertFields, insertValues)
if not result["OK"]:
return result
req = "SELECT SEID,SESize,SEFiles from FC_DirectoryUsage WHERE DirID=%d" % directoryID
result = self.db._query(req)
if not result["OK"]:
return result
resultDict = {}
for seid, size, files in result["Value"]:
resultDict[seid] = {"Size": size, "Files": files}
return S_OK(resultDict)
def getDirectoryCounters(self, connection=False):
"""Get the total number of directories"""
conn = self._getConnection(connection)
resultDict = {}
req = "SELECT COUNT(*) from FC_DirectoryInfo"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Directories"] = res["Value"][0][0]
treeTable = self.getTreeTable()
req = "SELECT COUNT(DirID) FROM %s WHERE Parent NOT IN ( SELECT DirID from %s )" % (treeTable, treeTable)
req += " AND DirID <> 1"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Orphan Directories"] = res["Value"][0][0]
req = "SELECT COUNT(DirID) FROM %s WHERE DirID NOT IN ( SELECT Parent from %s )" % (treeTable, treeTable)
req += " AND DirID NOT IN ( SELECT DirID from FC_Files ) "
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Empty Directories"] = res["Value"][0][0]
req = "SELECT COUNT(DirID) FROM %s WHERE DirID NOT IN ( SELECT DirID FROM FC_DirectoryInfo )" % treeTable
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["DirTree w/o DirInfo"] = res["Value"][0][0]
req = "SELECT COUNT(DirID) FROM FC_DirectoryInfo WHERE DirID NOT IN ( SELECT DirID FROM %s )" % treeTable
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["DirInfo w/o DirTree"] = res["Value"][0][0]
return S_OK(resultDict)
| ic-hep/DIRAC | src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/DirectoryManager/DirectoryTreeBase.py | Python | gpl-3.0 | 46,573 |
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('WebKit2', '4.0')
from gi.repository import Gtk
from gi.repository import WebKit2 as WebKit
from src.Utils import Browser
COOKIES_TEXT = 0
COOKIES_SQLITE = 1
class MLBrowser():
def __init__(self):
self.window = Gtk.Window()
self.view = WebKit.WebView()
self._interfaceInit()
self.close_condition = lambda title : True
self.close_callback = lambda _ : True
self.view.connect('load_changed', self.onLoadChanged)
def _interfaceInit(self):
scroll = Gtk.ScrolledWindow()
scroll.add_with_viewport(self.view)
self.window.add(scroll)
def saveCookiesTo(self, savepath):
context = self.view.get_context()
cookie_manager = context.get_cookie_manager()
storage = WebKit.CookiePersistentStorage(COOKIES_TEXT)
cookie_manager.set_persistent_storage(savepath, storage)
def show(self):
self.window.show_all()
def close(self):
self.close_callback(self)
self.window.close()
def load(self, url):
self.view.load_uri(url)
def getUserAgent(self):
settings = self.view.get_settings()
return settings.get_user_agent()
def setCloseCondition(self, func):
self.close_condition = func
def setCloseCallback(self, func):
self.close_callback = func
def onLoadChanged(self, *args, **kwargs):
title = self.view.get_title()
if self.close_condition(title):
self.close()
def start():
browser = MLBrowser()
return browser
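# Illustrative usage sketch (not part of the original module): driving the
# browser from a script; the URL, cookie path, close condition and the
# Gtk.main() loop are assumptions for the example.
def example_login_flow():
    browser = start()
    browser.saveCookiesTo('/tmp/cookies.txt')
    browser.setCloseCondition(lambda title: bool(title) and 'Welcome' in title)
    browser.setCloseCallback(lambda _: Gtk.main_quit())
    browser.load('https://example.com/login')
    browser.show()
    Gtk.main()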
| fdibaldassarre/mload | src/Utils/Browser.py | Python | gpl-3.0 | 1,525 |
"""
owtf.lib.exceptions
~~~~~~~~~~~~~~~~~~~
Declares the framework exceptions and HTTP errors
"""
try:
from http.client import responses
except ImportError:
from httplib import responses
import tornado.web
class FrameworkException(Exception):
def __init__(self, value):
self.parameter = value
def __repr__(self):
return self.parameter
class APIError(tornado.web.HTTPError):
    """Equivalent to ``tornado.web.HTTPError``, except in name"""
def api_assert(condition, *args, **kwargs):
"""Assertion to fail with if not ``condition``
Asserts that ``condition`` is ``True``, else raises an ``APIError``
with the provided ``args`` and ``kwargs``
:type condition: bool
"""
if not condition:
raise APIError(*args, **kwargs)
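def _example_api_assert_usage(handler):
    """Illustrative sketch (not part of the original module): a typical
    api_assert() call inside a tornado RequestHandler method; the argument
    name and status code are assumptions for the example."""
    target_url = handler.get_argument("url", default=None)
    api_assert(target_url, 400, log_message="`url` is required")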
class FrameworkAbortException(FrameworkException):
pass
class PluginAbortException(FrameworkException):
pass
class UnreachableTargetException(FrameworkException):
pass
class UnresolvableTargetException(FrameworkException):
pass
class DBIntegrityException(FrameworkException):
pass
class InvalidTargetReference(FrameworkException):
pass
class InvalidSessionReference(FrameworkException):
pass
class InvalidTransactionReference(FrameworkException):
pass
class InvalidParameterType(FrameworkException):
pass
class InvalidWorkerReference(FrameworkException):
pass
class InvalidErrorReference(FrameworkException):
pass
class InvalidWorkReference(FrameworkException):
pass
class InvalidConfigurationReference(FrameworkException):
pass
class InvalidUrlReference(FrameworkException):
pass
class InvalidActionReference(FrameworkException):
pass
class InvalidMessageReference(FrameworkException):
pass
class InvalidMappingReference(FrameworkException):
pass
class DatabaseNotRunningException(Exception):
pass
class PluginException(Exception):
pass
class PluginsDirectoryDoesNotExist(PluginException):
"""The specified plugin directory does not exist."""
class PluginsAlreadyLoaded(PluginException):
"""`load_plugins()` called twice."""
| owtf/owtf | owtf/lib/exceptions.py | Python | bsd-3-clause | 2,149 |
import discord
from discord.ext import commands
from discord.ext.commands import Group, Paginator
import difflib
import itertools
# Use DefaultHelpCommand?
class HelpCommand(commands.HelpCommand):
'''Custom Help Command'''
def __init__(self, **options):
attrs = options.setdefault("command_attrs", {})
attrs.setdefault("help", "Shows this message\n"
"Inputs in angle brackets, <>, are required\n"
"Inputs in square brackets, [], are optional\n"
"If you are not currently able to use a command in the channel where you executed help, "
"it will not be displayed in the corresponding help message")
super().__init__(**options)
# TODO: Mitigate code block cutoff issue
def command_not_found(self, string):
ctx = self.context
output = f"No command called `{string}` found"
close_matches = difflib.get_close_matches(string, ctx.bot.all_commands.keys(), n = 1)
if close_matches:
output += f"\nDid you mean `{close_matches[0]}`?"
return output
def subcommand_not_found(self, command, string):
if isinstance(command, Group) and command.all_commands:
return f"`{command.qualified_name}` command has no subcommand named {string}"
return f"`{command.qualified_name}` command has no subcommands"
def get_max_size(self, commands):
# Include subcommands
commands = commands.copy()
for command in commands.copy():
if isinstance(command, Group):
commands.extend(command.commands)
return super().get_max_size(commands)
async def send_bot_help(self, mapping):
ctx = self.context
description = " ".join(f"`{category}`" for category in sorted(ctx.bot.cogs, key = str.lower))
fields = (("For more info:", f"`{ctx.prefix}{self.invoked_with} [category]`\n"
f"`{ctx.prefix}{self.invoked_with} [command]`\n"
f"`{ctx.prefix}{self.invoked_with} [command] [subcommand]`"),
("Also see:", f"`{ctx.prefix}about`\n`"
f"{ctx.prefix}{self.invoked_with} help`\n"
f"`{ctx.prefix}{self.invoked_with} other`"), # TODO: Include stats?
("For all commands:", f"`{ctx.prefix}{self.invoked_with} all`", False))
await ctx.embed_reply(description, title = "Categories", fields = fields)
async def send_cog_help(self, cog):
ctx = self.context
paginator = Paginator(max_size = ctx.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT)
if cog.description:
paginator.add_line(cog.description, empty = True)
filtered_commands = await self.filter_commands(cog.get_commands(), sort = True)
self.add_commands(self.get_max_size(filtered_commands), ctx.bot.EMBED_DESCRIPTION_CODE_BLOCK_ROW_CHARACTER_LIMIT,
filtered_commands, paginator)
if not paginator.pages:
return await ctx.embed_reply(title = f"{cog.qualified_name} Commands")
# TODO: Response when no description or permitted commands in cog?
if len(paginator.pages) == 1:
return await ctx.embed_reply(title = f"{cog.qualified_name} Commands", description = paginator.pages[0])
await ctx.whisper(embed = discord.Embed(title = f"{cog.qualified_name} Commands", description = paginator.pages[0], color = ctx.bot.bot_color))
for page in paginator.pages[1:]:
await ctx.whisper(embed = discord.Embed(description = page, color = ctx.bot.bot_color))
if ctx.channel.type is not discord.ChannelType.private:
await ctx.embed_reply("Check your DMs")
async def send_group_help(self, group):
subcommands = await self.filter_commands(group.commands, sort = True)
if not subcommands:
return await self.send_command_help(group)
ctx = self.context
title = self.get_command_signature(group)
if not group.help:
description = group.description
else:
description = group.help
if " " in group.help:
description = ctx.bot.CODE_BLOCK.format(description)
description += '\n' + group.description
if len(description) <= ctx.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT:
embeds = [discord.Embed(title = title, description = description, color = ctx.bot.bot_color)]
else:
paginator = Paginator(max_size = ctx.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT)
paginator.add_line(group.help, empty = True)
paginator.close_page() # Necessary?
embeds = [discord.Embed(title = title, description = paginator.pages[0], color = ctx.bot.bot_color)]
for page in paginator.pages[1:-1]:
embeds.append(discord.Embed(description = page, color = ctx.bot.bot_color))
if len(paginator.pages[-1] + group.description) + 1 > ctx.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT:
embeds.append(discord.Embed(description = paginator.pages[-1], color = ctx.bot.bot_color))
embeds.append(discord.Embed(description = group.description, color = ctx.bot.bot_color))
else:
                embeds.append(discord.Embed(description = f"{paginator.pages[-1]}\n{group.description}",
                                            color = ctx.bot.bot_color))
max_width = self.get_max_size(subcommands)
paginator = Paginator(max_size = ctx.bot.EMBED_FIELD_VALUE_CHARACTER_LIMIT)
self.add_commands(max_width, ctx.bot.EMBED_FIELD_VALUE_CODE_BLOCK_ROW_CHARACTER_LIMIT,
subcommands, paginator)
embeds[-1].add_field(name = f"Subcommands for {group}", value = paginator.pages[0], inline = False)
for page in paginator.pages[1:]:
if len(embeds[-1]) > ctx.bot.EMBED_TOTAL_CHARACTER_LIMIT:
embeds.append(discord.Embed(color = ctx.bot.bot_color))
embeds[-1].add_field(name = ctx.bot.ZERO_WIDTH_SPACE, value = page, inline = False)
if len(embeds) == 1:
await ctx.channel.send(embed = embeds[0].set_author(name = ctx.author.display_name,
icon_url = ctx.author.display_avatar.url))
else:
for embed in embeds:
await ctx.whisper(embed = embed)
if ctx.channel.type is not discord.ChannelType.private:
await ctx.embed_reply("Check your DMs")
async def send_command_help(self, command):
ctx = self.context
title = self.get_command_signature(command)
if not command.help:
return await ctx.embed_reply(title = title, description = command.description)
description = command.help
if " " in command.help:
description = ctx.bot.CODE_BLOCK.format(description)
description += '\n' + command.description
if len(description) <= ctx.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT:
return await ctx.embed_reply(title = title, description = description)
paginator = Paginator(max_size = ctx.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT)
paginator.add_line(command.help, empty = True)
paginator.close_page() # Necessary?
await ctx.whisper(embed = discord.Embed(title = title,
description = paginator.pages[0], color = ctx.bot.bot_color))
for page in paginator.pages[1:-1]:
await ctx.whisper(embed = discord.Embed(description = page, color = ctx.bot.bot_color))
if len(paginator.pages[-1] + command.description) + 1 > ctx.bot.EMBED_DESCRIPTION_CHARACTER_LIMIT:
await ctx.whisper(embed = discord.Embed(description = paginator.pages[-1], color = ctx.bot.bot_color))
await ctx.whisper(embed = discord.Embed(description = command.description, color = ctx.bot.bot_color))
else:
await ctx.whisper(embed = discord.Embed(description = f"{paginator.pages[-1]}\n{command.description}",
color = ctx.bot.bot_color))
if ctx.channel.type is not discord.ChannelType.private:
await ctx.embed_reply("Check your DMs")
async def send_all_help(self):
ctx = self.context
def get_category(command):
return command.cog_name or f"{ctx.bot.ZERO_WIDTH_SPACE}No Category"
# Zero width space to position as last category when sorted
filtered_commands = await self.filter_commands(ctx.bot.commands, sort = True,
key = lambda c: get_category(c).lower())
embed = discord.Embed(title = "My Commands", color = ctx.bot.bot_color)
for category, commands in itertools.groupby(filtered_commands, key = get_category):
commands = sorted(commands, key = lambda c: c.name)
paginator = Paginator(max_size = ctx.bot.EMBED_FIELD_VALUE_CHARACTER_LIMIT)
self.add_commands(self.get_max_size(filtered_commands), ctx.bot.EMBED_FIELD_VALUE_CODE_BLOCK_ROW_CHARACTER_LIMIT,
commands, paginator)
total_category_characters = len(category) + len(paginator.pages) - 1 + len(paginator)
if (len(embed) + total_category_characters > ctx.bot.EMBED_TOTAL_CHARACTER_LIMIT or
len(embed.fields) + len(paginator.pages) > ctx.bot.EMBED_FIELD_AMOUNT_LIMIT):
await ctx.whisper(embed = embed)
embed = discord.Embed(color = ctx.bot.bot_color)
embed.add_field(name = category, value = paginator.pages[0], inline = False)
for page in paginator.pages[1:]:
embed.add_field(name = ctx.bot.ZERO_WIDTH_SPACE, value = page, inline = False)
await ctx.whisper(embed = embed)
if ctx.channel.type is not discord.ChannelType.private:
await ctx.embed_reply("Check your DMs")
def add_commands(self, max_width, line_limit, commands, paginator):
lines = []
# Add 3 for "┣ "/"┗ "
for command in commands:
if isinstance(command, Group) and command.commands:
max_width += 3
break
for command in commands:
prefix = "┃ " if isinstance(command, Group) and command.commands else " "
buffer = 2 if isinstance(command, Group) and command.commands else 0
line = f"{command.name:<{max_width}} {command.short_doc}"
lines.extend(self.wrap_line(line, max_width, line_limit, prefix, buffer))
# Add subcommands of commands
if isinstance(command, Group) and command.commands:
subcommands = sorted(command.commands, key = lambda c: c.name)
for subcommand in subcommands[:-1]:
line = f"┣ {subcommand.name:<{max_width - 2}} {subcommand.short_doc}"
lines.extend(self.wrap_line(line, max_width, line_limit, "┃ ", 1))
line = f"┗ {subcommands[-1].name:<{max_width - 2}} {subcommands[-1].short_doc}"
lines.extend(self.wrap_line(line, max_width, line_limit, " ", 0))
for line in lines:
paginator.add_line(line)
def wrap_line(self, line, max_width, limit, prefix, buffer):
ctx = self.context
if '┣' in prefix + line or '┗' in prefix + line:
limit -= 1
if len(line) <= limit:
return [line]
cutoff = line[:limit].rfind(' ')
lines = [line[:cutoff]]
while len(prefix) + max_width + 2 - buffer + len(line[cutoff + 1:]) >= limit:
new_cutoff = line[:cutoff + limit - len(prefix) - max_width - 2 + buffer].rfind(' ')
lines.append(prefix + ' ' * (max_width + 2 - buffer) + line[cutoff + 1:new_cutoff])
cutoff = new_cutoff
lines.append(prefix + ' ' * (max_width + 2 - buffer) + line[cutoff + 1:])
return lines
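    # Rough sketch of the layout add_commands and wrap_line aim for
    # (command names and docstrings are hypothetical):
    #   tag       Manage tags
    #   ┣ add     Add a tag to the server
    #   ┗ remove  Remove a tag from the server
    # Group commands get their subcommands listed beneath them with the
    # box-drawing prefixes, and overlong description lines are wrapped so the
    # description column stays aligned.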
# @commands.bot_has_permissions(embed_links = True)
async def command_callback(self, ctx, *, command : str = None):
await self.prepare_help_command(ctx, command)
if command == "all":
'''All commands'''
return await self.send_all_help()
if command == "other":
'''Additional commands and information'''
# TODO: Update
# TODO: Add last updated date?
fields = (("Conversion Commands", f"see `{ctx.prefix}conversions`", False),
("In Progress", "gofish redditsearch roleposition rolepositions taboo userlimit webmtogif whatis", False),
("Misc", "invite randomgame test test_on_message", False),
("Owner Only", "allcommands changenickname deletetest cleargame clearstreaming echo eval exec load reload repl restart servers setgame setstreaming shutdown unload updateavatar", False),
("No Prefix", "@Harmonbot :8ball: (exactly: f|F) (anywhere in message: getprefix)", False))
return await ctx.embed_reply(f"See `{ctx.prefix}help` for the main commands",
title = f"Commands not in {ctx.prefix}help", fields = fields)
if not command:
mapping = self.get_bot_mapping()
return await self.send_bot_help(mapping)
cog = ctx.bot.get_cog(command)
if cog:
return await self.send_cog_help(cog)
keys = command.split()
command = ctx.bot.all_commands.get(keys[0])
if not command:
# TODO: Use entire input?
cog = discord.utils.find(lambda c: c[0].lower() == keys[0].lower(), ctx.bot.cogs.items())
if cog:
cog = cog[1]
return await self.send_cog_help(cog)
return await ctx.embed_reply(self.command_not_found(self.remove_mentions(keys[0])))
for key in keys[1:]:
if not isinstance(command, Group) or key not in command.all_commands:
# TODO: Pass aliases used?
return await ctx.embed_reply(self.subcommand_not_found(command, self.remove_mentions(key)))
command = command.all_commands[key]
# TODO: Pass alias used?
if isinstance(command, Group):
return await self.send_group_help(command)
else:
return await self.send_command_help(command)
| Harmon758/Harmonbot | Discord/utilities/help_command.py | Python | mit | 12,533 |
# -*- coding: utf-8 -*-
import os
import sys
import shutil
from django.core.management.base import AppCommand
from django.core.management.color import color_style
from django_extensions.management.utils import _make_writeable, signalcommand
class Command(AppCommand):
help = "Creates a Django jobs command directory structure for the given app name in the current directory."
requires_system_checks = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = True
@signalcommand
def handle_app_config(self, app, **options):
copy_template('jobs_template', app.path, **options)
def copy_template(template_name, copy_to, **options):
"""copies the specified template directory to the copy_to location"""
import django_extensions
style = color_style()
ERROR = getattr(style, 'ERROR', lambda x: x)
SUCCESS = getattr(style, 'SUCCESS', lambda x: x)
template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
# walks the template structure and copies it
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir) + 1:]
if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
os.mkdir(os.path.join(copy_to, relative_dir))
        # prune hidden directories in place so os.walk does not descend into them
        subdirs[:] = [s for s in subdirs if not s.startswith('.')]
for f in files:
if f.endswith('.pyc') or f.startswith('.DS_Store'):
continue
path_old = os.path.join(d, f)
            path_new = os.path.join(copy_to, relative_dir, f)
            if path_new.endswith(".tmpl"):
                path_new = path_new[:-len(".tmpl")]
if os.path.exists(path_new):
if options.get('verbosity', 1) > 1:
print(ERROR("%s already exists" % path_new))
continue
if options.get('verbosity', 1) > 1:
print(SUCCESS("%s" % path_new))
with open(path_old, 'r') as fp_orig:
with open(path_new, 'w') as fp_new:
fp_new.write(fp_orig.read())
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
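# Usage sketch (the app name is hypothetical): with django_extensions in
# INSTALLED_APPS, running
#     python manage.py create_jobs myapp
# copies the bundled jobs_template directory structure into the app, skipping
# files that already exist and printing each copied path at verbosity > 1.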
| neilpelow/wmap-django | venv/lib/python3.5/site-packages/django_extensions/management/commands/create_jobs.py | Python | gpl-3.0 | 2,426 |
import ldap
import ldap.modlist
import collections
import random
# Example:
# with provision("ldap://localhost", "cn=admin,dc=nodomain", "foobar") as p:
# c = p.container(attr={"description":"foo"})
# l = p.leaf(c, attr={"description":"bar"})
def salt():
rng = random.SystemRandom()
return rng.randrange(100000000)
class LdapObject(object):
def __repr__(self):
return self.dn
def __iter__(self):
return
yield
class Container(LdapObject):
def __init__(self, parent, name, objectClass, dnComponent, attr, anAttribute):
if name:
self.name = name
else:
self.name = "Container%s" % salt()
self.objectClass = objectClass
self.dnComponent = dnComponent
self.anAttribute = anAttribute
self.parent = parent
self.rdn = "%s=%s" % (dnComponent, self.name)
self.dn = "%s,%s" % (self.rdn, parent)
if attr:
self.attr = attr
else:
self.attr = {}
self.attr["objectClass"] = objectClass
self.attr[dnComponent] = self.name
self.children = []
def append(self, child):
        self.children.append(child)
def __iter__(self):
return self.children.__iter__()
class Leaf(LdapObject):
def __init__(self, parent, name, objectClass, dnComponent, attr, anAttribute):
if name:
self.name = name
else:
self.name = "Leaf%s" % salt()
self.objectClass = objectClass
self.dnComponent = dnComponent
self.anAttribute = anAttribute
self.parent = parent
self.rdn = "%s=%s" % (dnComponent, self.name)
self.dn = "%s,%s" % (self.rdn, parent)
if attr:
self.attr = attr
else:
self.attr = {}
self.attr["objectClass"] = objectClass
self.attr[dnComponent] = self.name
class Provisioning:
def __init__(self, uri, bindDN, password):
self.ldap = ldap.initialize (uri)
self.ldap.simple_bind_s (bindDN, password)
self.provisionedDNs = collections.deque()
def add(self, obj):
ldif = ldap.modlist.addModlist(obj.attr)
self.ldap.add_s(obj.dn, ldif)
self.provisionedDNs.appendleft(obj)
def delete(self, dn):
try:
results = self.ldap.search_s(str(dn), ldap.SCOPE_BASE)
print "DEBUG: Deleting object %s: %s" % (dn, results)
self.ldap.delete_s(str(dn))
except ldap.NO_SUCH_OBJECT:
print "WARNING: %s already deleted" % dn
def attribute(self, dn, attribute):
results = self.ldap.search_s(str(dn), ldap.SCOPE_BASE, attrlist = [attribute])
_, attributes = results[0]
try:
return attributes[attribute]
except KeyError:
return []
def exists (self, dn):
try:
result = self.ldap.search_s(str(dn), ldap.SCOPE_BASE, attrlist = ["objectClass"])
assert result[0][1]["objectClass"]
return True
except ldap.NO_SUCH_OBJECT:
return False
def container(self, parent = None, name = None,
objectClass = "organizationalUnit", dnComponent="ou",
anAttribute = "description", attr = None):
if not parent:
parent = self.root
c = Container(parent, name=name, attr=attr, objectClass = objectClass,
dnComponent = dnComponent, anAttribute = anAttribute)
self.add(c)
return c
def leaf(self, parent = None, name = None,
objectClass = "organizationalRole", dnComponent="cn",
anAttribute = "description", attr = None):
if not parent:
parent = self.root
l = Leaf(parent, name=name, attr=attr, objectClass=objectClass,
dnComponent=dnComponent, anAttribute=anAttribute)
self.add(l)
return l
def unprovision(self):
for obj in self.provisionedDNs:
self.delete(obj)
@property
def root(self):
results = self.ldap.search_s ("", ldap.SCOPE_BASE, attrlist = ["namingContexts"])
roots = results[0][1]["namingContexts"]
return roots[0]
class provision():
def __init__(self, uri, bindDN, password):
self.p = Provisioning(uri, bindDN, password)
def __enter__(self):
return self.p
def __exit__(self, type, value, traceback):
self.p.unprovision()
return False
| rootmos/ldapy | test/provisioning.py | Python | gpl-3.0 | 4,529 |
"""custom command to build doc.zip file"""
#=============================================================================
# imports
#=============================================================================
# core
import os
from distutils import dir_util
from distutils.cmd import Command
from distutils.errors import *
from distutils.spawn import spawn
# local
__all__ = [
"docdist"
]
#=============================================================================
# command
#=============================================================================
class docdist(Command):
description = "create zip file containing standalone html docs"
user_options = [
('build-dir=', None, 'Build directory'),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('sign', 's', 'sign files using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
def initialize_options(self):
self.build_dir = None
self.dist_dir = None
self.format = None
self.keep_temp = False
self.sign = False
self.identity = None
def finalize_options(self):
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
if self.build_dir is None:
cmd = self.get_finalized_command('build')
self.build_dir = os.path.join(cmd.build_base, 'docdist')
if not self.dist_dir:
self.dist_dir = "dist"
if not self.format:
self.format = "zip"
def run(self):
# call build sphinx to build docs
self.run_command("build_sphinx")
cmd = self.get_finalized_command("build_sphinx")
source_dir = cmd.builder_target_dir
# copy to directory with appropriate name
dist = self.distribution
arc_name = "%s-docs-%s" % (dist.get_name(), dist.get_version())
tmp_dir = os.path.join(self.build_dir, arc_name)
if os.path.exists(tmp_dir):
dir_util.remove_tree(tmp_dir, dry_run=self.dry_run)
self.copy_tree(source_dir, tmp_dir, preserve_symlinks=True)
# make archive from dir
arc_base = os.path.join(self.dist_dir, arc_name)
self.arc_filename = self.make_archive(arc_base, self.format,
self.build_dir)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", self.arc_filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# cleanup
if not self.keep_temp:
dir_util.remove_tree(tmp_dir, dry_run=self.dry_run)
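# Usage sketch, assuming the command is registered in the project's setup.py
# via cmdclass={'docdist': docdist}:
#     python setup.py docdist --format=zip --sign --identity="Jane Doe"
# runs build_sphinx, copies the HTML tree into build/docdist/<name>-docs-<ver>,
# archives it under dist/, and optionally produces a detached GPG signature.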
#=============================================================================
# eof
#=============================================================================
| cgstudiomap/cgstudiomap | main/eggs/passlib-1.6.5-py2.7.egg/passlib/_setup/docdist.py | Python | agpl-3.0 | 3,109 |
'''
development-py (c) University of Manchester 2017
development-py is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-few-public-methods
import sys
import cobra
from cobra.util.solver import linear_reaction_coefficients
from cobra.flux_analysis.single_deletion import single_gene_deletion
# from cobra.flux_analysis.double_deletion import double_gene_deletion
class Analyser(object):
'''Class to analyse a metabolic model.'''
def __init__(self, filename):
self.__model = cobra.io.read_sbml_model(filename)
def analyse(self, obj='EX_pinocembrin', obj_low_bound_pc=0.8):
'''Analyse model.'''
solution = self.__model.optimize()
print solution
print
# Fix objective lower bounds:
initial_coeffs = linear_reaction_coefficients(self.__model)
for coeff in initial_coeffs:
coeff.lower_bound = solution.fluxes[coeff.id] * obj_low_bound_pc
self.__model.objective = obj
solution = self.__model.optimize()
print solution
print
self.__model.summary()
print
for initial_coeff in initial_coeffs:
print initial_coeff.id + '\t' + \
str(solution.fluxes[initial_coeff.id])
print
fluxes = solution.fluxes[abs(solution.fluxes) >
solution.objective_value / 100].to_frame()
fluxes = fluxes.reset_index()
fluxes['reaction'] = fluxes['index'].apply(self.__get_react_str)
fluxes.sort_values(by='fluxes').to_csv('fluxes.csv', index=False)
single_del = single_gene_deletion(self.__model)
single_del.to_csv('single.csv')
# double_del = double_gene_deletion(self.__model, return_frame=True)
# double_del.to_csv('double.csv')
def __get_react_str(self, react_id):
'''Get reaction string from reaction id.'''
reaction = self.__model.reactions.get_by_id(react_id)
return reaction.build_reaction_string()
def main(args):
'''main method.'''
analyser = Analyser(args[0])
analyser.analyse()
if __name__ == '__main__':
main(sys.argv[1:])
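# Usage sketch (the model path is hypothetical):
#     python analyse.py my_model.sbml.xml
# optimises the model, pins the original objective at 80 % of its optimum,
# re-optimises for EX_pinocembrin and writes fluxes.csv and single.csv to the
# working directory.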
| neilswainston/development-py | synbiochemdev/fba/analyse.py | Python | mit | 2,258 |
def main() -> None:
"""This is the entry point of the application!
Raises:
:class:`KeyboardInterrupt`, :class:`SystemExit`
"""
pass
| icgood/continuous-docs | docpkg/main.py | Python | unlicense | 160 |
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
#
# $Id: test_ANDI.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the ANDI opcode.
"""
import base_test
from registers import Reg, SREG
class ANDI_TestFail(base_test.TestFail): pass
class base_ANDI(base_test.opcode_test):
"""Generic test case for testing ANDI opcode.
ANDI - Logical AND with Immediate
opcode is '0111 KKKK dddd KKKK' where 16 <= d <= 31, 0 <= K <= 255
Only registers PC, Rd and SREG should be changed.
"""
def setup(self):
# Set SREG to have only V set (opcode should clear it)
self.setup_regs[Reg.SREG] = 1 << SREG.V
# Set the register values
self.setup_regs[self.Rd] = self.Vd
# Return the raw opcode
return 0x7000 | ((self.Rd - 16) << 4) | ((self.Vk & 0xf0) << 4) | (self.Vk & 0xf)
def analyze_results(self):
self.reg_changed.extend( [self.Rd, Reg.SREG] )
# check that result is correct
expect = ((self.Vd & self.Vk) & 0xff)
got = self.anal_regs[self.Rd]
if expect != got:
self.fail('ANDI r%02d, 0x%02x: 0x%02x & 0x%02x = (expect=%02x, got=%02x)' % (
self.Rd, self.Vk, self.Vd, self.Vk, expect, got))
expect_sreg = 0
# calculate what we expect sreg to be (I, T, H, V and C should be zero)
V = 0
N = ((expect & 0x80) != 0)
expect_sreg += N << SREG.N
expect_sreg += (N ^ V) << SREG.S
expect_sreg += (expect == 0) << SREG.Z
got_sreg = self.anal_regs[Reg.SREG]
if expect_sreg != got_sreg:
self.fail('ANDI r%02d, 0x%02x: 0x%02x + 0x%02x -> SREG (expect=%02x, got=%02x)' % (
self.Rd, self.Vk, self.Vd, self.Vk, expect_sreg, got_sreg))
#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """
class ANDI_r%02d_v%02x_k%02x_TestFail(ANDI_TestFail): pass
class test_ANDI_r%02d_v%02x_k%02x(base_ANDI):
Rd = %d
Vd = 0x%x
Vk = 0x%x
def fail(self,s):
raise ANDI_r%02d_v%02x_k%02x_TestFail, s
"""
#
# Define a list of test values such that we test all the cases of SREG bits being set.
#
vals = (
( 0x00, 0x00 ),
( 0xff, 0x00 ),
( 0xfe, 0x01 ),
( 0x0f, 0x00 ),
( 0x0f, 0xf0 ),
( 0x01, 0x02 ),
( 0x80, 0x80 )
)
#
# automagically generate the test_ANDI_rNN_vXX_rrNN_kXX class definitions.
#
code = ''
for d in range(16,32):
for vd,vk in vals:
args = (d,vd,vk)*4
code += template % args
exec code
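# For illustration: the first (d, vd, vk) combination, (16, 0x00, 0x00),
# expands the template into a class named test_ANDI_r16_v00_k00 with Rd = 16,
# Vd = 0x0 and Vk = 0x0; 16 registers x 7 value pairs gives 112 generated
# test cases in total.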
| zouppen/simulavr | regress/test_opcodes/test_ANDI.py | Python | gpl-2.0 | 3,274 |
"""Contains the DeviceTypeParser class, for parsing reg-*.bld files."""
import os
import fnmatch
import logging
from bioloid.device_type import DeviceTypes, DeviceType
from bioloid.register import get_register_class
from bioloid.parse_utils import parse_int
DEBUG = True
class DeviceTypeParser(object):
"""Parses a reg-*.bld file to create DeviceTypes."""
def __init__(self, dev_types, log=None):
self.dev_types = dev_types
self.dev_type_name = None
self.model = None
self.registers = None
self.reg_offset = None
self.reg_name = None
self.reg_size = None
self.reg_access = None
self.reg_min = None
self.reg_max = None
self.line_num = 0
self.filename = None
self.error_encountered = False
self.log = log or logging.getLogger(__name__)
self.reset()
def reset(self):
"""Sets the parser back to its default state."""
self.dev_type_name = None
self.model = None
self.registers = []
def parse_dev_type_files(self, dirname):
"""Finds all files in dir which match reg-*.bld and parses the files
as device types.
"""
for filename in os.listdir(dirname):
fullname = os.path.join(dirname, filename)
if not os.path.isfile(fullname):
continue
if fnmatch.fnmatch(filename, "reg-*.bld"):
self.parse_file(fullname)
def parse_file(self, filename):
"""Parses a file and adds parsed device types to the dev_types
object passed into the constructor.
"""
self.filename = filename
self.line_num = 0
self.error_encountered = False
with open(filename) as bld_file:
for line in bld_file:
self.line_num += 1
# Strip comments
comment_idx = line.find("#")
if comment_idx >= 0:
line = line[0:comment_idx]
words = line.split()
if len(words) == 0:
# Ignore blank lines
continue
try:
self.parse_line(words)
except ValueError as ex:
self.log.error("Error: file '%s' line %d: %s",
self.filename, self.line_num, str(ex))
self.error_encountered = True
return not self.error_encountered
def parse_line(self, words):
"""Parses a single line from the file."""
if DEBUG:
self.log.debug("parse_line: %s", ' '.join(words))
cmd = words.pop(0)
if self.dev_type_name:
if cmd in DeviceTypeParser.dev_type_cmds:
DeviceTypeParser.dev_type_cmds[cmd](self, words)
return
raise ValueError("Unrecognized keyword: %s" % cmd)
if cmd == "DeviceType:":
self.parse_device_type(words)
return
raise ValueError("Unexpected keyword outside a device type: %s" % cmd)
def parse_device_type(self, words):
"""Parses the 'DeviceType:' keyword."""
if len(words) != 1:
raise ValueError("DeviceType: expecting 1 arguent")
self.dev_type_name = words[0]
def parse_model(self, words):
"""Parses the 'Model:' keyword."""
if len(words) != 1:
raise ValueError("Model: expecting 1 arguent")
self.model = parse_int(words[0], "model")
def parse_register(self, words):
"""Parses the Register: keyword."""
if len(words) < 4:
raise ValueError("Expecting offset, name, size, and access")
self.reg_offset = parse_int(words.pop(0), "offset")
self.reg_name = words.pop(0)
self.reg_size = parse_int(words.pop(0), "size")
if self.reg_size != 1 and self.reg_size != 2 and self.reg_size != 4:
raise ValueError("Register '%s' size must be 1, 2 or 4. Found: %s"
% (self.reg_name, self.reg_size))
self.reg_access = words.pop(0)
if self.reg_access != "ro" and self.reg_access != "rw":
raise ValueError("Register %s: access must be ro or rw. Found: %s"
% (self.reg_name, self.reg_access))
self.reg_min = None
self.reg_max = None
if len(words) == 2 or len(words) == 3:
self.reg_min = parse_int(words.pop(0), "min")
self.reg_max = parse_int(words.pop(0), "max")
elif len(words) > 1:
raise ValueError("Register " + self.reg_name +
": Expecting 'type' or 'min max type'. " +
"Found %d arguments" % len(words))
reg_type = words[0] if len(words) > 0 else ""
reg_class = get_register_class(reg_type)
if reg_class is None:
raise ValueError("Register %s: Unknown register type: '%s'"
% (self.reg_name, reg_type))
reg = reg_class(self.reg_offset, self.reg_name, self.reg_size,
self.reg_access, self.reg_min, self.reg_max)
self.registers.append(reg)
def parse_end_device_type(self, words):
"""Parses the 'EndDeviceType' keyword."""
if len(words) != 0:
raise ValueError("EndDeviceType: not expecting any arguents")
if self.error_encountered:
raise ValueError("Not adding device type due to errors.")
dev_type = DeviceType(self.dev_type_name, self.model,
self.registers)
self.dev_types.add(dev_type)
self.reset()
dev_type_cmds = {
"Model:": parse_model,
"Register:": parse_register,
"EndDeviceType": parse_end_device_type
}
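# Sketch of the reg-*.bld syntax this parser accepts, reconstructed from the
# handlers above ('#' starts a comment, blank lines are ignored, and the
# trailing register <type> names depend on what get_register_class() accepts):
#   DeviceType: <name>
#       Model: <number>
#       Register: <offset> <name> <size: 1|2|4> <ro|rw> [<min> <max>] [<type>]
#       ...
#   EndDeviceType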
def test_main():
"""Test function."""
from bioloid.log_setup import log_setup
log_setup(cfg_path='../logging.cfg')
log = logging.getLogger()
log.setLevel(logging.DEBUG)
dev_types = DeviceTypes()
parser = DeviceTypeParser(dev_types)
parser.parse_file('../reg-servo.bld')
dev_type = dev_types.get('servo')
dev_type.dump_regs()
if __name__ == '__main__':
test_main()
| dhylands/Bioloid | bioloid/device_type_parser.py | Python | mit | 6,252 |
"""Tango Sphinx extension to automatically generate documentation
from an HLAPI Tango Device class."""
# Imports
from importlib import import_module
from sphinx.util import force_decode
from sphinx.application import Sphinx
from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
from sphinx.ext.autodoc import ClassLevelDocumenter
from collections import defaultdict
# Mock
class BaseMock(object):
"""Mocking base class."""
def __init__(self, func=None, **kwargs):
"""Save kwargs and function documentation."""
self.kwargs = kwargs
self.func_doc = func.__doc__ if func else None
def __call__(self, func):
"""Decorator support."""
self.func_doc = func.__doc__
return self
def setter(self, func):
"""Decorator support."""
return self
def deleter(self, func):
"""Decorator support."""
return self
def __repr__(self):
"""Generate a readable representation."""
args = []
# Add type
name = type(self).__name__.replace('_', ' ')
base = "{}:\n".format(name.capitalize())
# Add kwargs
for key, value in sorted(self.kwargs.items()):
if key in ["doc", "fget", "fset", "fisallowed"]:
continue
if value == "":
value = "None"
try:
value = value.__name__
except AttributeError:
pass
args.append(" - {0} : {1}".format(key, value))
if not args:
return base[:-2] + '.'
return base + '\n'.join(args)
def get_doc(self, encoding=None):
"""Get the documentation from the object."""
doc = self.func_doc or self.kwargs.get('doc') or ''
return force_decode(doc, encoding)
# Tango mock
class class_property(BaseMock):
"""Mock for class property."""
pass
class device_property(BaseMock):
"""Mock for device property."""
class attribute(BaseMock):
"""Mock for TANGO attribute."""
def write(self, method):
pass
class command(BaseMock):
"""Mock for TANGO command."""
__tango_command__ = True
__name__ = "tango_command"
class DeviceMeta(type):
"""Mock for device metaclass."""
pass
class Device(object):
"""Mock for device class."""
__metaclass__ = DeviceMeta
def run_server(self, cls):
pass
# Monkey patching
def pytango_patch():
from PyTango import server
server.attribute = attribute
server.command = command
server.device_property = device_property
server.class_property = class_property
server.Device = Device
server.DeviceMeta = DeviceMeta
# Reload object
def reload_object(obj):
"""Reload an object if possible"""
if not isinstance(obj, type):
return obj
try:
module = reload(import_module(obj.__module__))
return getattr(module, obj.__name__)
except:
return obj
# Tango device documenter
class TangoDeviceDocumenter(ClassDocumenter):
""" Documenter for tango device classes."""
objtype = 'tangodevice'
directivetype = 'class'
section = "{0} Device Documentation"
valid_types = (attribute, class_property, device_property, command)
priority = ClassDocumenter.priority
priority += 1
def import_object(self):
reload(import_module(self.modname))
return ClassDocumenter.import_object(self)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
member = reload_object(member)
return isinstance(member, DeviceMeta)
def generate(self, more_content=None, real_modname=None,
check_module=False, all_members=False):
"""Patch to add a header."""
# Get object
if not self.parse_name() or not self.import_object():
return
# Add header
if all_members:
self.indent, temp = '', self.indent
section = self.section.format(self.object.__name__)
self.add_line(section, '<autodoc>')
self.add_line("*" * len(section), '<autodoc>')
self.indent = temp
# Generate documentation
ClassDocumenter.generate(self, more_content, real_modname,
check_module, all_members)
def filter_members(self, members, want_all):
"""Filter to keep only objects of valid types."""
filt = lambda arg: isinstance(arg[1], self.valid_types)
filtered_members = filter(filt, members)
return [(name, member, True) for name, member in filtered_members]
def document_members(self, all_members=False):
"""Prepare environment for automatic device documentation"""
if all_members:
self.options.member_order = 'groupwise'
self.env.config.autodevice = True
TangoItemDocumenter.reset()
ClassDocumenter.document_members(self, all_members)
# Tango item documenter
class TangoItemDocumenter(ClassLevelDocumenter):
"""Base class for documenting tango objects
(device properties, attirbutes and commands).
"""
objtype = 'tangoitem'
directivetype = 'attribute'
member_order = -1
types = [class_property, device_property, attribute, command]
priority = AttributeDocumenter.priority + 1
started = defaultdict(bool)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return any(isinstance(member, mocktype) for mocktype in cls.types)
@classmethod
def reset(cls):
cls.started.clear()
def import_object(self):
"""Load an object."""
# Get the object
if not ClassLevelDocumenter.import_object(self):
return False
# Reload modules
self.parent = reload_object(self.parent)
reload(import_module(self.modname))
# Get the new object
return ClassLevelDocumenter.import_object(self)
def generate(self, more_content=None, real_modname=None,
check_module=False, all_members=False):
"""Patch to add a header."""
# Get object
if not self.parse_name() or not self.import_object():
return
# Check if header needed
tangotype = type(self.object)
autodevice = getattr(self.env.config, 'autodevice', False)
if autodevice and not self.started[tangotype]:
# Tag as started
self.started[tangotype] = True
self.indent, temp = '', self.indent
# Add header
self.add_line(self.section, '<autodoc>')
self.add_line("-" * len(self.section), '<autodoc>')
self.indent = temp
# Generate documentation
ClassLevelDocumenter.generate(self, more_content, real_modname,
check_module, all_members)
def get_doc(self, encoding=None, ignore=1):
"""Patch to get the docs from the mock object."""
NL = '\n'
MU = ' |'
obj_repr = repr(self.object).replace(NL, NL+MU) + NL
obj_doc = self.object.get_doc(encoding) + NL
return [obj_repr.split(NL), obj_doc.split(NL)]
def add_content(self, more_content, no_docstring=False):
"""Patch to add the documentation from the mock object
before any other documentation."""
encoding = self.analyzer and self.analyzer.encoding
docstrings = self.get_doc(encoding)
for i, line in enumerate(self.process_doc(docstrings)):
self.add_line(line, '<autodoc>', i)
ClassLevelDocumenter.add_content(self, more_content, True)
# Tango class property documenter
class TangoClassPropertyDocumenter(TangoItemDocumenter):
priority = TangoItemDocumenter.priority + 1
objtype = 'tangoclassproperty'
section = "Class properties"
types = [class_property]
member_order = 60
# Tango property documenter
class TangoPropertyDocumenter(TangoItemDocumenter):
priority = TangoItemDocumenter.priority + 1
objtype = 'tangoproperty'
section = "Device properties"
types = [device_property]
member_order = 70
# Tango attribute documenter
class TangoAttributeDocumenter(TangoItemDocumenter):
priority = TangoItemDocumenter.priority + 1
objtype = 'tangoattribute'
section = "Attributes"
types = [attribute]
member_order = 80
# Tango command documenter
class TangoCommandDocumenter(TangoItemDocumenter):
priority = TangoItemDocumenter.priority + 1
objtype = 'tangocommand'
section = "Commands"
types = [command]
member_order = 90
# Setup the sphinx extension
def setup(app):
"""Sphinx extension setup function."""
if not isinstance(app, Sphinx):
return
pytango_patch()
app.add_autodocumenter(TangoDeviceDocumenter)
app.add_autodocumenter(TangoAttributeDocumenter)
app.add_autodocumenter(TangoPropertyDocumenter)
app.add_autodocumenter(TangoClassPropertyDocumenter)
app.add_autodocumenter(TangoCommandDocumenter)
app.add_autodocumenter(TangoItemDocumenter)
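# Usage sketch (module path and device class are hypothetical): with the
# extension enabled in conf.py, e.g. extensions = ['devicedoc.devicedoc'],
# a device can be documented with the directive Sphinx derives from the
# documenter's objtype:
#   .. autotangodevice:: mypackage.MyDevice
#      :members:
# which renders the device properties, attributes and commands in the grouped
# sections defined above.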
| vxgmichel/python-tango-devicedoc | devicedoc/devicedoc.py | Python | gpl-3.0 | 9,087 |
#!/usr/bin/env python
#
# Copyright 2008,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import division
import numpy
from gnuradio import gr, gr_unittest, wavelet, analog, blocks
import copy
#import pygsl.wavelet as wavelet # FIXME: pygsl not checked for in config
import math
def sqr(x):
return x*x
def np2(k):
m = 0
n = k - 1
    while n > 0:
        m += 1
        n >>= 1
    return m
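# Quick check of np2, assuming it is meant to return the exponent of the next
# power of two: np2(5) == 3 since 2**3 == 8 >= 5, and np2(8) == 3. The helper
# is not referenced by the tests below.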
class test_classify(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
# def test_000_(self):
# src_data = numpy.zeros(10)
# trg_data = numpy.zeros(10)
# src = blocks.vector_source_f(src_data)
# dst = blocks.vector_sink_f()
# self.tb.connect(src, dst)
# self.tb.run()
# rsl_data = dst.data()
# sum = 0
# for (u,v) in zip(trg_data, rsl_data):
# w = u - v
# sum += w * w
# sum /= float(len(trg_data))
# assert sum < 1e-6
def test_001_(self):
src_data = numpy.array([-1.0, 1.0, -1.0, 1.0])
trg_data = src_data * 0.5
src = blocks.vector_source_f(src_data)
dst = blocks.vector_sink_f()
rail = analog.rail_ff(-0.5, 0.5)
self.tb.connect(src, rail)
self.tb.connect(rail, dst)
self.tb.run()
rsl_data = dst.data()
sum = 0
for (u, v) in zip(trg_data, rsl_data):
w = u - v
sum += w * w
sum /= float(len(trg_data))
assert sum < 1e-6
def test_002_(self):
src_data = numpy.array([-1.0,
-1.0 / 2.0,
-1.0 / 3.0,
-1.0 / 4.0,
-1.0 / 5.0])
trg_data = copy.deepcopy(src_data)
src = blocks.vector_source_f(src_data, False, len(src_data))
st = blocks.stretch_ff(-1.0 / 5.0, len(src_data))
dst = blocks.vector_sink_f(len(src_data))
self.tb.connect(src, st)
self.tb.connect(st, dst)
self.tb.run()
rsl_data = dst.data()
sum = 0
for (u, v) in zip(trg_data, rsl_data):
w = u - v
sum += w * w
sum /= float(len(trg_data))
assert sum < 1e-6
def test_003_(self):
src_grid = (0.0, 1.0, 2.0, 3.0, 4.0)
trg_grid = copy.deepcopy(src_grid)
src_data = (0.0, 1.0, 0.0, 1.0, 0.0)
src = blocks.vector_source_f(src_data, False, len(src_grid))
sq = wavelet.squash_ff(src_grid, trg_grid)
dst = blocks.vector_sink_f(len(trg_grid))
self.tb.connect(src, sq)
self.tb.connect(sq, dst)
self.tb.run()
rsl_data = dst.data()
sum = 0
for (u, v) in zip(src_data, rsl_data):
w = u - v
sum += w * w
sum /= float(len(src_data))
assert sum < 1e-6
# def test_004_(self): # FIXME: requires pygsl
#
# n = 256
# o = 4
# ws = wavelet.workspace(n)
# w = wavelet.daubechies(o)
#
# a = numpy.arange(n)
# b = numpy.sin(a*numpy.pi/16.0)
# c = w.transform_forward(b, ws)
# d = w.transform_inverse(c, ws)
#
# src = gr.vector_source_f(b, False, n)
# wv = wavelet.wavelet_ff(n, o, True)
# src = blocks.vector_source_f(b, False, n)
# wv = wavelet.wavelet_ff(n, o, True)
#
# dst = blocks.vector_sink_f(n)
# self.tb.connect(src, wv)
# self.tb.connect(wv, dst)
# self.tb.run()
# e = dst.data()
#
# sum = 0
# for (u, v) in zip(c, e):
# w = u - v
# sum += w * w
# sum /= float(len(c))
# assert sum < 1e-6
def test_005_(self):
src_data = (1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0)
dwav = numpy.array(src_data)
wvps = numpy.zeros(3)
# wavelet power spectrum
scl = 1.0 / sqr(dwav[0])
k = 1
for e in range(len(wvps)):
wvps[e] = scl*sqr(dwav[k:k+(0o1<<e)]).sum()
k += 0o1<<e
src = blocks.vector_source_f(src_data, False, len(src_data))
kon = wavelet.wvps_ff(len(src_data))
dst = blocks.vector_sink_f(int(math.ceil(math.log(len(src_data), 2))))
self.tb.connect(src, kon)
self.tb.connect(kon, dst)
self.tb.run()
snk_data = dst.data()
sum = 0
for (u,v) in zip(snk_data, wvps):
w = u - v
sum += w * w
sum /= float(len(snk_data))
assert sum < 1e-6
if __name__ == '__main__':
gr_unittest.run(test_classify, "test_classify.xml")
| iohannez/gnuradio | gr-wavelet/python/wavelet/qa_classify.py | Python | gpl-3.0 | 5,378 |
# -*- coding: utf-8 -*-
import os
import mock
import unittest
from flask import Flask
from nose.tools import * # noqa (PEP8 asserts)
import datetime
from tests.base import OsfTestCase
from tests.factories import RegistrationFactory
from framework.routing import Rule, json_renderer
from framework.utils import secure_filename
from website.routes import process_rules, OsfWebRenderer
from website import settings
from website import util
from website.util import paths
from website.util.mimetype import get_mimetype
from website.util import web_url_for, api_url_for, is_json_request, waterbutler_url_for, conjunct, api_v2_url
from website.project import utils as project_utils
try:
import magic # noqa
LIBMAGIC_AVAILABLE = True
except ImportError:
LIBMAGIC_AVAILABLE = False
HERE = os.path.dirname(os.path.abspath(__file__))
class TestUrlForHelpers(unittest.TestCase):
def setUp(self):
def dummy_view():
return {}
def dummy_guid_project_view():
return {}
def dummy_guid_profile_view():
return {}
self.app = Flask(__name__)
api_rule = Rule([
'/api/v1/<pid>/',
'/api/v1/<pid>/component/<nid>/'
], 'get', dummy_view, json_renderer)
web_rule = Rule([
'/<pid>/',
'/<pid>/component/<nid>/'
], 'get', dummy_view, OsfWebRenderer)
web_guid_project_rule = Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', dummy_guid_project_view, OsfWebRenderer)
web_guid_profile_rule = Rule([
'/profile/<pid>/',
], 'get', dummy_guid_profile_view, OsfWebRenderer)
process_rules(self.app, [api_rule, web_rule, web_guid_project_rule, web_guid_profile_rule])
def test_api_url_for(self):
with self.app.test_request_context():
assert api_url_for('dummy_view', pid='123') == '/api/v1/123/'
def test_api_v2_url_with_port(self):
full_url = api_v2_url('/nodes/abcd3/contributors/',
base_route='http://localhost:8000/',
base_prefix='v2/')
assert_equal(full_url, "http://localhost:8000/v2/nodes/abcd3/contributors/")
# Handles URL the same way whether or not user enters a leading slash
full_url = api_v2_url('nodes/abcd3/contributors/',
base_route='http://localhost:8000/',
base_prefix='v2/')
assert_equal(full_url, "http://localhost:8000/v2/nodes/abcd3/contributors/")
# User is still responsible for the trailing slash. If they omit it, it doesn't appear at end of URL
full_url = api_v2_url('/nodes/abcd3/contributors',
base_route='http://localhost:8000/',
base_prefix='v2/')
assert_not_equal(full_url, "http://localhost:8000/v2/nodes/abcd3/contributors/")
def test_api_v2_url_with_params(self):
"""Handles- and encodes- URLs with parameters (dict and kwarg) correctly"""
full_url = api_v2_url('/nodes/abcd3/contributors/',
params={'filter[fullname]': 'bob'},
base_route='https://api.osf.io/',
base_prefix='v2/',
page_size=10)
assert_equal(full_url, "https://api.osf.io/v2/nodes/abcd3/contributors/?filter%5Bfullname%5D=bob&page_size=10")
def test_api_v2_url_base_path(self):
"""Given a blank string, should return the base path (domain + port + prefix) with no extra cruft at end"""
full_url = api_v2_url('',
base_route='http://localhost:8000/',
base_prefix='v2/')
assert_equal(full_url, "http://localhost:8000/v2/")
def test_web_url_for(self):
with self.app.test_request_context():
assert web_url_for('dummy_view', pid='123') == '/123/'
def test_web_url_for_guid(self):
with self.app.test_request_context():
# check /project/<pid>
assert_equal('/pid123/', web_url_for('dummy_guid_project_view', pid='pid123', _guid=True))
assert_equal('/project/pid123/', web_url_for('dummy_guid_project_view', pid='pid123', _guid=False))
assert_equal('/project/pid123/', web_url_for('dummy_guid_project_view', pid='pid123'))
# check /project/<pid>/node/<nid>
assert_equal('/nid321/', web_url_for('dummy_guid_project_view', pid='pid123', nid='nid321', _guid=True))
assert_equal(
'/project/pid123/node/nid321/',
web_url_for('dummy_guid_project_view', pid='pid123', nid='nid321', _guid=False))
assert_equal(
'/project/pid123/node/nid321/',
web_url_for('dummy_guid_project_view', pid='pid123', nid='nid321'))
# check /profile/<pid>
assert_equal('/pro123/', web_url_for('dummy_guid_profile_view', pid='pro123', _guid=True))
assert_equal('/profile/pro123/', web_url_for('dummy_guid_profile_view', pid='pro123', _guid=False))
assert_equal('/profile/pro123/', web_url_for('dummy_guid_profile_view', pid='pro123'))
def test_web_url_for_guid_regex_conditions(self):
with self.app.test_request_context():
# regex matches limit keys to a minimum of 5 alphanumeric characters.
# check /project/<pid>
assert_not_equal('/123/', web_url_for('dummy_guid_project_view', pid='123', _guid=True))
assert_equal('/123456/', web_url_for('dummy_guid_project_view', pid='123456', _guid=True))
# check /project/<pid>/node/<nid>
assert_not_equal('/321/', web_url_for('dummy_guid_project_view', pid='123', nid='321', _guid=True))
assert_equal('/654321/', web_url_for('dummy_guid_project_view', pid='123456', nid='654321', _guid=True))
# check /profile/<pid>
assert_not_equal('/123/', web_url_for('dummy_guid_profile_view', pid='123', _guid=True))
assert_equal('/123456/', web_url_for('dummy_guid_profile_view', pid='123456', _guid=True))
def test_web_url_for_guid_case_sensitive(self):
with self.app.test_request_context():
# check /project/<pid>
assert_equal('/ABCdef/', web_url_for('dummy_guid_project_view', pid='ABCdef', _guid=True))
# check /project/<pid>/node/<nid>
assert_equal('/GHIjkl/', web_url_for('dummy_guid_project_view', pid='ABCdef', nid='GHIjkl', _guid=True))
# check /profile/<pid>
assert_equal('/MNOpqr/', web_url_for('dummy_guid_profile_view', pid='MNOpqr', _guid=True))
def test_web_url_for_guid_invalid_unicode(self):
with self.app.test_request_context():
# unicode id's are not supported when encoding guid url's.
# check /project/<pid>
assert_not_equal('/ø∆≤µ©/', web_url_for('dummy_guid_project_view', pid='ø∆≤µ©', _guid=True))
assert_equal(
'/project/%C3%B8%CB%86%E2%88%86%E2%89%A4%C2%B5%CB%86/',
web_url_for('dummy_guid_project_view', pid='øˆ∆≤µˆ', _guid=True))
# check /project/<pid>/node/<nid>
assert_not_equal(
'/ø∆≤µ©/',
web_url_for('dummy_guid_project_view', pid='ø∆≤µ©', nid='©µ≤∆ø', _guid=True))
assert_equal(
'/project/%C3%B8%CB%86%E2%88%86%E2%89%A4%C2%B5%CB%86/node/%C2%A9%C2%B5%E2%89%A4%E2%88%86%C3%B8/',
web_url_for('dummy_guid_project_view', pid='øˆ∆≤µˆ', nid='©µ≤∆ø', _guid=True))
# check /profile/<pid>
assert_not_equal('/ø∆≤µ©/', web_url_for('dummy_guid_profile_view', pid='ø∆≤µ©', _guid=True))
assert_equal(
'/profile/%C3%B8%CB%86%E2%88%86%E2%89%A4%C2%B5%CB%86/',
web_url_for('dummy_guid_profile_view', pid='øˆ∆≤µˆ', _guid=True))
def test_api_url_for_with_multiple_urls(self):
with self.app.test_request_context():
url = api_url_for('dummy_view', pid='123', nid='abc')
assert url == '/api/v1/123/component/abc/'
def test_web_url_for_with_multiple_urls(self):
with self.app.test_request_context():
url = web_url_for('dummy_view', pid='123', nid='abc')
assert url == '/123/component/abc/'
def test_is_json_request(self):
with self.app.test_request_context(content_type='application/json'):
assert_true(is_json_request())
with self.app.test_request_context(content_type=None):
assert_false(is_json_request())
with self.app.test_request_context(content_type='application/json;charset=UTF-8'):
assert_true(is_json_request())
def test_waterbutler_url_for(self):
with self.app.test_request_context():
url = waterbutler_url_for('upload', 'provider', 'path', mock.Mock(_id='_id'))
assert_in('nid=_id', url)
assert_in('/file?', url)
assert_in('path=path', url)
assert_in('provider=provider', url)
def test_waterbutler_url_for_implicit_cookie(self):
with self.app.test_request_context() as context:
context.request.cookies = {settings.COOKIE_NAME: 'cookie'}
url = waterbutler_url_for('upload', 'provider', 'path', mock.Mock(_id='_id'))
assert_in('nid=_id', url)
assert_in('/file?', url)
assert_in('path=path', url)
assert_in('cookie=cookie', url)
assert_in('provider=provider', url)
def test_waterbutler_url_for_cookie_not_required(self):
with self.app.test_request_context():
url = waterbutler_url_for('upload', 'provider', 'path', mock.Mock(_id='_id'))
assert_not_in('cookie', url)
assert_in('nid=_id', url)
assert_in('/file?', url)
assert_in('path=path', url)
assert_in('provider=provider', url)
class TestGetMimeTypes(unittest.TestCase):
def test_get_markdown_mimetype_from_filename(self):
name = 'test.md'
mimetype = get_mimetype(name)
assert_equal('text/x-markdown', mimetype)
@unittest.skipIf(not LIBMAGIC_AVAILABLE, 'Must have python-magic and libmagic installed')
def test_unknown_extension_with_no_contents_not_real_file_results_in_exception(self):
name = 'test.thisisnotarealextensionidonotcarwhatyousay'
with assert_raises(IOError):
get_mimetype(name)
@unittest.skipIf(LIBMAGIC_AVAILABLE, 'This test only runs if python-magic and libmagic are not installed')
def test_unknown_extension_with_no_contents_not_real_file_results_in_exception2(self):
name = 'test.thisisnotarealextensionidonotcarwhatyousay'
mime_type = get_mimetype(name)
assert_equal(None, mime_type)
@unittest.skipIf(not LIBMAGIC_AVAILABLE, 'Must have python-magic and libmagic installed')
def test_unknown_extension_with_real_file_results_in_python_mimetype(self):
name = 'test_views.notarealfileextension'
maybe_python_file = os.path.join(HERE, 'test_files', name)
mimetype = get_mimetype(maybe_python_file)
assert_equal('text/x-python', mimetype)
@unittest.skipIf(not LIBMAGIC_AVAILABLE, 'Must have python-magic and libmagic installed')
def test_unknown_extension_with_python_contents_results_in_python_mimetype(self):
name = 'test.thisisnotarealextensionidonotcarwhatyousay'
python_file = os.path.join(HERE, 'test_utils.py')
with open(python_file, 'r') as the_file:
content = the_file.read()
mimetype = get_mimetype(name, content)
assert_equal('text/x-python', mimetype)
class TestFrameworkUtils(unittest.TestCase):
def test_leading_underscores(self):
assert_equal(
'__init__.py',
secure_filename('__init__.py')
)
def test_werkzeug_cases(self):
"""Test that Werkzeug's tests still pass for our wrapped version"""
# Copied from Werkzeug
# BSD licensed - original at github.com/mitsuhiko/werkzeug,
# /tests/test_utils.py, line 282, commit 811b438
assert_equal(
'My_cool_movie.mov',
secure_filename('My cool movie.mov')
)
assert_equal(
'etc_passwd',
secure_filename('../../../etc/passwd')
)
assert_equal(
'i_contain_cool_umlauts.txt',
secure_filename(u'i contain cool \xfcml\xe4uts.txt')
)
class TestWebpackFilter(unittest.TestCase):
def setUp(self):
self.asset_paths = {'assets': 'assets.07123e.js'}
def test_resolve_asset(self):
asset = paths.webpack_asset('assets.js', self.asset_paths, debug=False)
assert_equal(asset, '/static/public/js/assets.07123e.js')
def test_resolve_asset_not_found_and_not_in_debug_mode(self):
with assert_raises(KeyError):
paths.webpack_asset('bundle.js', self.asset_paths, debug=False)
class TestWebsiteUtils(unittest.TestCase):
def test_conjunct(self):
words = []
assert_equal(conjunct(words), '')
words = ['a']
assert_equal(conjunct(words), 'a')
words = ['a', 'b']
assert_equal(conjunct(words), 'a and b')
words = ['a', 'b', 'c']
assert_equal(conjunct(words), 'a, b, and c')
assert_equal(conjunct(words, conj='or'), 'a, b, or c')
def test_rapply(self):
inputs = {
'foo': 'bar',
'baz': {
'boom': ['kapow'],
'bang': 'bam'
},
'bat': ['man']
}
outputs = util.rapply(inputs, str.upper)
assert_equal(outputs['foo'], 'bar'.upper())
assert_equal(outputs['baz']['boom'], ['kapow'.upper()])
assert_equal(outputs['baz']['bang'], 'bam'.upper())
assert_equal(outputs['bat'], ['man'.upper()])
r_assert = lambda s: assert_equal(s.upper(), s)
util.rapply(outputs, r_assert)
def test_rapply_on_list(self):
inputs = range(5)
add_one = lambda n: n + 1
outputs = util.rapply(inputs, add_one)
for i in inputs:
assert_equal(outputs[i], i + 1)
def test_rapply_on_tuple(self):
inputs = tuple(i for i in range(5))
add_one = lambda n: n + 1
outputs = util.rapply(inputs, add_one)
for i in inputs:
assert_equal(outputs[i], i + 1)
assert_equal(type(outputs), tuple)
def test_rapply_on_set(self):
inputs = set(i for i in range(5))
add_one = lambda n: n + 1
outputs = util.rapply(inputs, add_one)
for i in inputs:
assert_in(i + 1, outputs)
assert_true(isinstance(outputs, set))
def test_rapply_on_str(self):
input = "bob"
convert = lambda s: s.upper()
outputs = util.rapply(input, convert)
assert_equal("BOB", outputs)
assert_true(isinstance(outputs, basestring))
def test_rapply_preserves_args_and_kwargs(self):
def zero_if_not_check(item, check, checkFn=lambda n: n):
if check and checkFn(item):
return item
return 0
inputs = range(5)
outputs = util.rapply(inputs, zero_if_not_check, True, checkFn=lambda n: n % 2)
assert_equal(outputs, [0, 1, 0, 3, 0])
outputs = util.rapply(inputs, zero_if_not_check, False, checkFn=lambda n: n % 2)
assert_equal(outputs, [0, 0, 0, 0, 0])
class TestProjectUtils(OsfTestCase):
def set_registered_date(self, reg, date):
reg._fields['registered_date'].__set__(
reg,
date,
safe=True
)
reg.save()
def test_get_recent_public_registrations(self):
count = 0
for i in range(5):
reg = RegistrationFactory()
reg.is_public = True
count = count + 1
tdiff = datetime.datetime.now() - datetime.timedelta(days=count)
self.set_registered_date(reg, tdiff)
regs = [r for r in project_utils.recent_public_registrations()]
assert_equal(len(regs), 5)
for i in range(4):
assert_true(regs[i].registered_date > regs[i + 1].registered_date)
for i in range(5):
reg = RegistrationFactory()
reg.is_public = True
count = count + 1
tdiff = datetime.datetime.now() - datetime.timedelta(days=count)
self.set_registered_date(reg, tdiff)
regs = [r for r in project_utils.recent_public_registrations(7)]
assert_equal(len(regs), 7)
| petermalcolm/osf.io | tests/test_utils.py | Python | apache-2.0 | 16,788 |
# Patch transplanting extension for Mercurial
#
# Copyright 2006, 2007 Brendan Cully <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to transplant changesets from another branch
This extension allows you to transplant patches from another branch.
Transplanted patches are recorded in .hg/transplant/transplants, as a
map from a changeset hash to its hash in the source repository.
'''
from mercurial.i18n import _
import os, tempfile
from mercurial import bundlerepo, changegroup, cmdutil, hg, merge, match
from mercurial import patch, revlog, util, error
class transplantentry(object):
def __init__(self, lnode, rnode):
self.lnode = lnode
self.rnode = rnode
class transplants(object):
def __init__(self, path=None, transplantfile=None, opener=None):
self.path = path
self.transplantfile = transplantfile
self.opener = opener
if not opener:
self.opener = util.opener(self.path)
self.transplants = []
self.dirty = False
self.read()
def read(self):
abspath = os.path.join(self.path, self.transplantfile)
if self.transplantfile and os.path.exists(abspath):
for line in self.opener(self.transplantfile).read().splitlines():
lnode, rnode = map(revlog.bin, line.split(':'))
self.transplants.append(transplantentry(lnode, rnode))
def write(self):
if self.dirty and self.transplantfile:
if not os.path.isdir(self.path):
os.mkdir(self.path)
fp = self.opener(self.transplantfile, 'w')
for c in self.transplants:
l, r = map(revlog.hex, (c.lnode, c.rnode))
fp.write(l + ':' + r + '\n')
fp.close()
self.dirty = False
def get(self, rnode):
return [t for t in self.transplants if t.rnode == rnode]
def set(self, lnode, rnode):
self.transplants.append(transplantentry(lnode, rnode))
self.dirty = True
def remove(self, transplant):
del self.transplants[self.transplants.index(transplant)]
self.dirty = True
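# Illustrative helper (not part of the extension): the transplants file that
# read()/write() above manage is plain text, one "<local hex node>:<remote hex
# node>" pair per line. This sketch only shows how a single entry is
# serialized.
def _example_transplant_line(entry):
    # revlog.hex() turns a 20-byte binary node id into its 40-character hex
    # form, matching what transplants.write() emits line by line.
    return revlog.hex(entry.lnode) + ':' + revlog.hex(entry.rnode) + '\n'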
class transplanter(object):
def __init__(self, ui, repo):
self.ui = ui
self.path = repo.join('transplant')
self.opener = util.opener(self.path)
self.transplants = transplants(self.path, 'transplants',
opener=self.opener)
def applied(self, repo, node, parent):
'''returns True if a node is already an ancestor of parent
or has already been transplanted'''
if hasnode(repo, node):
if node in repo.changelog.reachable(parent, stop=node):
return True
for t in self.transplants.get(node):
# it might have been stripped
if not hasnode(repo, t.lnode):
self.transplants.remove(t)
return False
if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
return True
return False
def apply(self, repo, source, revmap, merges, opts={}):
'''apply the revisions in revmap one by one in revision order'''
revs = sorted(revmap)
p1, p2 = repo.dirstate.parents()
pulls = []
diffopts = patch.diffopts(self.ui, opts)
diffopts.git = True
lock = wlock = None
try:
wlock = repo.wlock()
lock = repo.lock()
for rev in revs:
node = revmap[rev]
revstr = '%s:%s' % (rev, revlog.short(node))
if self.applied(repo, node, p1):
self.ui.warn(_('skipping already applied revision %s\n') %
revstr)
continue
parents = source.changelog.parents(node)
if not opts.get('filter'):
# If the changeset parent is the same as the
# wdir's parent, just pull it.
if parents[0] == p1:
pulls.append(node)
p1 = node
continue
if pulls:
if source != repo:
repo.pull(source, heads=pulls)
merge.update(repo, pulls[-1], False, False, None)
p1, p2 = repo.dirstate.parents()
pulls = []
domerge = False
if node in merges:
# pulling all the merge revs at once would mean we
# couldn't transplant after the latest even if
# transplants before them fail.
domerge = True
if not hasnode(repo, node):
repo.pull(source, heads=[node])
if parents[1] != revlog.nullid:
self.ui.note(_('skipping merge changeset %s:%s\n')
% (rev, revlog.short(node)))
patchfile = None
else:
fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
fp = os.fdopen(fd, 'w')
gen = patch.diff(source, parents[0], node, opts=diffopts)
for chunk in gen:
fp.write(chunk)
fp.close()
del revmap[rev]
if patchfile or domerge:
try:
n = self.applyone(repo, node,
source.changelog.read(node),
patchfile, merge=domerge,
log=opts.get('log'),
filter=opts.get('filter'))
if n and domerge:
self.ui.status(_('%s merged at %s\n') % (revstr,
revlog.short(n)))
elif n:
self.ui.status(_('%s transplanted to %s\n')
% (revlog.short(node),
revlog.short(n)))
finally:
if patchfile:
os.unlink(patchfile)
if pulls:
repo.pull(source, heads=pulls)
merge.update(repo, pulls[-1], False, False, None)
finally:
self.saveseries(revmap, merges)
self.transplants.write()
lock.release()
wlock.release()
def filter(self, filter, changelog, patchfile):
'''arbitrarily rewrite changeset before applying it'''
self.ui.status(_('filtering %s\n') % patchfile)
user, date, msg = (changelog[1], changelog[2], changelog[4])
fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
fp = os.fdopen(fd, 'w')
fp.write("# HG changeset patch\n")
fp.write("# User %s\n" % user)
fp.write("# Date %d %d\n" % date)
fp.write(msg + '\n')
fp.close()
try:
util.system('%s %s %s' % (filter, util.shellquote(headerfile),
util.shellquote(patchfile)),
environ={'HGUSER': changelog[1]},
onerr=util.Abort, errprefix=_('filter failed'))
user, date, msg = self.parselog(file(headerfile))[1:4]
finally:
os.unlink(headerfile)
return (user, date, msg)
def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
filter=None):
'''apply the patch in patchfile to the repository as a transplant'''
(manifest, user, (time, timezone), files, message) = cl[:5]
date = "%d %d" % (time, timezone)
extra = {'transplant_source': node}
if filter:
(user, date, message) = self.filter(filter, cl, patchfile)
if log:
# we don't translate messages inserted into commits
message += '\n(transplanted from %s)' % revlog.hex(node)
self.ui.status(_('applying %s\n') % revlog.short(node))
self.ui.note('%s %s\n%s\n' % (user, date, message))
if not patchfile and not merge:
raise util.Abort(_('can only omit patchfile if merging'))
if patchfile:
try:
files = {}
try:
patch.patch(patchfile, self.ui, cwd=repo.root,
files=files, eolmode=None)
if not files:
self.ui.warn(_('%s: empty changeset')
% revlog.hex(node))
return None
finally:
files = patch.updatedir(self.ui, repo, files)
except Exception, inst:
seriespath = os.path.join(self.path, 'series')
if os.path.exists(seriespath):
os.unlink(seriespath)
p1 = repo.dirstate.parents()[0]
p2 = node
self.log(user, date, message, p1, p2, merge=merge)
self.ui.write(str(inst) + '\n')
raise util.Abort(_('Fix up the merge and run '
'hg transplant --continue'))
else:
files = None
if merge:
p1, p2 = repo.dirstate.parents()
repo.dirstate.setparents(p1, node)
m = match.always(repo.root, '')
else:
m = match.exact(repo.root, '', files)
n = repo.commit(message, user, date, extra=extra, match=m)
if not merge:
self.transplants.set(n, node)
return n
def resume(self, repo, source, opts=None):
'''recover last transaction and apply remaining changesets'''
if os.path.exists(os.path.join(self.path, 'journal')):
n, node = self.recover(repo)
self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
revlog.short(n)))
seriespath = os.path.join(self.path, 'series')
if not os.path.exists(seriespath):
self.transplants.write()
return
nodes, merges = self.readseries()
revmap = {}
for n in nodes:
revmap[source.changelog.rev(n)] = n
os.unlink(seriespath)
self.apply(repo, source, revmap, merges, opts)
def recover(self, repo):
'''commit working directory using journal metadata'''
node, user, date, message, parents = self.readlog()
merge = len(parents) == 2
if not user or not date or not message or not parents[0]:
raise util.Abort(_('transplant log file is corrupt'))
extra = {'transplant_source': node}
wlock = repo.wlock()
try:
p1, p2 = repo.dirstate.parents()
if p1 != parents[0]:
raise util.Abort(
_('working dir not at transplant parent %s') %
revlog.hex(parents[0]))
if merge:
repo.dirstate.setparents(p1, parents[1])
n = repo.commit(message, user, date, extra=extra)
if not n:
raise util.Abort(_('commit failed'))
if not merge:
self.transplants.set(n, node)
self.unlog()
return n, node
finally:
wlock.release()
def readseries(self):
nodes = []
merges = []
cur = nodes
for line in self.opener('series').read().splitlines():
if line.startswith('# Merges'):
cur = merges
continue
cur.append(revlog.bin(line))
return (nodes, merges)
def saveseries(self, revmap, merges):
if not revmap:
return
if not os.path.isdir(self.path):
os.mkdir(self.path)
series = self.opener('series', 'w')
for rev in sorted(revmap):
series.write(revlog.hex(revmap[rev]) + '\n')
if merges:
series.write('# Merges\n')
for m in merges:
series.write(revlog.hex(m) + '\n')
series.close()
def parselog(self, fp):
parents = []
message = []
node = revlog.nullid
inmsg = False
for line in fp.read().splitlines():
if inmsg:
message.append(line)
elif line.startswith('# User '):
user = line[7:]
elif line.startswith('# Date '):
date = line[7:]
elif line.startswith('# Node ID '):
node = revlog.bin(line[10:])
elif line.startswith('# Parent '):
parents.append(revlog.bin(line[9:]))
elif not line.startswith('#'):
inmsg = True
message.append(line)
return (node, user, date, '\n'.join(message), parents)
def log(self, user, date, message, p1, p2, merge=False):
'''journal changelog metadata for later recover'''
if not os.path.isdir(self.path):
os.mkdir(self.path)
fp = self.opener('journal', 'w')
fp.write('# User %s\n' % user)
fp.write('# Date %s\n' % date)
fp.write('# Node ID %s\n' % revlog.hex(p2))
fp.write('# Parent ' + revlog.hex(p1) + '\n')
if merge:
fp.write('# Parent ' + revlog.hex(p2) + '\n')
fp.write(message.rstrip() + '\n')
fp.close()
def readlog(self):
return self.parselog(self.opener('journal'))
def unlog(self):
'''remove changelog journal'''
absdst = os.path.join(self.path, 'journal')
if os.path.exists(absdst):
os.unlink(absdst)
def transplantfilter(self, repo, source, root):
def matchfn(node):
if self.applied(repo, node, root):
return False
if source.changelog.parents(node)[1] != revlog.nullid:
return False
extra = source.changelog.read(node)[5]
cnode = extra.get('transplant_source')
if cnode and self.applied(repo, cnode, root):
return False
return True
return matchfn
def hasnode(repo, node):
try:
return repo.changelog.rev(node) != None
except error.RevlogError:
return False
def browserevs(ui, repo, nodes, opts):
'''interactively transplant changesets'''
def browsehelp(ui):
ui.write(_('y: transplant this changeset\n'
'n: skip this changeset\n'
'm: merge at this changeset\n'
'p: show patch\n'
'c: commit selected changesets\n'
'q: cancel transplant\n'
'?: show this help\n'))
displayer = cmdutil.show_changeset(ui, repo, opts)
transplants = []
merges = []
for node in nodes:
displayer.show(repo[node])
action = None
while not action:
action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
if action == '?':
browsehelp(ui)
action = None
elif action == 'p':
parent = repo.changelog.parents(node)[0]
for chunk in patch.diff(repo, parent, node):
ui.write(chunk)
action = None
elif action not in ('y', 'n', 'm', 'c', 'q'):
ui.write(_('no such option\n'))
action = None
if action == 'y':
transplants.append(node)
elif action == 'm':
merges.append(node)
elif action == 'c':
break
elif action == 'q':
transplants = ()
merges = ()
break
displayer.close()
return (transplants, merges)
def transplant(ui, repo, *revs, **opts):
'''transplant changesets from another branch
Selected changesets will be applied on top of the current working
directory with the log of the original changeset. If --log is
specified, log messages will have a comment appended of the form::
(transplanted from CHANGESETHASH)
You can rewrite the changelog message with the --filter option.
Its argument will be invoked with the current changelog message as
$1 and the patch as $2.
If --source/-s is specified, selects changesets from the named
repository. If --branch/-b is specified, selects changesets from
the branch holding the named revision, up to that revision. If
--all/-a is specified, all changesets on the branch will be
transplanted, otherwise you will be prompted to select the
changesets you want.
hg transplant --branch REVISION --all will rebase the selected
branch (up to the named revision) onto your current working
directory.
You can optionally mark selected transplanted changesets as merge
changesets. You will not be prompted to transplant any ancestors
of a merged transplant, and you can merge descendants of them
normally instead of transplanting them.
If no merges or revisions are provided, hg transplant will start
an interactive changeset browser.
If a changeset application fails, you can fix the merge by hand
and then resume where you left off by calling hg transplant
--continue/-c.
'''
def getremotechanges(repo, url):
sourcerepo = ui.expandpath(url)
source = hg.repository(ui, sourcerepo)
common, incoming, rheads = repo.findcommonincoming(source, force=True)
if not incoming:
return (source, None, None)
bundle = None
if not source.local():
if source.capable('changegroupsubset'):
cg = source.changegroupsubset(incoming, rheads, 'incoming')
else:
cg = source.changegroup(incoming, 'incoming')
bundle = changegroup.writebundle(cg, None, 'HG10UN')
source = bundlerepo.bundlerepository(ui, repo.root, bundle)
return (source, incoming, bundle)
def incwalk(repo, incoming, branches, match=util.always):
if not branches:
branches = None
for node in repo.changelog.nodesbetween(incoming, branches)[0]:
if match(node):
yield node
def transplantwalk(repo, root, branches, match=util.always):
if not branches:
branches = repo.heads()
ancestors = []
for branch in branches:
ancestors.append(repo.changelog.ancestor(root, branch))
for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
if match(node):
yield node
def checkopts(opts, revs):
if opts.get('continue'):
if opts.get('branch') or opts.get('all') or opts.get('merge'):
raise util.Abort(_('--continue is incompatible with '
'branch, all or merge'))
return
if not (opts.get('source') or revs or
opts.get('merge') or opts.get('branch')):
raise util.Abort(_('no source URL, branch tag or revision '
'list provided'))
if opts.get('all'):
if not opts.get('branch'):
raise util.Abort(_('--all requires a branch revision'))
if revs:
raise util.Abort(_('--all is incompatible with a '
'revision list'))
checkopts(opts, revs)
if not opts.get('log'):
opts['log'] = ui.config('transplant', 'log')
if not opts.get('filter'):
opts['filter'] = ui.config('transplant', 'filter')
tp = transplanter(ui, repo)
p1, p2 = repo.dirstate.parents()
if len(repo) > 0 and p1 == revlog.nullid:
raise util.Abort(_('no revision checked out'))
if not opts.get('continue'):
if p2 != revlog.nullid:
raise util.Abort(_('outstanding uncommitted merges'))
m, a, r, d = repo.status()[:4]
if m or a or r or d:
raise util.Abort(_('outstanding local changes'))
bundle = None
source = opts.get('source')
if source:
(source, incoming, bundle) = getremotechanges(repo, source)
else:
source = repo
try:
if opts.get('continue'):
tp.resume(repo, source, opts)
return
tf = tp.transplantfilter(repo, source, p1)
if opts.get('prune'):
prune = [source.lookup(r)
for r in cmdutil.revrange(source, opts.get('prune'))]
matchfn = lambda x: tf(x) and x not in prune
else:
matchfn = tf
branches = map(source.lookup, opts.get('branch', ()))
merges = map(source.lookup, opts.get('merge', ()))
revmap = {}
if revs:
for r in cmdutil.revrange(source, revs):
revmap[int(r)] = source.lookup(r)
elif opts.get('all') or not merges:
if source != repo:
alltransplants = incwalk(source, incoming, branches,
match=matchfn)
else:
alltransplants = transplantwalk(source, p1, branches,
match=matchfn)
if opts.get('all'):
revs = alltransplants
else:
revs, newmerges = browserevs(ui, source, alltransplants, opts)
merges.extend(newmerges)
for r in revs:
revmap[source.changelog.rev(r)] = r
for r in merges:
revmap[source.changelog.rev(r)] = r
tp.apply(repo, source, revmap, merges, opts)
finally:
if bundle:
source.close()
os.unlink(bundle)
cmdtable = {
"transplant":
(transplant,
[('s', 'source', '', _('pull patches from REPOSITORY')),
('b', 'branch', [], _('pull patches from branch BRANCH')),
('a', 'all', None, _('pull all changesets up to BRANCH')),
('p', 'prune', [], _('skip over REV')),
('m', 'merge', [], _('merge at REV')),
('', 'log', None, _('append transplant info to log message')),
('c', 'continue', None, _('continue last transplant session '
'after repair')),
('', 'filter', '', _('filter changesets through FILTER'))],
_('hg transplant [-s REPOSITORY] [-b BRANCH [-a]] [-p REV] '
'[-m REV] [REV]...'))
}
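# Typical invocations, drawn from the command help above (shown here for
# convenience; see "hg help transplant" for the authoritative reference):
#
#   hg transplant -s http://example.com/repo REV   # pull REV from another repo
#   hg transplant --branch stable --all            # rebase a whole branch here
#   hg transplant --continue                       # resume after fixing a merge
#
# The extension is enabled by adding "transplant =" to the [extensions]
# section of an hgrc file.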
| joewalnes/idea-community | plugins/hg4idea/testData/bin/hgext/transplant.py | Python | apache-2.0 | 22,790 |
# ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__version__ = '0.1.8'
| rgs1/zktraffic | zktraffic/__init__.py | Python | apache-2.0 | 924 |
from django.apps import AppConfig
class FoodsConfig(AppConfig):
name = 'foods'
| chiara-paci/baskerville | baskervilleweb/foods/apps.py | Python | gpl-3.0 | 85 |
#!/usr/bin/env python
__author__ = "Vivek <[email protected]>"
__copyright__ = "Copyright 2014, http://radical.rutgers.edu"
__license__ = "MIT"
__use_case_name__ = "'Gromacs + LSDMap' simulation-analysis proof-of-concept (ExTASY)."
from radical.ensemblemd import Kernel
from radical.ensemblemd import SimulationAnalysisLoop
from radical.ensemblemd import EnsemblemdError
from radical.ensemblemd import SimulationAnalysisLoop
from radical.ensemblemd import SingleClusterEnvironment
import sys
import imp
import argparse
import os
import json
from radical.ensemblemd.engine import get_engine
# ------------------------------------------------------------------------------
# Set default verbosity
if os.environ.get('RADICAL_ENMD_VERBOSE') == None:
os.environ['RADICAL_ENMD_VERBOSE'] = 'REPORT'
# ------------------------------------------------------------------------------
#Load all custom Kernels
from kernel_defs.pre_grlsd_loop import kernel_pre_grlsd_loop
get_engine().add_kernel_plugin(kernel_pre_grlsd_loop)
from kernel_defs.gromacs import kernel_gromacs
get_engine().add_kernel_plugin(kernel_gromacs)
from kernel_defs.pre_lsdmap import kernel_pre_lsdmap
get_engine().add_kernel_plugin(kernel_pre_lsdmap)
from kernel_defs.lsdmap import kernel_lsdmap
get_engine().add_kernel_plugin(kernel_lsdmap)
from kernel_defs.post_lsdmap import kernel_post_lsdmap
get_engine().add_kernel_plugin(kernel_post_lsdmap)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
#
class Gromacs_LSDMap(SimulationAnalysisLoop):
# TODO Vivek: add description.
def __init__(self, maxiterations, simulation_instances=1, analysis_instances=1):
SimulationAnalysisLoop.__init__(self, maxiterations, simulation_instances, analysis_instances)
def pre_loop(self):
'''
function : transfers input files and intermediate executables
pre_grlsd_loop :-
Purpose : Transfers files, Split the input file into smaller files to be used by each of the
gromacs instances in the first iteration.
Arguments : --inputfile = file to be split
--numCUs = number of simulation instances/ number of smaller files
'''
k = Kernel(name="custom.pre_grlsd_loop")
k.arguments = ["--inputfile={0}".format(os.path.basename(Kconfig.md_input_file)),"--numCUs={0}".format(Kconfig.num_CUs)]
k.copy_input_data = [ '$SHARED/{0}'.format(os.path.basename(Kconfig.md_input_file)),
'$SHARED/spliter.py',
'$SHARED/gro.py'
]
return k
def simulation_step(self, iteration, instance):
'''
function : In iter=1, use the input files from pre_loop, else use the outputs of the analysis stage in the
previous iteration. Run gromacs in each instance using these files.
gromacs :-
Purpose : Run the gromacs simulation on each of the smaller files. Parameter files and executables are input
from pre_loop. There are 'numCUs' number of instances of gromacs per iteration.
Arguments : --grompp = gromacs parameters filename
--topol = topology filename
'''
gromacs = Kernel(name="custom.gromacs")
gromacs.arguments = ["--grompp={0}".format(os.path.basename(Kconfig.mdp_file)),
"--topol={0}".format(os.path.basename(Kconfig.top_file))]
gromacs.link_input_data = ['$SHARED/{0} > {0}'.format(os.path.basename(Kconfig.mdp_file)),
'$SHARED/{0} > {0}'.format(os.path.basename(Kconfig.top_file)),
'$SHARED/run.py > run.py']
if (iteration-1==0):
gromacs.link_input_data.append('$PRE_LOOP/temp/start{0}.gro > start.gro'.format(instance-1))
else:
gromacs.link_input_data.append('$ANALYSIS_ITERATION_{0}_INSTANCE_1/temp/start{1}.gro > start.gro'.format(iteration-1,instance-1))
if Kconfig.grompp_options is not None:
gromacs.environment = {'grompp_options':Kconfig.grompp_options}
if Kconfig.mdrun_options is not None:
gromacs.environment = {'mdrun_options':Kconfig.mdrun_options}
if Kconfig.ndx_file is not None:
gromacs.environment = {'ndxfile',os.path.basename(Kconfig.ndx_file)}
gromacs.link_input_data.append('$SHARED/{0}'.format(os.path.basename(Kconfig.ndx_file)))
return gromacs
def analysis_step(self, iteration, instance):
'''
function : Merge the results of each of the simulation instances and run LSDMap analysis to generate the
new coordinate file. Split this new coordinate file into smaller files to be used by the simulation stage
in the next iteration.
        If a step has multiple kernels (say k1, k2), data generated in k1 is implicitly moved to k2 (if k2 requires).
Data which needs to be moved between the various steps (pre_loop, simulation_step, analysis_step) needs to
be mentioned by the user.
pre_lsdmap :-
Purpose : The output of each gromacs instance in the simulation_step is a small coordinate file. Concatenate
such files from each of the gromacs instances to form a larger file. There is one instance of pre_lsdmap per
iteration.
Arguments : --numCUs = number of simulation instances / number of small files to be concatenated
lsdmap :-
Purpose : Perform LSDMap on the large coordinate file to generate weights and eigen values. There is one instance
of lsdmap per iteration (MSSA : Multiple Simulation Single Analysis model).
Arguments : --config = name of the config file to be used during LSDMap
post_lsdmap :-
Purpose : Use the weights, eigen values generated in lsdmap along with other parameter files from pre_loop
to generate the new coordinate file to be used by the simulation_step in the next iteration. There is one
instance of post_lsdmap per iteration.
Arguments : --num_runs = number of configurations to be generated in the new coordinate file
--out = output filename
--cycle = iteration number
--max_dead_neighbors = max dead neighbors to be considered
--max_alive_neighbors = max alive neighbors to be considered
--numCUs = number of simulation instances/ number of smaller files
'''
pre_ana = Kernel(name="custom.pre_lsdmap")
pre_ana.arguments = ["--numCUs={0}".format(Kconfig.num_CUs)]
pre_ana.link_input_data = ["$SHARED/pre_analyze.py > pre_analyze.py"]
for i in range(1,Kconfig.num_CUs+1):
pre_ana.link_input_data = pre_ana.link_input_data + ["$SIMULATION_ITERATION_{2}_INSTANCE_{0}/out.gro > out{1}.gro".format(i,i-1,iteration)]
pre_ana.copy_output_data = ['tmpha.gro > $SHARED/iter_{0}/tmpha.gro'.format(iteration-1),'tmp.gro > $SHARED/iter_{0}/tmp.gro'.format(iteration-1)]
lsdmap = Kernel(name="md.lsdmap")
lsdmap.arguments = ["--config={0}".format(os.path.basename(Kconfig.lsdm_config_file))]
lsdmap.link_input_data = ['$SHARED/{0} > {0}'.format(os.path.basename(Kconfig.lsdm_config_file)),'$SHARED/iter_{0}/tmpha.gro > tmpha.gro'.format(iteration-1)]
lsdmap.cores = 1
if iteration > 1:
lsdmap.link_input_data += ['$ANALYSIS_ITERATION_{0}_INSTANCE_1/weight.w > weight.w'.format(iteration-1)]
lsdmap.copy_output_data = ['weight.w > $SHARED/iter_{0}/weight.w'.format(iteration-1)]
lsdmap.copy_output_data = ['tmpha.ev > $SHARED/iter_{0}/tmpha.ev'.format(iteration-1),'out.nn > $SHARED/iter_{0}/out.nn'.format(iteration-1)]
if(iteration%Kconfig.nsave==0):
lsdmap.download_output_data=['lsdmap.log > output/iter{0}/lsdmap.log'.format(iteration-1)]
post_ana = Kernel(name="custom.post_lsdmap")
post_ana.link_input_data = ["$SHARED/post_analyze.py > post_analyze.py",
"$SHARED/selection.py > selection.py",
"$SHARED/reweighting.py > reweighting.py",
"$SHARED/spliter.py > spliter.py",
"$SHARED/gro.py > gro.py",
"$SHARED/iter_{0}/tmp.gro > tmp.gro".format(iteration-1),
"$SHARED/iter_{0}/tmpha.ev > tmpha.ev".format(iteration-1),
"$SHARED/iter_{0}/out.nn > out.nn".format(iteration-1),
"$SHARED/input.gro > input.gro"]
post_ana.arguments = ["--num_runs={0}".format(Kconfig.num_runs),
"--out=out.gro",
"--cycle={0}".format(iteration-1),
"--max_dead_neighbors={0}".format(Kconfig.max_dead_neighbors),
"--max_alive_neighbors={0}".format(Kconfig.max_alive_neighbors),
"--numCUs={0}".format(Kconfig.num_CUs)]
if iteration > 1:
post_ana.link_input_data += ['$ANALYSIS_ITERATION_{0}_INSTANCE_1/weight.w > weight_new.w'.format(iteration-1)]
if(iteration%Kconfig.nsave==0):
post_ana.download_output_data = ['out.gro > output/iter{0}/out.gro'.format(iteration),
'weight.w > output/iter{0}/weight.w'.format(iteration)]
return [pre_ana,lsdmap,post_ana]
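# ------------------------------------------------------------------------------
# Hedged sketch of the attributes this script expects on the Kconfig module
# loaded via imp.load_source() below. The names are taken from the usages
# above; the values are placeholders only.
#
#   num_iterations = 2                      # simulation-analysis iterations
#   num_CUs = 16                            # gromacs instances per iteration
#   num_runs = 1000                         # configurations generated by post_lsdmap
#   nsave = 1                               # download outputs every nsave iterations
#   max_alive_neighbors = 10
#   max_dead_neighbors = 0
#   md_input_file = '/path/to/input.gro'
#   mdp_file = '/path/to/grompp.mdp'
#   top_file = '/path/to/topol.top'
#   ndx_file = None                         # optional index file
#   grompp_options = None
#   mdrun_options = None
#   lsdm_config_file = '/path/to/config.ini'
#   helper_scripts = '/path/to/helper_scripts'
# ------------------------------------------------------------------------------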
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser()
parser.add_argument('--RPconfig', help='link to Radical Pilot related configurations file')
parser.add_argument('--Kconfig', help='link to Kernel configurations file')
args = parser.parse_args()
if args.RPconfig is None:
parser.error('Please enter a RP configuration file')
sys.exit(1)
if args.Kconfig is None:
parser.error('Please enter a Kernel configuration file')
sys.exit(0)
RPconfig = imp.load_source('RPconfig', args.RPconfig)
Kconfig = imp.load_source('Kconfig', args.Kconfig)
# Create a new static execution context with one resource and a fixed
# number of cores and runtime.
cluster = SingleClusterEnvironment(
resource=RPconfig.REMOTE_HOST,
cores=RPconfig.PILOTSIZE,
walltime=RPconfig.WALLTIME,
username = RPconfig.UNAME, #username
project = RPconfig.ALLOCATION, #project
queue = RPconfig.QUEUE,
database_url = RPconfig.DBURL
)
cluster.shared_data = [
Kconfig.md_input_file,
Kconfig.lsdm_config_file,
Kconfig.top_file,
Kconfig.mdp_file,
'{0}/spliter.py'.format(Kconfig.helper_scripts),
'{0}/gro.py'.format(Kconfig.helper_scripts),
'{0}/run.py'.format(Kconfig.helper_scripts),
'{0}/pre_analyze.py'.format(Kconfig.helper_scripts),
'{0}/post_analyze.py'.format(Kconfig.helper_scripts),
'{0}/selection.py'.format(Kconfig.helper_scripts),
'{0}/reweighting.py'.format(Kconfig.helper_scripts)
]
if Kconfig.ndx_file is not None:
cluster.shared_data.append(Kconfig.ndx_file)
cluster.allocate()
# We set the 'instances' of the simulation step to 16. This means that 16
# instances of the simulation are executed every iteration.
# We set the 'instances' of the analysis step to 1. This means that only
# one instance of the analysis is executed for each iteration
randomsa = Gromacs_LSDMap(maxiterations=Kconfig.num_iterations, simulation_instances=Kconfig.num_CUs, analysis_instances=1)
cluster.run(randomsa)
cluster.deallocate()
except EnsemblemdError, er:
print "Ensemble MD Toolkit Error: {0}".format(str(er))
raise # Just raise the execption again to get the backtrace
| radical-cybertools/ExTASY | examples/grlsd-on-stampede/extasy_gromacs_lsdmap.py | Python | mit | 13,334 |
import sys
if sys.platform[:3]=='win':
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
else:
from distutils.core import setup
from Cython.Build import cythonize
from Cython.Build import cythonize
import numpy
setup(
ext_modules = cythonize("master_fitter_utils.pyx"),
include_dirs=[numpy.get_include()]
)
| Nassehk/Dilatometry-analysis | dilatometry/Cython tools/setup_master_fitter_utils.py | Python | gpl-3.0 | 524 |
import os
import re
from . import Helper
class BLSItem:
def __init__(self, blsName, srcDir):
if srcDir:
self.srcDir = srcDir
self.name = os.path.splitext(blsName.lower())[0]
self.blsFullName = os.path.join(srcDir, blsName)
else:
self.srcDir = Helper.getWorkingDirForFile(blsName) + '\\SOURCE'
self.name = os.path.splitext(os.path.basename(blsName).lower())[0]
self.blsFullName = blsName
self.addedToCompile = False
self.dependence = self.__getDependence__()
def __getDependence__(self):
if (self.blsFullName == ''):
print('BSScript: __getDependence__ no fullBLSFileName.')
return None
blsFile = open(self.blsFullName, "rb")
data = blsFile.read().decode("cp1251", "ignore")
blsFile.close()
data = re.sub(r'{[\S\s]*?}', '', data, flags = re.IGNORECASE)
data = re.sub(r'\(\*[\S\s]*?\*\)', '', data, flags = re.IGNORECASE)
data = re.sub(r'//.*', '', data, flags = re.IGNORECASE)
expr = re.compile(r'[^\']\buses\b([\s\S][^;]*);', flags = re.IGNORECASE)
matcher = expr.search(data)
strUses = ''
while matcher:
strUses = strUses + matcher.group(1) + ','
strUses = re.sub(r'\s', '', strUses, flags = re.IGNORECASE)
matcher = expr.search(data, matcher.end(1))
if (strUses != ''):
return strUses[:-1].split(',')
else:
			return None
 | rusiv/BSScript | bsscript/BLSItem.py | Python | mit | 1,298 |
import os
# Run all tests in package for '-m unittest <test_package>'
def load_tests(loader, standard_tests, pattern):
this_dir = os.path.dirname(__file__)
if pattern is None:
pattern = "test*"
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
return standard_tests
# We need these classes defined in a real module in order for pickling
# to work.
from dinsd import Scaler
class ID(Scaler):
def __init__(self, id):
if isinstance(id, self.__class__):
self.value = id.value
return
if not isinstance(id, str):
raise TypeError(
"Expected str but passed {}".format(type(id)))
if (2 <= len(id) <=4 and id.startswith(self.firstchar) and
id[1:].isdigit()):
self.value = id
else:
raise TypeError("Expected '{}' followed by one to "
"three digits, got {!r}".format(
self.firstchar, id))
class SID(ID):
firstchar = 'S'
class CID(ID):
firstchar = 'C'
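# Minimal usage sketch for the Scaler subclasses above (illustrative only; the
# examples are commented out because invalid ids raise TypeError):
#
#   SID('S1')      # valid: 'S' followed by one to three digits
#   CID('C123')    # valid
#   SID('C1')      # TypeError: wrong leading character
#   CID('C1234')   # TypeError: more than three digits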
| bitdancer/dinsd | src/test_support.py | Python | apache-2.0 | 1,123 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import commands
import filecmp
import logging
import os
import shutil
import sys
import tempfile
import urllib
import pyauto_functional # Must be imported before pyauto
import pyauto
import pyauto_utils
import test_utils
class DownloadsTest(pyauto.PyUITest):
"""TestCase for Downloads."""
def setUp(self):
pyauto.PyUITest.setUp(self)
# Record all entries in the download dir
download_dir = self.GetDownloadDirectory().value()
self._existing_downloads = []
if os.path.isdir(download_dir):
self._existing_downloads += os.listdir(download_dir)
self._files_to_remove = [] # Files to remove after browser shutdown
def tearDown(self):
# Cleanup all files we created in the download dir
download_dir = self.GetDownloadDirectory().value()
if os.path.isdir(download_dir):
for name in os.listdir(download_dir):
if name not in self._existing_downloads:
self._files_to_remove.append(os.path.join(download_dir, name))
pyauto.PyUITest.tearDown(self)
# Delete all paths marked for deletion after browser shutdown.
for item in self._files_to_remove:
pyauto_utils.RemovePath(item)
def _DeleteAfterShutdown(self, path):
"""Delete |path| after browser has been shut down.
This is so that all handles to the path would have been gone by then.
Silently Ignores errors, when the path does not exist, or if the path
could not be deleted.
"""
self._files_to_remove.append(path)
def _ClearLocalDownloadState(self, path):
"""Prepare for downloading the given path.
Clears the given path and the corresponding .crdownload, to prepare it to
be downloaded.
"""
os.path.exists(path) and os.remove(path)
crdownload = path + '.crdownload'
os.path.exists(crdownload) and os.remove(crdownload)
def _GetDangerousDownload(self):
"""Returns the file path for a dangerous download for this OS."""
sub_path = os.path.join(self.DataDir(), 'downloads', 'dangerous')
if self.IsWin():
return os.path.join(sub_path, 'dangerous.com')
return os.path.join(sub_path, 'dangerous.jar')
def _EqualFileContents(self, file1, file2):
"""Determine if 2 given files have the same contents."""
if not (os.path.exists(file1) and os.path.exists(file2)):
return False
return filecmp.cmp(file1, file2, shallow=False)
def _GetDownloadId(self, download_index=0):
"""Return the download id for the download at the given index.
Args:
download_index: The index of the download in the list of downloads.
Default is 0.
"""
return self.GetDownloadsInfo().Downloads()[download_index]['id']
def _MakeFile(self, size):
"""Make a file on-the-fly with the given size.
Note that it's really a 1byte file even though ls -lh will report it as
of file |size| (du reports the correct usage on disk), but it's good
enough for downloads tests because chrome will treat it as a file of size
|size| when downloading.
Returns:
the path to the created file.
"""
fd, file_path = tempfile.mkstemp(suffix='.zip', prefix='file-downloads-')
os.lseek(fd, size, 0)
os.write(fd, 'a')
os.close(fd)
# Make it readable by chronos on chromeos
os.chmod(file_path, 0755)
logging.debug('Created temporary file %s of size %d' % (file_path, size))
self._DeleteAfterShutdown(file_path)
return file_path
def _GetAllDownloadIDs(self):
"""Return a list of all download ids."""
return [download['id'] for download in self.GetDownloadsInfo().Downloads()]
def testNoDownloadWaitingNeeded(self):
"""Make sure "wait for downloads" returns quickly if we have none."""
self.WaitForAllDownloadsToComplete()
def testZip(self):
"""Download a zip and verify that it downloaded correctly.
Also verify that the download shelf showed up.
"""
test_dir = os.path.join(os.path.abspath(self.DataDir()), 'downloads')
file_path = os.path.join(test_dir, 'a_zip_file.zip')
file_url = self.GetFileURLForPath(file_path)
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
'a_zip_file.zip')
self._ClearLocalDownloadState(downloaded_pkg)
self.DownloadAndWaitForStart(file_url)
# Wait for the download to finish.
self.WaitForAllDownloadsToComplete()
# Verify that the download shelf is visible
self.assertTrue(self.IsDownloadShelfVisible())
# Verify that the file was correctly downloaded
self.assertTrue(os.path.exists(downloaded_pkg))
self.assertTrue(self._EqualFileContents(file_path, downloaded_pkg))
def testZipInIncognito(self):
"""Download and verify a zip in incognito window."""
test_dir = os.path.join(os.path.abspath(self.DataDir()), 'downloads')
file_path = os.path.join(test_dir, 'a_zip_file.zip')
file_url = self.GetFileURLForPath(file_path)
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
'a_zip_file.zip')
self._ClearLocalDownloadState(downloaded_pkg)
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
# Trigger download and wait in new incognito window.
self.DownloadAndWaitForStart(file_url, windex=1)
self.WaitForAllDownloadsToComplete(windex=1)
incognito_downloads = self.GetDownloadsInfo(1).Downloads()
# Verify that download info exists in the correct profile.
self.assertEqual(len(incognito_downloads), 1)
self.assertTrue(self._EqualFileContents(file_path, downloaded_pkg),
msg='%s (size %d) and %s (size %d) do not match' % (
file_path, os.path.getsize(file_path),
downloaded_pkg, os.path.getsize(downloaded_pkg)))
self.assertTrue(self.IsDownloadShelfVisible(1))
def testSaveDangerousFile(self):
"""Verify that we can download and save a dangerous file."""
file_path = self._GetDangerousDownload()
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
os.path.basename(file_path))
self._ClearLocalDownloadState(downloaded_pkg)
self._TriggerUnsafeDownload(os.path.basename(file_path))
self.PerformActionOnDownload(self._GetDownloadId(),
'save_dangerous_download')
self.WaitForAllDownloadsToComplete()
# Verify that the file was downloaded.
self.assertTrue(os.path.exists(downloaded_pkg))
self.assertTrue(self._EqualFileContents(file_path, downloaded_pkg))
self._DeleteAfterShutdown(downloaded_pkg)
def testDeclineDangerousDownload(self):
"""Verify that we can decline dangerous downloads"""
file_path = self._GetDangerousDownload()
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
os.path.basename(file_path))
self._ClearLocalDownloadState(downloaded_pkg)
self._TriggerUnsafeDownload(os.path.basename(file_path))
self.PerformActionOnDownload(self._GetDownloadId(),
'decline_dangerous_download')
self.assertFalse(os.path.exists(downloaded_pkg))
self.assertFalse(self.GetDownloadsInfo().Downloads())
self.assertFalse(self.IsDownloadShelfVisible())
def testRemoveDownload(self):
"""Verify that we can remove a download."""
file_url = self.GetFileURLForDataPath('downloads', 'a_zip_file.zip')
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
'a_zip_file.zip')
self._ClearLocalDownloadState(downloaded_pkg)
self.DownloadAndWaitForStart(file_url)
self.WaitForAllDownloadsToComplete()
self.PerformActionOnDownload(self._GetDownloadId(), 'remove')
# The download is removed from downloads, but not from the disk.
self.assertFalse(self.GetDownloadsInfo().Downloads())
self.assertTrue(os.path.exists(downloaded_pkg))
self._DeleteAfterShutdown(downloaded_pkg)
def testBigZip(self):
"""Verify that we can download a 1GB file.
This test needs 2 GB of free space, 1 GB for the original zip file and
another for the downloaded one.
Note: This test increases automation timeout to 4 min. Things might seem
to hang.
"""
# Create a 1 GB file on the fly
file_path = self._MakeFile(2**30)
# Ensure there's sufficient space remaining to download file.
free_space = test_utils.GetFreeSpace(self.GetDownloadDirectory().value())
assert free_space >= 2**30, \
'Not enough disk space to download. Got %d free' % free_space
file_url = self.GetFileURLForPath(file_path)
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
os.path.basename(file_path))
self._ClearLocalDownloadState(downloaded_pkg)
self.DownloadAndWaitForStart(file_url)
self._DeleteAfterShutdown(downloaded_pkg)
self.WaitForAllDownloadsToComplete(timeout=10 * 60 * 1000);
# Verify that the file was correctly downloaded
self.assertTrue(os.path.exists(downloaded_pkg),
'Downloaded file %s missing.' % downloaded_pkg)
self.assertTrue(self._EqualFileContents(file_path, downloaded_pkg),
'Downloaded file %s does not match original' %
downloaded_pkg)
def testFileRenaming(self):
"""Test file renaming when downloading a already-existing filename."""
test_dir = os.path.join(os.path.abspath(self.DataDir()), 'downloads')
file_url = 'file://%s' % os.path.join(test_dir, 'a_zip_file.zip')
download_dir = self.GetDownloadDirectory().value()
num_times = 5
assert num_times > 1, 'needs to be > 1 to work'
renamed_files = []
for i in range(num_times):
expected_filename = os.path.join(download_dir, 'a_zip_file.zip')
if i > 0: # Files after first download are renamed.
expected_filename = os.path.join(download_dir,
'a_zip_file (%d).zip' % i)
renamed_files.append(expected_filename)
self._ClearLocalDownloadState(expected_filename)
self.DownloadAndWaitForStart(file_url)
self.WaitForAllDownloadsToComplete()
# Verify that all files exist and have the right name
for filename in renamed_files:
self.assertTrue(os.path.exists(filename))
self._DeleteAfterShutdown(filename)
def testCrazyFilenames(self):
"""Test downloading with filenames containing special chars.
The files are created on the fly and cleaned after use.
"""
download_dir = self.GetDownloadDirectory().value()
filename = os.path.join(self.DataDir(), 'downloads', 'crazy_filenames.txt')
crazy_filenames = self.EvalDataFrom(filename)
logging.info('Testing with %d crazy filenames' % len(crazy_filenames))
def _CreateFile(name):
"""Create and fill the given file with some junk."""
fp = open(name, 'w') # name could be unicode
print >>fp, 'This is a junk file named %s. ' % repr(name) * 100
fp.close()
# Temp dir for hosting crazy filenames.
temp_dir = tempfile.mkdtemp(prefix='download')
self._DeleteAfterShutdown(unicode(temp_dir))
# Windows has a dual nature dealing with unicode filenames.
# While the files are internally saved as unicode, there's a non-unicode
# aware API that returns a locale-dependent coding on the true unicode
# filenames. This messes up things.
# Filesystem-interfacing functions like os.listdir() need to
# be given unicode strings to "do the right thing" on win.
# Ref: http://boodebr.org/main/python/all-about-python-and-unicode
for filename in crazy_filenames: # filename is unicode.
utf8_filename = filename.encode('utf-8')
file_path = os.path.join(temp_dir, utf8_filename)
_CreateFile(os.path.join(temp_dir, filename)) # unicode file.
file_url = self.GetFileURLForPath(file_path)
downloaded_file = os.path.join(download_dir, filename)
self._ClearLocalDownloadState(downloaded_file)
self.DownloadAndWaitForStart(file_url)
self.WaitForAllDownloadsToComplete()
# Verify downloads.
downloads = self.GetDownloadsInfo().Downloads()
self.assertEqual(len(downloads), len(crazy_filenames))
for filename in crazy_filenames:
downloaded_file = os.path.join(download_dir, filename)
self.assertTrue(os.path.exists(downloaded_file))
self.assertTrue( # Verify file contents.
self._EqualFileContents(downloaded_file,
os.path.join(temp_dir, filename)))
os.path.exists(downloaded_file) and os.remove(downloaded_file)
def _TriggerUnsafeDownload(self, filename, tab_index=0, windex=0):
"""Trigger download of an unsafe/dangerous filetype.
    Files explicitly requested by the user (like navigating to a package, or
clicking on a link) aren't marked unsafe.
Only the ones where the user didn't directly initiate a download are
marked unsafe.
Navigates to download-dangerous.html which triggers the download.
Waits until the download starts.
Args:
filename: the name of the file to trigger the download.
This should exist in the 'dangerous' directory.
tab_index: tab index. Default 0.
windex: window index. Default 0.
"""
dangerous_dir = os.path.join(
self.DataDir(), 'downloads', 'dangerous')
assert os.path.isfile(os.path.join(dangerous_dir, filename))
file_url = self.GetFileURLForPath(os.path.join(
dangerous_dir, 'download-dangerous.html')) + '?' + filename
num_downloads = len(self.GetDownloadsInfo().Downloads())
self.NavigateToURL(file_url, windex, tab_index)
# It might take a while for the download to kick in, hold on until then.
self.assertTrue(self.WaitUntil(
lambda: len(self.GetDownloadsInfo().Downloads()) == num_downloads + 1))
# Wait for Download Shelf to appear to reduce flakiness.
self.assertTrue(self.WaitUntil(self.IsDownloadShelfVisible))
def testPauseAndResume(self):
"""Verify that pause and resume work while downloading a file.
Note: This test increases automation timeout to 2 min. Things might seem
to hang.
"""
# Create a 250 MB file on the fly
file_path = self._MakeFile(2**28)
# Ensure there's sufficient space remaining to download file.
free_space = test_utils.GetFreeSpace(self.GetDownloadDirectory().value())
assert free_space >= 2**28, \
'Not enough disk space to download. Got %d free' % free_space
file_url = self.GetFileURLForPath(file_path)
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
os.path.basename(file_path))
self._ClearLocalDownloadState(downloaded_pkg)
self.DownloadAndWaitForStart(file_url)
self._DeleteAfterShutdown(downloaded_pkg)
self._DeleteAfterShutdown(file_path)
# Pause the download and assert that it is paused.
pause_dict = self.PerformActionOnDownload(self._GetDownloadId(), 'pause')
if pause_dict['state'] == 'COMPLETE':
logging.info('The download completed before pause. Stopping test.')
return
self.assertTrue(pause_dict['is_paused'])
self.assertTrue(pause_dict['state'] == 'IN_PROGRESS')
# Resume the download and assert it is not paused.
resume_dict = self.PerformActionOnDownload(self._GetDownloadId(), 'resume')
self.assertFalse(resume_dict['is_paused'])
self.WaitForAllDownloadsToComplete(timeout=10 * 60 * 1000);
# Verify that the file was correctly downloaded after pause and resume.
self.assertTrue(os.path.exists(downloaded_pkg),
'Downloaded file %s missing.' % downloaded_pkg)
self.assertTrue(self._EqualFileContents(file_path, downloaded_pkg),
'Downloaded file %s does not match original' %
downloaded_pkg)
def testCancelDownload(self):
"""Verify that we can cancel a download."""
# Create a big file (250 MB) on the fly, so that the download won't finish
# before being cancelled.
file_path = self._MakeFile(2**28)
# Ensure there's sufficient space remaining to download file.
free_space = test_utils.GetFreeSpace(self.GetDownloadDirectory().value())
assert free_space >= 2**28, \
'Not enough disk space to download. Got %d free' % free_space
file_url = self.GetFileURLForPath(file_path)
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
os.path.basename(file_path))
self._ClearLocalDownloadState(downloaded_pkg)
self.DownloadAndWaitForStart(file_url)
self.PerformActionOnDownload(self._GetDownloadId(), 'cancel')
self._DeleteAfterShutdown(file_path)
state = self.GetDownloadsInfo().Downloads()[0]['state']
if state == 'COMPLETE':
logging.info('The download completed before cancel. Test stopped.')
return
# Verify the download has been cancelled.
self.assertEqual('CANCELLED',
self.GetDownloadsInfo().Downloads()[0]['state'])
self.assertFalse(os.path.exists(downloaded_pkg))
def testDownloadsPersistence(self):
"""Verify that download history persists on session restart."""
test_dir = os.path.join(os.path.abspath(self.DataDir()), 'downloads')
file_url = self.GetFileURLForPath(os.path.join(test_dir, 'a_zip_file.zip'))
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
'a_zip_file.zip')
self._ClearLocalDownloadState(downloaded_pkg)
self.DownloadAndWaitForStart(file_url)
self.WaitForAllDownloadsToComplete()
downloads = self.GetDownloadsInfo().Downloads()
self.assertEqual(1, len(downloads))
self.assertEqual('a_zip_file.zip', downloads[0]['file_name'])
file_url = downloads[0]['url']
self.RestartBrowser(clear_profile=False)
# Trigger the download service to get loaded after restart.
self.NavigateToURL('chrome://downloads/')
# Verify that there's no download shelf anymore.
self.assertFalse(self.IsDownloadShelfVisible(),
'Download shelf persisted browser restart.')
# Verify that the download history persists.
downloads = self.GetDownloadsInfo().Downloads()
self.assertEqual(1, len(downloads))
self.assertEqual('a_zip_file.zip', downloads[0]['file_name'])
self.assertEqual(file_url, downloads[0]['url'])
self._DeleteAfterShutdown(downloaded_pkg)
def testExtendedAttributesOnMac(self):
"""Verify that Chrome sets the extended attributes on a file.
This test is for mac only.
"""
if not self.IsMac():
logging.info('Skipping testExtendedAttributesOnMac on non-Mac')
return
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
'a_zip_file.zip')
self._ClearLocalDownloadState(downloaded_pkg)
file_url = 'http://src.chromium.org/viewvc/chrome/trunk/src/chrome/'\
'test/data/downloads/a_zip_file.zip'
self.DownloadAndWaitForStart(file_url)
self.WaitForAllDownloadsToComplete()
import xattr
self.assertTrue('com.apple.quarantine' in xattr.listxattr(downloaded_pkg))
def testDownloadPercentage(self):
"""Verify that during downloading, % values increases,
and once download is over, % value is 100"""
file_path = self._MakeFile(2**24)
# Ensure there's sufficient space remaining to download file.
free_space = test_utils.GetFreeSpace(self.GetDownloadDirectory().value())
assert free_space >= 2**24, \
'Not enough disk space to download. Got %d free' % free_space
file_url = self.GetFileURLForPath(file_path)
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
os.path.basename(file_path))
os.path.exists(downloaded_pkg) and os.remove(downloaded_pkg)
self.DownloadAndWaitForStart(file_url)
downloaded_pkg = os.path.join(self.GetDownloadDirectory().value(),
os.path.basename(file_path))
downloads = self.GetDownloadsInfo().Downloads()
old_percentage = downloads[0]['PercentComplete']
def _PercentInc():
percent = self.GetDownloadsInfo().Downloads()[0]['PercentComplete']
      return old_percentage == 100 or percent > old_percentage
self.assertTrue(self.WaitUntil(_PercentInc),
msg='Download percentage value is not increasing')
# Once download is completed, percentage is 100.
self.WaitForAllDownloadsToComplete()
downloads = self.GetDownloadsInfo().Downloads()
self.assertEqual(downloads[0]['PercentComplete'], 100,
'Download percentage should be 100 after download completed')
os.path.exists(file_path) and os.remove(file_path)
os.path.exists(downloaded_pkg) and os.remove(downloaded_pkg)
def testDownloadIncognitoAndRegular(self):
"""Download the same zip file in regular and incognito window and
verify that it downloaded correctly with same file name appended with
counter for the second download in regular window.
"""
test_dir = os.path.join(os.path.abspath(self.DataDir()), 'downloads')
file_path = os.path.join(test_dir, 'a_zip_file.zip')
file_url = self.GetFileURLForPath(file_path)
downloaded_pkg_regul = os.path.join(self.GetDownloadDirectory().value(),
'a_zip_file.zip')
downloaded_pkg_incog = os.path.join(self.GetDownloadDirectory().value(),
'a_zip_file (1).zip')
self._ClearLocalDownloadState(downloaded_pkg_regul)
self._ClearLocalDownloadState(downloaded_pkg_incog)
self.DownloadAndWaitForStart(file_url, 0)
self.WaitForAllDownloadsToComplete(windex=0)
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.DownloadAndWaitForStart(file_url, 1)
self.WaitForAllDownloadsToComplete(windex=1)
# Verify download in regular window.
self.assertTrue(os.path.exists(downloaded_pkg_regul))
self.assertTrue(self._EqualFileContents(file_path, downloaded_pkg_regul))
# Verify download in incognito window.
# bug 69738 WaitForAllDownloadsToComplete is flaky for this test case.
# Using extra WaitUntil until this is resolved.
self.assertTrue(self.WaitUntil(
lambda: os.path.exists(downloaded_pkg_incog)))
self.assertTrue(self._EqualFileContents(file_path, downloaded_pkg_incog))
if __name__ == '__main__':
pyauto_functional.Main()
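# Typical invocation (assumption: the usual pyauto/unittest conventions apply;
# run the whole suite, or name a single test case to run just that one):
#
#   python downloads.py
#   python downloads.py DownloadsTest.testZip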
| zcbenz/cefode-chromium | chrome/test/functional/downloads.py | Python | bsd-3-clause | 22,621 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for applying xla-sharding to a model."""
import contextlib
from typing import Dict, List, Optional, Sequence
from lingvo import compat as tf
from lingvo.core import py_utils_flags
from lingvo.core import thread_local_utils
import numpy as np
import sentencepiece as sentencepiece_processor
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
# pylint: enable=g-direct-tensorflow-import
ThreadLocalStack = thread_local_utils.ThreadLocalStack
def Split(x,
split_dimension,
num_devices,
use_sharding_op=True,
input_shape=None):
"""Wrapper for xla_sharding.split.
Args:
x: Tensor to annotate.
split_dimension: xla_sharding.split arg.
num_devices: xla_sharding.split arg.
use_sharding_op: If true, adds a sharding op to set the sharding: tensor =
gen_xla_ops.xla_sharding(tensor)
hyouklee@: use_sharding_op=False "It adds the sharding attribute to the op
itself. The outcome is that, that information could be lost by TF graph
transformations. Also, directly attaching the sharding annotation to the
op caused some compilation failures in the past (due to incompatible
shardings), so the plan is to make use_sharding_op to be the default."
"The only case I would set it to False today is when annotating weights.
Weight annotation does some special handling, so there may be some
changes needed in that logic if we add separate sharding op."
input_shape: The shape of the original tensor.
Returns:
Tensor conditionally annotated with sharding.
"""
if not py_utils_flags.use_tpu() or num_devices is None or not num_devices > 1:
return x
return xla_sharding.split(
x,
split_dimension,
num_devices,
input_shape=input_shape,
use_sharding_op=use_sharding_op,
)
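# Illustrative use of Split() above (placeholder shapes; outside a TPU context,
# or with num_devices <= 1, the call is a no-op and returns x unchanged):
#
#   x = tf.zeros([8, 1024])                            # batch-major activations
#   x = Split(x, split_dimension=0, num_devices=8)     # shard the batch 8 ways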
def Replicate(x, use_sharding_op=True):
"""Wrapper of xla_sharding.replicate."""
if not py_utils_flags.use_tpu():
return x
return xla_sharding.replicate(x, use_sharding_op=use_sharding_op)
_MESH_SPLIT_DIM_PREFIXES = ThreadLocalStack()
_MANUAL_MESH_DIMS = ThreadLocalStack()
def GetMeshSplitSharding(device_mesh, tensor_split_dims_mapping):
"""Wrapper of xla_sharding.mesh_split_sharding()."""
# Apply the prefix in the context.
tensor_split_dims_mapping = (
_MESH_SPLIT_DIM_PREFIXES.stack + tensor_split_dims_mapping)
if _MANUAL_MESH_DIMS.stack:
return xla_sharding.mesh_split_sharding(
device_mesh,
tensor_split_dims_mapping,
manual_mesh_dims=_MANUAL_MESH_DIMS.stack)
# Do not include manual_mesh_dims to support legacy TF versions.
return xla_sharding.mesh_split_sharding(device_mesh,
tensor_split_dims_mapping)
def MeshSplit(x,
device_mesh,
tensor_split_dims_mapping,
use_sharding_op=True,
unspecified_dims=None):
"""Wrapper of xla_sharding.mesh_split()."""
if (not py_utils_flags.use_tpu() or tensor_split_dims_mapping is None or
device_mesh is None or device_mesh.size <= 1):
return x
# Apply the prefix in the context.
tensor_split_dims_mapping = (
_MESH_SPLIT_DIM_PREFIXES.stack + tensor_split_dims_mapping)
num_tiles = np.prod(
[device_mesh.shape[i] for i in tensor_split_dims_mapping if i >= 0])
if num_tiles <= 1:
return x
if _MANUAL_MESH_DIMS.stack or unspecified_dims:
return xla_sharding.mesh_split(
x,
device_mesh,
tensor_split_dims_mapping,
use_sharding_op=use_sharding_op,
manual_mesh_dims=_MANUAL_MESH_DIMS.stack,
unspecified_dims=unspecified_dims)
# Do not include manual_mesh_dims or unspecified_dims to support legacy TF
# versions.
return xla_sharding.mesh_split(
x,
device_mesh,
tensor_split_dims_mapping,
use_sharding_op=use_sharding_op)
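# Illustrative mapping for MeshSplit() above (placeholder values): with a
# [2, 4] device mesh and tensor_split_dims_mapping=[0, -1, 1], a tensor of
# shape [batch, seq, model] has its batch dimension sharded 2-way over mesh
# axis 0, its seq dimension replicated, and its model dimension sharded 4-way
# over mesh axis 1.
#
#   device_mesh = np.arange(8).reshape([2, 4])
#   w = MeshSplit(w, device_mesh, tensor_split_dims_mapping=[0, -1, 1])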
@contextlib.contextmanager
def MeshSplitDimPrefixContext(prefix_mesh_dim):
"""Adds a prefix mesh dim for tensor_split_dims_mapping in MeshSplit."""
if prefix_mesh_dim is not None:
_MESH_SPLIT_DIM_PREFIXES.stack.append(prefix_mesh_dim)
try:
yield
finally:
if prefix_mesh_dim is not None:
_MESH_SPLIT_DIM_PREFIXES.stack.pop()
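# Sketch of the prefix context (placeholder values): mesh dims pushed here are
# prepended to every tensor_split_dims_mapping used inside the block. This is
# typically useful when tensors carry an extra leading dimension (e.g. a
# stacked layer), so the call below shards rank-3 y as if the mapping were
# [0, 1, -1].
#
#   with MeshSplitDimPrefixContext(0):
#     y = MeshSplit(y, device_mesh, tensor_split_dims_mapping=[1, -1])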
def GetMeshSplitDimPrefixContext():
return _MESH_SPLIT_DIM_PREFIXES.stack
@contextlib.contextmanager
def ManualMeshDimContext(mesh_dim):
"""Adds a context where mesh_dim is used for manual sharding."""
if mesh_dim is not None:
_MANUAL_MESH_DIMS.stack.append(mesh_dim)
try:
yield
finally:
if mesh_dim is not None:
_MANUAL_MESH_DIMS.stack.pop()
def ZigzagOrderOnDeviceMesh(device_mesh, zigzag_mesh_dim):
"""Permutes device_mesh to form zigzag order along zigzag_mesh_dim."""
# Where there is no wrap-around links along one edge, we might
# reduce all-reduce latency along that edge by permuting the device order:
# instead of
# 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7
# | |
# +---------------------------+
# it will be
# +-------+-------+-------+
# | | | |
# 0 - 7 1 6 2 5 3 - 4
# | | | |
# +-------+-------+-------+
xpose_dims = list(range(len(device_mesh.shape)))
xpose_dims[0] = zigzag_mesh_dim
xpose_dims[zigzag_mesh_dim] = 0
device_mesh = np.transpose(device_mesh, xpose_dims)
permuted_mesh = np.copy(device_mesh)
for i in range(device_mesh.shape[0]):
zigzag_i = i * 2 if i * 2 < device_mesh.shape[0] else (
device_mesh.shape[0] - i) * 2 - 1
permuted_mesh[i, ...] = device_mesh[zigzag_i, ...]
return np.transpose(permuted_mesh, xpose_dims)
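# Illustrative sketch (not part of the original library): on a line of 8
# devices with no wrap-around link, the zigzag order keeps every pair of
# logically adjacent devices within two physical hops.
def _ExampleZigzagOrder():
  """Returns array([0, 2, 4, 6, 7, 5, 3, 1]) for a 1D mesh of 8 devices."""
  return ZigzagOrderOnDeviceMesh(np.arange(8), zigzag_mesh_dim=0)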
def GetNonPod2dMesh(device_mesh_shape, physical_mesh_shape):
"""Returns a 2D device mesh on slices smaller than a pod."""
assert len(device_mesh_shape) == 2
assert len(physical_mesh_shape) == 3
if device_mesh_shape[1] != physical_mesh_shape[1] * physical_mesh_shape[2]:
tf.logging.warning(
'This only works when device_mesh_shape == [physical_mesh_shape[0], '
' physical_mesh_shape[1] * physical_mesh_shape[2]]. '
        'If device_mesh_shape is [32, 16] while physical_mesh_shape is '
        '[16, 16, 2], we can transpose the result of this function: '
        'GetNonPod2dMesh([16, 32], [16, 16, 2]).')
# Form a ring on inner mesh dim.
  device_mesh = np.reshape(
      np.arange(0, np.prod(device_mesh_shape)), physical_mesh_shape)
device_mesh = np.transpose(device_mesh, [0, 2, 1])
device_mesh[:, 1, :] = device_mesh[:, 1, ::-1]
  # Reshape back to the requested 2D mesh shape.
device_mesh = np.reshape(device_mesh, device_mesh_shape)
  # Zigzag on the outer mesh dim, which also lacks a wrap-around link.
device_mesh = ZigzagOrderOnDeviceMesh(device_mesh, zigzag_mesh_dim=0)
return device_mesh
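# Illustrative sketch (not part of the original library): building a [4, 8]
# logical mesh for a hypothetical 4x4x2 physical slice (32 devices). The
# shapes are example values only.
def _ExampleNonPod2dMesh():
  return GetNonPod2dMesh([4, 8], [4, 4, 2])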
def ReshapeDim(x, dim, dim_reshape_segments=None):
"""Reshapes tensor x according to dim_reshape_segments.
Args:
    x: An input Tensor of shape [..., x.shape[dim], ...].
dim: The dim that needs to be reshaped.
dim_reshape_segments: The leading dim size of the reshaped dims.
Returns:
A Tensor of shape [..., dim_reshape_segments,
x.shape[dim] // dim_reshape_segments, ...].
"""
if dim_reshape_segments is None:
return x
assert x.shape[dim] % dim_reshape_segments == 0
new_shape = list(x.shape[0:dim])
new_shape.append(dim_reshape_segments)
new_shape.append(x.shape[dim] // dim_reshape_segments)
new_shape.extend(d for d in x.shape[dim + 1:])
return tf.reshape(x, new_shape)
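# Illustrative sketch (not part of the original library): splitting one axis
# into two with ReshapeDim. The input tensor is a hypothetical placeholder.
def _ExampleReshapeDim(x):
  """Reshapes a [4, 6, 5] tensor x to [4, 3, 2, 5] (6 = 3 * 2)."""
  return ReshapeDim(x, dim=1, dim_reshape_segments=3)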
class TensorShardingSpec:
"""Represents a sharding annotation for GShard/XLA."""
def __init__(self,
split_dims_mapping: Optional[List[int]] = None,
device_mesh: Optional[np.ndarray] = None,
uneven_padding: Optional[List[int]] = None):
"""Creates a sharding specification.
Args:
split_dims_mapping: a list of integers that map each tensor axis to the
device mesh axis along which it is sharded. Its length is the tensor
        rank, and split_dims_mapping[i] is the device mesh axis for tensor dimension
i. Use -1 for tensor dimensions that are not sharded. If the list is set
to None, the sharding will be treated as replicated.
device_mesh: a numpy.ndarray describing the topology of the device mesh
and each element is the ID of the device in the topology. Not needed for
replicated sharding, where it can be set to None.
uneven_padding: amount of padding applied to the right side of each tensor
dimension due to uneven partitioning of the shape in SPMD.
"""
self._split_dims_mapping: Optional[List[int]] = split_dims_mapping
self._device_mesh: Optional[np.ndarray] = device_mesh
self._uneven_padding = uneven_padding
@classmethod
def FromFullShape(cls, full_shape: Sequence[int],
split_dims_mapping: List[int], device_mesh: np.ndarray):
"""Creates tiled sharding spec with uneven padding computed from shape."""
uneven_padding = [0] * len(split_dims_mapping)
for i in range(len(split_dims_mapping)):
if split_dims_mapping[i] >= 0:
partitions = device_mesh.shape[split_dims_mapping[i]]
shard_size = (full_shape[i] + partitions - 1) // partitions
uneven_padding[i] = shard_size * partitions - full_shape[i]
return TensorShardingSpec(split_dims_mapping, device_mesh, uneven_padding)
def ApplyToTensor(self,
tensor: tf.Tensor,
use_sharding_op: bool = True) -> tf.Tensor:
if self.is_replicated:
return xla_sharding.replicate(tensor, use_sharding_op=use_sharding_op)
return MeshSplit(
tensor,
self.device_mesh,
self.split_dims_mapping,
use_sharding_op=use_sharding_op)
def ApplyToVariable(self, variable: tf.Variable) -> tf.Variable:
if self.is_replicated:
return xla_sharding.replicate(variable, use_sharding_op=False)
return MeshSplit(
variable,
self.device_mesh,
self.split_dims_mapping,
use_sharding_op=False)
def ShardShape(self, full_shape: Sequence[int]) -> Sequence[int]:
"""Returns the shape after applying this sharding to full_shape."""
if self.is_replicated:
return full_shape
shard_shape = list(full_shape)
for i in range(len(self._split_dims_mapping)):
if self._split_dims_mapping[i] >= 0:
partitions = self._device_mesh.shape[self._split_dims_mapping[i]]
shard_shape[i] = (full_shape[i] + partitions - 1) // partitions
return shard_shape
def ManualToAutoPartitioning(self, tensor: tf.Tensor) -> tf.Tensor:
"""Converts manually sharded tensor to full-size for auto partitioning."""
full_shape = list(tensor.shape)
if not self.is_replicated:
for i in range(len(self._split_dims_mapping)):
if self._split_dims_mapping[i] >= 0:
full_shape[i] *= self._device_mesh.shape[self._split_dims_mapping[i]]
if self._uneven_padding is not None and self._uneven_padding[i] > 0:
full_shape[i] -= self._uneven_padding[i]
return xla_sharding.manual_to_auto_spmd_partition(
tensor,
self.ToXlaOpSharding().SerializeToString(), full_shape)
def AutoToManualPartitioning(self, tensor: tf.Tensor) -> tf.Tensor:
"""Converts full-size tensor (auto partitioning) to manually sharded."""
manual = xla_sharding.auto_to_manual_spmd_partition(
tensor,
self.ToXlaOpSharding().SerializeToString())
xla_sharding.Sharding.manual().apply_to_tensor(manual)
return manual
def ToXlaOpSharding(self) -> xla_data_pb2.OpSharding:
if self.is_replicated:
return xla_sharding.Sharding.replicate().proto
dims_mapping = _MESH_SPLIT_DIM_PREFIXES.stack + self.split_dims_mapping
return xla_sharding.mesh_split_sharding(self.device_mesh,
dims_mapping).proto
@classmethod
def FromXlaOpSharding(
cls, op_sharding_proto: xla_data_pb2.OpSharding) -> 'TensorShardingSpec':
"""Parses from an XLA OpSharding proto."""
if op_sharding_proto.type == xla_data_pb2.OpSharding.OTHER:
device_mesh_shape = op_sharding_proto.tile_assignment_dimensions
device_mesh = np.reshape(
np.array(op_sharding_proto.tile_assignment_devices),
device_mesh_shape)
if op_sharding_proto.replicate_on_last_tile_dim:
split_dims_mapping = list(range(len(device_mesh_shape) - 1))
else:
split_dims_mapping = list(range(len(device_mesh_shape)))
prefix = _MESH_SPLIT_DIM_PREFIXES.stack
if prefix:
assert split_dims_mapping[:len(prefix)] == prefix
return cls(split_dims_mapping[len(prefix):], device_mesh)
else:
return cls.ReplicatedSpec()
def AddLeadingDims(self, num_dims: int = 1) -> 'TensorShardingSpec':
if self.is_replicated:
return self
new_padding = (None if self._uneven_padding is None else [0] * num_dims +
self._uneven_padding)
return TensorShardingSpec([-1] * num_dims + self._split_dims_mapping,
self.device_mesh, new_padding)
def RemoveLeadingDims(self, num_dims: int = 1) -> 'TensorShardingSpec':
if self.is_replicated:
return self
new_padding = (None if self._uneven_padding is None else
self._uneven_padding[num_dims:])
return TensorShardingSpec(self._split_dims_mapping[num_dims:],
self.device_mesh, new_padding)
def RemoveDim(self, dim) -> 'TensorShardingSpec':
"""Returns a copy of self with dimension 'dim' removed."""
if self.is_replicated:
return self
if dim < 0:
num_dims = len(self._split_dims_mapping)
dim = num_dims + dim
assert dim >= 0 and dim < len(self._split_dims_mapping)
new_padding = (None if self._uneven_padding is None else
self._uneven_padding[:dim] + self._uneven_padding[dim + 1:])
split_dims_mapping = (
self._split_dims_mapping[:dim] + self._split_dims_mapping[dim + 1:])
return TensorShardingSpec(split_dims_mapping, self.device_mesh, new_padding)
@classmethod
def ReplicatedSpec(cls):
return TensorShardingSpec()
@property
def split_dims_mapping(self) -> Optional[List[int]]:
return self._split_dims_mapping
@property
def device_mesh(self) -> Optional[np.ndarray]:
return self._device_mesh
@property
def is_replicated(self) -> bool:
if self.device_mesh is None or self.split_dims_mapping is None:
return True
for mesh_dim in self.split_dims_mapping:
if mesh_dim >= 0 and self.device_mesh.shape[mesh_dim] > 1:
return False
return True
@property
def mesh_dim_to_tensor_dim_mapping(self) -> Dict[int, int]:
mapping = {}
if self.is_replicated:
return mapping
for i in range(len(self.split_dims_mapping)):
if self.split_dims_mapping[i] >= 0:
mapping[self.split_dims_mapping[i]] = i
return mapping
@property
def uneven_padding(self) -> Optional[List[int]]:
return self._uneven_padding
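# Illustrative sketch (not part of the original library): a spec built with
# FromFullShape records both the mesh mapping and the uneven padding, and
# ShardShape reports the per-device shape. The 2x4 mesh is hypothetical.
def _ExampleTensorShardingSpec():
  mesh = np.arange(8).reshape(2, 4)
  spec = TensorShardingSpec.FromFullShape([9, 30], [0, 1], mesh)
  # Shard sizes are ceil(9 / 2) = 5 and ceil(30 / 4) = 8, so the padding
  # needed to make each dim evenly divisible is [1, 2].
  return spec.ShardShape([9, 30]), spec.uneven_padding  # ([5, 8], [1, 2])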
def GetVarSharding(var: tf.Variable) -> TensorShardingSpec:
"""Returns the sharding directly attached to a variable."""
sharding = xla_sharding.get_op_sharding(var.op)
if not sharding:
return TensorShardingSpec.ReplicatedSpec()
proto = xla_data_pb2.OpSharding()
proto.ParseFromString(sharding)
spec_without_padding = TensorShardingSpec.FromXlaOpSharding(proto)
# Consider uneven padding.
return TensorShardingSpec.FromFullShape(
[int(d) for d in var.shape], spec_without_padding.split_dims_mapping,
spec_without_padding.device_mesh)
_spm_cache = {}
def LoadSpm(model_file):
"""Loads SPM from model_file. Returns SentencePieceProcessor."""
global _spm_cache
if model_file in _spm_cache:
return _spm_cache[model_file]
else:
spm = sentencepiece_processor.SentencePieceProcessor()
spm.Load(model_file)
_spm_cache[model_file] = spm
return spm
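# Illustrative sketch (not part of the original library): repeated loads of the
# same model file are served from _spm_cache. The path is a hypothetical
# placeholder.
def _ExampleLoadSpm():
  spm = LoadSpm('/tmp/example.model')
  return spm is LoadSpm('/tmp/example.model')  # True: second call hits the cache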
| tensorflow/lingvo | lingvo/core/gshard_utils.py | Python | apache-2.0 | 16,840 |
import unittest
from os.path import join, dirname
try:
from unittest import mock
except ImportError:
import mock
from binstar_client.utils.notebook import Downloader
files = {'files': [
{'basename': 'notebook', 'version': '1'},
{'basename': 'notebook', 'version': '2'},
{'basename': 'data', 'version': '2'}
]}
class DownloaderTestCase(unittest.TestCase):
def data_dir(self, filename):
test_data = join(dirname(__file__), 'data')
return join(test_data, filename)
def test_ensure_location(self):
aserver_api = mock.MagicMock()
aserver_api.package = mock.MagicMock(return_value=files)
downloader = Downloader(aserver_api, 'username', 'notebook')
self.assertEqual(downloader.list_files()[0]['version'], '2')
self.assertEqual(downloader.list_files()[1]['version'], '2')
def test_can_download(self):
package_1 = {'basename': 'notebook.ipynb'}
package_2 = {'basename': 'NOEXIST'}
downloader = Downloader('binstar', 'username', 'notebook')
downloader.output = self.data_dir('')
        self.assertFalse(downloader.can_download(package_1))
self.assertTrue(downloader.can_download(package_1, True))
self.assertTrue(downloader.can_download(package_2))
def test_list_old_files(self):
old_files = {'files': [{
'basename': 'old-notebook',
'version': '1.0.0',
'upload_time': '2015-04-02 22:32:31.253000+00:00'
}]}
aserver_api = mock.MagicMock()
aserver_api.package = mock.MagicMock(return_value=old_files)
downloader = Downloader(aserver_api, 'username', 'notebook')
self.assertEqual(downloader.list_files()[0]['version'], '1.0.0')
if __name__ == '__main__':
unittest.main()
| GiovanniConserva/TestDeploy | venv/Lib/site-packages/binstar_client/utils/notebook/tests/test_downloader.py | Python | bsd-3-clause | 1,802 |
import _plotly_utils.basevalidators
class ColorsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="colors", parent_name="funnelarea.marker", **kwargs):
super(ColorsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/funnelarea/marker/_colors.py | Python | mit | 406 |
from flask import Blueprint
from flask_restful import Api, Resource, reqparse, inputs
from splice.queries.distribution import get_possible_distributions
from splice.web.api.tile_upload import artifacts_upload
dist_bp = Blueprint('api.distributions', __name__, url_prefix='/api')
api = Api(dist_bp)
arg_parser = reqparse.RequestParser()
arg_parser.add_argument(
'date', type=inputs.date, required=False, help='date',
location='args', store_missing=False)
arg_parser.add_argument(
'channel_id', type=int, required=False, help='Channel ID',
location='args', store_missing=False)
class DistributionAPI(Resource):
def __init__(self):
super(DistributionAPI, self).__init__()
def get(self):
"""Returns the distributions on a specific date"""
args = arg_parser.parse_args()
artifacts = get_possible_distributions(today=args.get('date'),
channel_id=args.get('channel_id'))
if artifacts:
return {"results": artifacts}
else:
return {"message": "No ditribution found on that date"}, 404
    def post(self):  # pragma: no cover
"""Deploy the current distribution to S3"""
ret = {}
args = arg_parser.parse_args()
try:
channel_artifacts = get_possible_distributions(today=args.get('date'),
channel_id=args.get('channel_id'))
for channel, artifacts in channel_artifacts.items():
urls = artifacts_upload(artifacts)
ret[channel] = urls
return {"results": ret}, 201
except Exception as e:
return {"message": "%s" % e}, 400
api.add_resource(DistributionAPI, '/distributions', endpoint='distributions')
def register_routes(app):
app.register_blueprint(dist_bp)
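# Illustrative sketch (not part of this module): how a client might query the
# endpoint registered above. The base URL, date, and channel_id are
# hypothetical, and `requests` is assumed to be available in the client.
def _example_query_distributions(base_url):
    import requests
    return requests.get(base_url + '/api/distributions',
                        params={'date': '2015-04-02', 'channel_id': 1})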
| ncloudioj/splice | splice/web/api/distribution.py | Python | mpl-2.0 | 1,877 |
"""Testing for K-Medoids"""
import warnings
import numpy as np
from unittest import mock
from scipy.sparse import csc_matrix
import pytest
from sklearn.datasets import load_iris, fetch_20newsgroups_vectorized
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import euclidean_distances, cosine_distances
from numpy.testing import assert_allclose, assert_array_equal
from sklearn_extra.cluster import KMedoids, CLARA
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
seed = 0
X = np.random.RandomState(seed).rand(100, 5)
# test kmedoid's results
rng = np.random.RandomState(seed)
X_cc, y_cc = make_blobs(
n_samples=100,
centers=np.array([[-1, -1], [1, 1]]),
random_state=rng,
shuffle=False,
cluster_std=0.2,
)
@pytest.mark.parametrize("method", ["alternate", "pam"])
@pytest.mark.parametrize(
"init", ["random", "heuristic", "build", "k-medoids++"]
)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_kmedoid_results(method, init, dtype):
expected = np.hstack([np.zeros(50), np.ones(50)])
km = KMedoids(n_clusters=2, init=init, method=method, random_state=rng)
km.fit(X_cc.astype(dtype))
    # This test uses data that are not perfectly separable, so the
    # accuracy is not 1; it is around 0.85.
assert (np.mean(km.labels_ == expected) > 0.8) or (
1 - np.mean(km.labels_ == expected) > 0.8
)
assert dtype is np.dtype(km.cluster_centers_.dtype).type
assert dtype is np.dtype(km.transform(X_cc.astype(dtype)).dtype).type
@pytest.mark.parametrize("method", ["alternate", "pam"])
@pytest.mark.parametrize(
"init", ["random", "heuristic", "build", "k-medoids++"]
)
def test_kmedoid_nclusters(method, init):
n_clusters = 50
km = KMedoids(
n_clusters=n_clusters,
init=init,
method=method,
max_iter=1,
random_state=rng,
)
km.fit(X_cc)
assert len(np.unique(km.medoid_indices_)) == n_clusters
def test_clara_results():
expected = np.hstack([np.zeros(50), np.ones(50)])
km = CLARA(n_clusters=2)
km.fit(X_cc)
    # This test uses data that are not perfectly separable, so the
    # accuracy is not 1; it is around 0.85.
assert (np.mean(km.labels_ == expected) > 0.8) or (
1 - np.mean(km.labels_ == expected) > 0.8
)
def test_medoids_invalid_method():
with pytest.raises(ValueError, match="invalid is not supported"):
KMedoids(n_clusters=1, method="invalid").fit([[0, 1], [1, 1]])
def test_medoids_invalid_init():
with pytest.raises(ValueError, match="init needs to be one of"):
KMedoids(n_clusters=1, init="invalid").fit([[0, 1], [1, 1]])
def test_kmedoids_input_validation_and_fit_check():
rng = np.random.RandomState(seed)
# Invalid parameters
msg = "n_clusters should be a nonnegative integer. 0 was given"
with pytest.raises(ValueError, match=msg):
KMedoids(n_clusters=0).fit(X)
msg = "n_clusters should be a nonnegative integer. None was given"
with pytest.raises(ValueError, match=msg):
KMedoids(n_clusters=None).fit(X)
msg = "max_iter should be a nonnegative integer. -1 was given"
with pytest.raises(ValueError, match=msg):
KMedoids(n_clusters=1, max_iter=-1).fit(X)
msg = "max_iter should be a nonnegative integer. None was given"
with pytest.raises(ValueError, match=msg):
KMedoids(n_clusters=1, max_iter=None).fit(X)
msg = (
r"init needs to be one of the following: "
r".*random.*heuristic.*k-medoids\+\+"
)
with pytest.raises(ValueError, match=msg):
KMedoids(init=None).fit(X)
# Trying to fit 3 samples to 8 clusters
msg = (
"The number of medoids \(8\) must be less "
"than the number of samples 5."
)
Xsmall = rng.rand(5, 2)
with pytest.raises(ValueError, match=msg):
KMedoids(n_clusters=8).fit(Xsmall)
def test_random_deterministic():
"""Random_state should determine 'random' init output."""
rng = np.random.RandomState(seed)
X = load_iris()["data"]
D = euclidean_distances(X)
medoids = KMedoids(init="random")._initialize_medoids(D, 4, rng)
assert_array_equal(medoids, [114, 62, 33, 107])
def test_heuristic_deterministic():
"""Result of heuristic init method should not depend on rnadom state."""
rng1 = np.random.RandomState(1)
rng2 = np.random.RandomState(2)
X = load_iris()["data"]
D = euclidean_distances(X)
medoids_1 = KMedoids(init="heuristic")._initialize_medoids(D, 10, rng1)
medoids_2 = KMedoids(init="heuristic")._initialize_medoids(D, 10, rng2)
assert_array_equal(medoids_1, medoids_2)
def test_update_medoid_idxs_empty_cluster():
"""Label is unchanged for an empty cluster."""
D = np.zeros((3, 3))
labels = np.array([0, 0, 0])
medoid_idxs = np.array([0, 1])
kmedoids = KMedoids(n_clusters=2)
# Swallow empty cluster warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
kmedoids._update_medoid_idxs_in_place(D, labels, medoid_idxs)
assert_array_equal(medoid_idxs, [0, 1])
def test_kmedoids_empty_clusters():
"""When a cluster is empty, it should throw a warning."""
rng = np.random.RandomState(seed)
X = [[1], [1], [1]]
kmedoids = KMedoids(n_clusters=2, random_state=rng)
with pytest.warns(UserWarning, match="Cluster 1 is empty!"):
kmedoids.fit(X)
@mock.patch.object(KMedoids, "_kpp_init", return_value=object())
def test_kpp_called(_kpp_init_mocked):
"""KMedoids._kpp_init method should be called by _initialize_medoids"""
D = np.array([[0, 1], [1, 0]])
n_clusters = 2
rng = np.random.RandomState(seed)
kmedoids = KMedoids()
kmedoids.init = "k-medoids++"
# set _kpp_init_mocked.return_value to a singleton
initial_medoids = kmedoids._initialize_medoids(D, n_clusters, rng)
# assert that _kpp_init was called and its result was returned.
_kpp_init_mocked.assert_called_once_with(D, n_clusters, rng)
assert initial_medoids == _kpp_init_mocked.return_value
def test_kmedoids_pp():
"""Initial clusters should be well-separated for k-medoids++"""
rng = np.random.RandomState(seed)
kmedoids = KMedoids()
X = [
[10, 0],
[11, 0],
[0, 10],
[0, 11],
[10, 10],
[11, 10],
[12, 10],
[10, 11],
]
D = euclidean_distances(X)
centers = kmedoids._kpp_init(D, n_clusters=3, random_state_=rng)
assert len(centers) == 3
inter_medoid_distances = D[centers][:, centers]
assert np.all((inter_medoid_distances > 5) | (inter_medoid_distances == 0))
def test_precomputed():
"""Test the 'precomputed' distance metric."""
rng = np.random.RandomState(seed)
X_1 = [[1.0, 0.0], [1.1, 0.0], [0.0, 1.0], [0.0, 1.1]]
D_1 = euclidean_distances(X_1)
X_2 = [[1.1, 0.0], [0.0, 0.9]]
D_2 = euclidean_distances(X_2, X_1)
kmedoids = KMedoids(metric="precomputed", n_clusters=2, random_state=rng)
kmedoids.fit(D_1)
assert_allclose(kmedoids.inertia_, 0.2)
assert_array_equal(kmedoids.medoid_indices_, [2, 0])
assert_array_equal(kmedoids.labels_, [1, 1, 0, 0])
assert kmedoids.cluster_centers_ is None
med_1, med_2 = tuple(kmedoids.medoid_indices_)
predictions = kmedoids.predict(D_2)
assert_array_equal(predictions, [med_1 // 2, med_2 // 2])
transformed = kmedoids.transform(D_2)
assert_array_equal(transformed, D_2[:, kmedoids.medoid_indices_])
def test_kmedoids_fit_naive():
n_clusters = 3
metric = "euclidean"
model = KMedoids(n_clusters=n_clusters, metric=metric)
Xnaive = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
model.fit(Xnaive)
assert_array_equal(
model.cluster_centers_, [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
)
assert_array_equal(model.labels_, [0, 1, 2])
assert model.inertia_ == 0.0
# diagonal must be zero, off-diagonals must be positive
X_new = model.transform(Xnaive)
for c in range(n_clusters):
assert X_new[c, c] == 0
for c2 in range(n_clusters):
if c != c2:
assert X_new[c, c2] > 0
def test_max_iter():
"""Test that warning message is thrown when max_iter is reached."""
rng = np.random.RandomState(seed)
X_iris = load_iris()["data"]
model = KMedoids(
n_clusters=10, init="random", random_state=rng, max_iter=1
)
msg = "Maximum number of iteration reached before"
with pytest.warns(UserWarning, match=msg):
model.fit(X_iris)
def test_kmedoids_iris():
"""Test kmedoids on the Iris dataset"""
rng = np.random.RandomState(seed)
X_iris = load_iris()["data"]
ref_model = KMeans(n_clusters=3).fit(X_iris)
avg_dist_to_closest_centroid = (
ref_model.transform(X_iris).min(axis=1).mean()
)
for init in ["random", "heuristic", "k-medoids++"]:
distance_metric = "euclidean"
model = KMedoids(
n_clusters=3, metric=distance_metric, init=init, random_state=rng
)
model.fit(X_iris)
# test convergence in reasonable number of steps
assert model.n_iter_ < (len(X_iris) // 10)
distances = PAIRWISE_DISTANCE_FUNCTIONS[distance_metric](X_iris)
avg_dist_to_random_medoid = np.mean(distances.ravel())
avg_dist_to_closest_medoid = model.inertia_ / X_iris.shape[0]
# We want distance-to-closest-medoid to be reduced from average
# distance by more than 50%
assert avg_dist_to_random_medoid > 2 * avg_dist_to_closest_medoid
# When K-Medoids is using Euclidean distance,
# we can compare its performance to
# K-Means. We want the average distance to cluster centers
# to be similar between K-Means and K-Medoids
assert_allclose(
avg_dist_to_closest_medoid, avg_dist_to_closest_centroid, rtol=0.1
)
def test_kmedoids_fit_predict_transform():
rng = np.random.RandomState(seed)
model = KMedoids(random_state=rng)
labels1 = model.fit_predict(X)
assert len(labels1) == 100
assert_array_equal(labels1, model.labels_)
labels2 = model.predict(X)
assert_array_equal(labels1, labels2)
Xt1 = model.fit_transform(X)
assert_array_equal(Xt1.shape, (100, model.n_clusters))
Xt2 = model.transform(X)
assert_array_equal(Xt1, Xt2)
def test_callable_distance_metric():
rng = np.random.RandomState(seed)
def my_metric(a, b):
return np.sqrt(np.sum(np.power(a - b, 2)))
model = KMedoids(random_state=rng, metric=my_metric)
labels1 = model.fit_predict(X)
assert len(labels1) == 100
assert_array_equal(labels1, model.labels_)
def test_outlier_robustness():
rng = np.random.RandomState(seed)
kmeans = KMeans(n_clusters=2, random_state=rng)
kmedoids = KMedoids(n_clusters=2, random_state=rng)
X = [[-11, 0], [-10, 0], [-9, 0], [0, 0], [1, 0], [2, 0], [1000, 0]]
kmeans.fit(X)
kmedoids.fit(X)
assert_array_equal(kmeans.labels_, [0, 0, 0, 0, 0, 0, 1])
assert_array_equal(kmedoids.labels_, [0, 0, 0, 1, 1, 1, 1])
def test_kmedoids_on_sparse_input():
rng = np.random.RandomState(seed)
model = KMedoids(n_clusters=2, random_state=rng)
row = np.array([1, 0])
col = np.array([0, 4])
data = np.array([1, 1])
X = csc_matrix((data, (row, col)), shape=(2, 5))
labels = model.fit_predict(X)
assert len(labels) == 2
assert_array_equal(labels, model.labels_)
# Test the build initialization.
def test_build():
X, y = fetch_20newsgroups_vectorized(return_X_y=True)
# Select only the first 500 samples
X = X[:500]
y = y[:500]
# Precompute cosine distance matrix
diss = cosine_distances(X)
# run build
ske = KMedoids(20, "precomputed", init="build", max_iter=0)
ske.fit(diss)
assert ske.inertia_ <= 230
assert len(np.unique(ske.labels_)) == 20
def test_clara_consistency_iris():
# test that CLARA is PAM when full sample is used
rng = np.random.RandomState(seed)
X_iris = load_iris()["data"]
clara = CLARA(
n_clusters=3,
n_sampling_iter=1,
n_sampling=len(X_iris),
random_state=rng,
)
model = KMedoids(n_clusters=3, init="build", random_state=rng)
model.fit(X_iris)
clara.fit(X_iris)
assert np.sum(model.labels_ == clara.labels_) == len(X_iris)
def test_seuclidean():
with pytest.warns(None) as record:
km = KMedoids(2, metric="seuclidean", method="pam")
km.fit(np.array([0, 0, 0, 1]).reshape((4, 1)))
km.predict(np.array([0, 0, 0, 1]).reshape((4, 1)))
km.transform(np.array([0, 0, 0, 1]).reshape((4, 1)))
assert len(record) == 0
def test_medoids_indices():
rng = np.random.RandomState(seed)
X_iris = load_iris()["data"]
clara = CLARA(
n_clusters=3,
n_sampling_iter=1,
n_sampling=len(X_iris),
random_state=rng,
)
model = KMedoids(n_clusters=3, init="build", random_state=rng)
centroids = np.array([X_iris[0], X_iris[50]])
array_like_model = KMedoids(
n_clusters=len(centroids), init=centroids, max_iter=0
)
model.fit(X_iris)
clara.fit(X_iris)
array_like_model.fit(X_iris)
assert_array_equal(X_iris[model.medoid_indices_], model.cluster_centers_)
assert_array_equal(X_iris[clara.medoid_indices_], clara.cluster_centers_)
assert_array_equal(centroids, array_like_model.cluster_centers_)
def test_array_like_init():
centroids = np.array([X_cc[0], X_cc[50]])
expected = np.hstack([np.zeros(50), np.ones(50)])
km = KMedoids(n_clusters=len(centroids), init=centroids)
km.fit(X_cc)
    # This test uses data that are not perfectly separable, so the
    # accuracy is not 1; it is around 0.85.
assert (np.mean(km.labels_ == expected) > 0.8) or (
1 - np.mean(km.labels_ == expected) > 0.8
)
# Override n_clusters if array-like init method is used
km = KMedoids(n_clusters=len(centroids) + 2, init=centroids)
km.fit(X_cc)
assert len(km.cluster_centers_) == len(centroids)
| scikit-learn-contrib/scikit-learn-extra | sklearn_extra/cluster/tests/test_k_medoids.py | Python | bsd-3-clause | 14,171 |
import numpy as np
import cv2
import rospy
from cv_bridge import CvBridge, CvBridgeError
import math
import vision_utils
from sub_vision.msg import TrackObjectFeedback
from sensor_msgs.msg import Image
from sub_vision.msg import feedback
def angle_cos(p0, p1, p2):
d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
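# Illustrative sketch (not part of the original module): angle_cos returns the
# absolute cosine of the angle at p1, so a perfect right angle gives 0.0. The
# square filter in GateFinder.process below accepts corners with max_cos < 0.1.
def _example_angle_cos():
    p0, p1, p2 = np.array([1, 0]), np.array([0, 0]), np.array([0, 1])
    return angle_cos(p0, p1, p2)  # 0.0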
def nlargest(n, contours, key):
largestContours = []
if key == cv2.contourArea:
sortedContours = sorted(contours, key=cv2.contourArea, reverse=True)
for i in range(n):
largestContours.append(sortedContours[i])
return largestContours
def greatestNAreaContours(contours, n):
return nlargest(n, contours, key=cv2.contourArea)
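# Illustrative sketch (not part of the original module): keeping the two
# largest of three toy square contours (synthetic points, not detections).
def _example_greatest_two_contours():
    squares = [np.array([[0, 0], [s, 0], [s, s], [0, s]],
                        dtype=np.int32).reshape(-1, 1, 2)
               for s in (1, 3, 2)]
    return greatestNAreaContours(squares, 2)  # the s=3 and s=2 squares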
class GateFinder:
def __init__(self):
self.bridge = CvBridge()
self.image_pub = rospy.Publisher("/thresh_image", Image, queue_size=10)
self.feedback_pub = rospy.Publisher("/gate_feedback", feedback, queue_size=10)
self.feedback_msg = feedback()
def normalsFromAllCorners(self, corners, disparities):
valid = []
for idx, val in enumerate(disparities):
if val > 0:
valid.append(idx)
combos = vision_utils.cartesian((valid, valid, valid))
normals = []
for combo in combos: #Find all possible cross products of the available points
if combo[0] != combo[1] and combo[1] != combo[2] and combo[0] != combo[2]:
new = np.cross(corners[combo[1]] - corners[combo[0]], corners[combo[2]] - corners[combo[0]])
if new.max() > 0:
new = np.divide(new, new.max()) #normalize
if np.dot(new, np.array([-1,0,0])) < 0:
normals.append(-new)
else:
normals.append(new)
return normals
def process(self, imageLeftRect, imageRightRect, imageDisparityRect, cameraModel, stereoCameraModel, upper, lower):
assert(imageLeftRect is not None)
feedback = TrackObjectFeedback()
feedback.found = False
imageHLS = cv2.cvtColor(imageLeftRect, cv2.COLOR_BGR2HLS)
lower = np.array([0,70,50], dtype = 'uint8')
upper = np.array([200,255,255], dtype='uint8')
mask=cv2.inRange(imageHLS, lower,upper) #HLS thresholds
output = cv2.bitwise_and(imageLeftRect, imageLeftRect, mask=mask)
self.image_pub.publish(self.bridge.cv2_to_imgmsg(imageLeftRect, "bgr8"))
#mask=cv2.inRange(imageHSV, np.array([20,30,80],dtype='uint8'),np.array([40,52,120],dtype='uint8'))
cnts = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = cnts[1]
if len(contours) == 0:
print("No contours")
return feedback
rects = []
for contour in contours: #adapted from https://github.com/opencv/opencv/blob/master/samples/python/squares.py
epsilon = cv2.arcLength(contour, True)*0.05
contour = cv2.approxPolyDP(contour, epsilon, True)
if len(contour) == 4 and cv2.isContourConvex(contour):
contour = contour.reshape(-1, 2)
                max_cos = np.max([angle_cos(contour[i], contour[(i+1) % 4], contour[(i+2) % 4]) for i in range(4)])
if max_cos < 0.1:
rects.append(contour)
if len(rects) > 1:
rects = greatestNAreaContours(rects, 2)
rect1 = list(cv2.minAreaRect(rects[0]))
rect2 = list(cv2.minAreaRect(rects[1]))
if(rect1[1][0] < rect1[1][1]): #Fix wonky angles from opencv (I think)
rect1[2] = (rect1[2] + 180) * 180/3.141
else:
rect1[2] = (rect1[2] + 90) * 180/3.141
if(rect2[1][0] < rect2[1][1]):
rect2[2] = (rect2[2] + 180) * 180/3.141
else:
rect2[2] = (rect2[2] + 90) * 180/3.141
gateCenter = (int((rect1[0][0] + rect2[0][0])/2), int((rect1[0][1] + rect2[0][1])/2))
self.feedback_msg.center = gateCenter
self.feedback_msg.size = imageRightRect.shape
self.feedback_pub.publish(self.feedback_msg)
#feedback.center = gateCenter
#feedback.size = imageRightRect.shape
if gateCenter[0] - rect1[0][0] > 0:
feedback.width = (rect2[0][0]+(rect2[1][0]/2)) - (rect1[0][0] - (rect1[1][0]/2))
else:
feedback.width = (rect1[0][0] -(rect1[1][0]/2)) - (rect2[0][0]+(rect2[1][0]/2))
feedback.height = rect1[1][1]
feedback.found = True
return feedback
| RoboticsClubatUCF/RoboSub | ucf_sub_catkin_ros/src/sub_vision/src/gatefinder.py | Python | mit | 4,674 |
from bs4 import BeautifulSoup
from datetime import datetime
from scraper import *
from general import General
import traceback
def get_arrival_time(arrival_time_str):
arrival_time_strip = arrival_time_str.split(' ')[0]
time = datetime.strptime(arrival_time_strip, '%H:%M:%S').time()
now = datetime.now()
arrival_time = datetime.combine(now, time)
return arrival_time
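# Illustrative sketch (not part of the original module): get_arrival_time keeps
# only the leading clock token of the scraped string and anchors it to today's
# date. The input string below is a hypothetical scraped value.
def _example_get_arrival_time():
    return get_arrival_time('18:30:05 trailing-text')  # today at 18:30:05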
class Movement(Scraper):
def __init__(self, browser, config):
super(Movement, self).__init__(browser, config)
self.general_client = General(browser, config)
def get_fleet_movement_from_movement_page(self):
"""
Deprecated, use get_fleet_movement instead
:return:
"""
url = self.url_provider.get_page_url('movement')
res = self.open_url(url)
soup = BeautifulSoup(res.read(), "lxml")
movement_nodes = soup.findAll("div", {"class": "fleetDetails detailsOpened"})
fleet_movements = []
for movement_node in movement_nodes:
mission_code = int( movement_node['data-mission-type'] )
mission_type = self.mission_types[ mission_code ]
origin_planet_coords = self.parse_coords(movement_node.find("span", {"class": "originCoords"}).text)
origin_planet_name = movement_node.find("span", {"class": "originPlanet"}).text.strip()
destination_coords = self.parse_coords(
movement_node.find("span", {"class": "destinationCoords tooltip"}).text)
movement = FleetMovement(origin_planet_coords, origin_planet_name, destination_coords)
movement.mission = mission_type
fleet_movements.append(movement)
return fleet_movements
def get_fleet_movement(self):
url = self.url_provider.get_page_url('eventList')
res = self.open_url(url)
soup = BeautifulSoup(res.read(), "lxml")
movement_table = soup.find("table", {"id": "eventContent"})
movement_rows = movement_table.findAll("tr", {"class": "eventFleet"})
fleet_movements = []
for movement_row in movement_rows:
try:
mission_code = int( movement_row['data-mission-type'] )
mission_type = self.mission_types[ mission_code ]
origin_coords = self.parse_coords(movement_row.find("td", {"class": "coordsOrigin"}).text.strip())
origin_planet_name = movement_row.find("td", {"class": "originFleet"}).text.strip()
dest_coords = self.parse_coords(movement_row.find("td", {"class": "destCoords"}).text.strip())
dest_planet_data = movement_row.find("td", {"class": "destFleet"})
dest_planet_name = movement_row.find("td", {"class": "destFleet"}).text.strip()
isMoon = False if dest_planet_data.find("figure", {"class": "moon"}) is None else True
count_down_td = movement_row.find("td", {"class": "countDown"})
is_friendly = 'friendly' in count_down_td.attrs['class']
arrival_time_str = movement_row.find("td", {"class": "arrivalTime"}).text
arrival_time = get_arrival_time(arrival_time_str)
countdown_time = self.get_countdown_time(arrival_time)
movement = FleetMovement(origin_coords, origin_planet_name, dest_coords, dest_planet_name, is_friendly,
arrival_time, countdown_time, mission_type, isMoon)
fleet_movements.append(movement)
except Exception as e:
exception_message = traceback.format_exc()
self.logger.error(exception_message)
return fleet_movements
def get_countdown_time(self, arrival_time):
game_time = self.general_client.get_game_datetime()
return arrival_time - game_time
@staticmethod
def parse_coords(text):
return text.replace('[', '').replace(']', '')
def get_fleet_slots_usage(self):
"""
Get fleet slot usage data. Only works if there is at least 1 fleet in movement
"""
url = self.url_provider.get_page_url('movement')
res = self.open_url(url)
        soup = BeautifulSoup(res.read(), "lxml")
slots_info_node = soup.find("span", {"class", "fleetSlots"})
if slots_info_node is not None:
current_slots = int(slots_info_node.find("span", {"class", "current"}).text)
all_slots = int(slots_info_node.find("span", {"class", "all"}).text)
else:
current_slots = 0
all_slots = 1
return current_slots, all_slots
| yosh778/OG-Bot | ogbot/scraping/movement.py | Python | mit | 4,623 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class BasePanels(horizon.PanelGroup):
slug = "compute"
name = _("Manage Compute")
panels = ('overview',
'instances_and_volumes',
'images_and_snapshots',
'access_and_security',
'networks')
class ObjectStorePanels(horizon.PanelGroup):
slug = "object_store"
name = _("Object Store")
panels = ('containers',)
class Nova(horizon.Dashboard):
name = _("Project")
slug = "nova"
panels = (BasePanels, ObjectStorePanels)
default_panel = 'overview'
supports_tenants = True
horizon.register(Nova)
| asomya/test | horizon/dashboards/nova/dashboard.py | Python | apache-2.0 | 1,315 |
from datetime import datetime, timedelta, timezone
from email.headerregistry import Address
import signal
from typing import Optional
import attr
from eletter import BytesAttachment
import pytest
from pytest_mock import MockerFixture
from daemail import util
from daemail.message import DraftMessage
from daemail.reporter import CommandReporter
from daemail.runner import CommandError, CommandResult
w4 = timezone(timedelta(hours=-4))
@pytest.mark.parametrize(
"result,subject,body",
[
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
"> This is the output.\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"",
stderr=b"",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output: none\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"This is the stderr.\n",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
"> This is the output.\n"
"\n"
"Error Output:\n"
"> This is the stderr.\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"",
stderr=b"This is the stderr.\n",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output: none\n"
"\n"
"Error Output:\n"
"> This is the stderr.\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=None,
stderr=b"This is the stderr.\n",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Error Output:\n"
"> This is the stderr.\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=None,
stderr=None,
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=42,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
"[FAILED] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 42\n"
"\n"
"Output:\n"
"> This is the output.\n",
),
pytest.param(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=-2,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
"[FAILED] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: -2 (SIGINT)\n"
"\n"
"Output:\n"
"> This is the output.\n",
marks=pytest.mark.skipif(
getattr(signal, "SIGINT", None) != 2,
reason="SIGINT is not 2 on this platform",
),
),
pytest.param(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=-65,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
"[FAILED] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: -65\n"
"\n"
"Output:\n"
"> This is the output.\n",
marks=pytest.mark.skipif(
any(s.value == 65 for s in signal.Signals),
reason="This platform has a signal #65",
),
),
],
)
def test_report_plain_message(
mocker: MockerFixture,
result: CommandResult,
subject: str,
body: str,
) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type=None,
nonempty=False,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg) == {
"to_addrs": to_addrs,
"subject": subject,
"from_addr": from_addr,
"parts": [body],
}
show_argv_spy.assert_called_once_with(*result.argv)
@pytest.mark.parametrize("failure_only", [False, True])
@pytest.mark.parametrize("nonempty", [False, True])
def test_report_command_error(
mocker: MockerFixture,
failure_only: bool,
nonempty: bool,
) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
result = CommandError(
argv=["foo", "-x", "bar.txt"],
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
tb=(
"Traceback (most recent call last):\n"
" ...\n"
"FakeError: Let's pretend this really happened\n"
),
)
reporter = CommandReporter(
encoding="utf-8",
failure_only=failure_only,
from_addr=from_addr,
mime_type=None,
nonempty=nonempty,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg) == {
"to_addrs": to_addrs,
"subject": "[ERROR] foo -x bar.txt",
"from_addr": from_addr,
"parts": [
"An error occurred while attempting to run the command:\n"
"> Traceback (most recent call last):\n"
"> ...\n"
"> FakeError: Let's pretend this really happened\n"
],
}
show_argv_spy.assert_called_once_with(*result.argv)
def test_report_stdout_mime(mocker: MockerFixture) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
result = CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b'{"This": "is the output."}\n',
stderr=b"",
)
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type="application/json",
nonempty=False,
stderr_encoding="utf-8",
stdout_filename="stdout.html",
to_addrs=to_addrs,
utc=False,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg, recurse=False) == {
"to_addrs": to_addrs,
"subject": "[DONE] foo -x bar.txt",
"from_addr": from_addr,
"parts": [
(
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
),
BytesAttachment(
b'{"This": "is the output."}\n',
"stdout.html",
content_type="application/json",
inline=True,
),
],
}
show_argv_spy.assert_called_once_with(*result.argv)
@pytest.mark.parametrize(
"result,subject,body",
[
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
"> This is the output.\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"",
stderr=b"",
),
None,
None,
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"This is the stderr.\n",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
"> This is the output.\n"
"\n"
"Error Output:\n"
"> This is the stderr.\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"",
stderr=b"This is the stderr.\n",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output: none\n"
"\n"
"Error Output:\n"
"> This is the stderr.\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=None,
stderr=b"This is the stderr.\n",
),
"[DONE] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Error Output:\n"
"> This is the stderr.\n",
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=None,
stderr=None,
),
None,
None,
),
],
)
def test_report_nonempty(
result: CommandResult,
subject: Optional[str],
body: Optional[str],
) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type=None,
nonempty=True,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
msg = reporter.report(result)
if body is None:
assert msg is None
else:
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg) == {
"to_addrs": to_addrs,
"subject": subject,
"from_addr": from_addr,
"parts": [body],
}
@pytest.mark.parametrize(
"result,subject,body",
[
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
None,
None,
),
(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=42,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
"[FAILED] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 42\n"
"\n"
"Output:\n"
"> This is the output.\n",
),
pytest.param(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=-2,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
"[FAILED] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: -2 (SIGINT)\n"
"\n"
"Output:\n"
"> This is the output.\n",
marks=pytest.mark.skipif(
getattr(signal, "SIGINT", None) != 2,
reason="SIGINT is not 2 on this platform",
),
),
pytest.param(
CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=-65,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
),
"[FAILED] foo -x bar.txt",
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: -65\n"
"\n"
"Output:\n"
"> This is the output.\n",
marks=pytest.mark.skipif(
any(s.value == 65 for s in signal.Signals),
reason="This platform has a signal #65",
),
),
],
)
def test_report_failure_only(
result: CommandResult,
subject: Optional[str],
body: Optional[str],
) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
reporter = CommandReporter(
encoding="utf-8",
failure_only=True,
from_addr=from_addr,
mime_type=None,
nonempty=False,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
msg = reporter.report(result)
if body is None:
assert msg is None
else:
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg) == {
"to_addrs": to_addrs,
"subject": subject,
"from_addr": from_addr,
"parts": [body],
}
def test_report_utc(mocker: MockerFixture) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
result = CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is the output.\n",
stderr=b"",
)
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type=None,
nonempty=False,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=True,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg) == {
"to_addrs": to_addrs,
"subject": "[DONE] foo -x bar.txt",
"from_addr": from_addr,
"parts": [
"Start Time: 2020-03-10 19:00:28.123456Z\n"
"End Time: 2020-03-10 19:01:27.654321Z\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
"> This is the output.\n",
],
}
show_argv_spy.assert_called_once_with(*result.argv)
@pytest.mark.parametrize("stderr", [b"", None])
def test_report_undecodable_stdout_empty_stderr(
mocker: MockerFixture,
stderr: Optional[bytes],
) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
result = CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
stderr=stderr,
)
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type=None,
nonempty=False,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg, recurse=False) == {
"to_addrs": to_addrs,
"subject": "[DONE] foo -x bar.txt",
"from_addr": from_addr,
"parts": [
(
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
),
BytesAttachment(
b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
"stdout",
content_type="application/octet-stream",
inline=True,
),
],
}
show_argv_spy.assert_called_once_with(*result.argv)
def test_report_undecodable_stdout_good_stderr(mocker: MockerFixture) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
result = CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
stderr=b"This is in ASCII.\n",
)
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type=None,
nonempty=False,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg, recurse=False) == {
"to_addrs": to_addrs,
"subject": "[DONE] foo -x bar.txt",
"from_addr": from_addr,
"parts": [
(
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
),
BytesAttachment(
b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
"stdout",
content_type="application/octet-stream",
inline=True,
),
"\nError Output:\n> This is in ASCII.\n",
],
}
show_argv_spy.assert_called_once_with(*result.argv)
def test_report_empty_stdout_undecodable_stderr(mocker: MockerFixture) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
result = CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"",
stderr=b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
)
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type=None,
nonempty=False,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg, recurse=False) == {
"to_addrs": to_addrs,
"subject": "[DONE] foo -x bar.txt",
"from_addr": from_addr,
"parts": [
(
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output: none\n"
"\n"
"Error Output:\n"
),
BytesAttachment(
b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
"stderr",
content_type="application/octet-stream",
inline=True,
),
],
}
show_argv_spy.assert_called_once_with(*result.argv)
def test_report_good_stdout_undecodable_stderr(mocker: MockerFixture) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
result = CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"This is in ASCII.\n",
stderr=b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
)
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type=None,
nonempty=False,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg, recurse=False) == {
"to_addrs": to_addrs,
"subject": "[DONE] foo -x bar.txt",
"from_addr": from_addr,
"parts": [
(
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
"> This is in ASCII.\n"
"\n"
"Error Output:\n"
),
BytesAttachment(
b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
"stderr",
content_type="application/octet-stream",
inline=True,
),
],
}
show_argv_spy.assert_called_once_with(*result.argv)
def test_report_undecodable_stdout_and_stderr(mocker: MockerFixture) -> None:
from_addr = Address("Command Reporter", addr_spec="[email protected]")
to_addrs = [Address("Re Cipient", addr_spec="[email protected]")]
result = CommandResult(
argv=["foo", "-x", "bar.txt"],
rc=0,
start=datetime(2020, 3, 10, 15, 0, 28, 123456, w4),
end=datetime(2020, 3, 10, 15, 1, 27, 654321, w4),
stdout=b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
stderr=b"\xE3\x88\x89\xA2@\x89\xA2@\x89\x95@\xC5\xC2\xC3\xC4\xC9\xC3K%",
)
reporter = CommandReporter(
encoding="utf-8",
failure_only=False,
from_addr=from_addr,
mime_type=None,
nonempty=False,
stderr_encoding="utf-8",
stdout_filename=None,
to_addrs=to_addrs,
utc=False,
)
show_argv_spy = mocker.spy(util, "show_argv")
msg = reporter.report(result)
assert isinstance(msg, DraftMessage)
assert attr.asdict(msg, recurse=False) == {
"to_addrs": to_addrs,
"subject": "[DONE] foo -x bar.txt",
"from_addr": from_addr,
"parts": [
(
"Start Time: 2020-03-10 15:00:28.123456-04:00\n"
"End Time: 2020-03-10 15:01:27.654321-04:00\n"
"Exit Status: 0\n"
"\n"
"Output:\n"
),
BytesAttachment(
b"\xD0is is i\xF1 L\xE1tin\xB9.\n",
"stdout",
content_type="application/octet-stream",
inline=True,
),
"\nError Output:\n",
BytesAttachment(
b"\xE3\x88\x89\xA2@\x89\xA2@\x89\x95@\xC5\xC2\xC3\xC4\xC9\xC3K%",
"stderr",
content_type="application/octet-stream",
inline=True,
),
],
}
show_argv_spy.assert_called_once_with(*result.argv)
| jwodder/daemail | test/test_reporter.py | Python | mit | 28,357 |