repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
espona/ckanext-package_converter | ckanext/package_converter/model/scheming_resource_converter.py | 1 | 9800 | import collections
import json
from logging import getLogger
from ckan.lib.helpers import url_for
from ckan.common import config
from ckanext.package_converter.model.metadata_format import MetadataFormats
from ckanext.package_converter.model.scheming_converter import Datacite43SchemingConverter
from xmltodict import unparse, parse
log = getLogger(__name__)
class Datacite43SchemingResourceConverter(Datacite43SchemingConverter):
def __init__(self):
Datacite43SchemingConverter.__init__(self)
ckan_resource_base_format = MetadataFormats().get_metadata_formats('ckan_resource')[0]
self.input_format = ckan_resource_base_format
def _datacite_converter_schema(self, resource_dict):
try:
schema_map = self._get_schema_map(self.output_format.get_format_name())
metadata_resource_map = schema_map['metadata_resource']
datacite_dict = collections.OrderedDict()
# Header
datacite_dict['resource'] = collections.OrderedDict()
datacite_dict['resource']['@xsi:schemaLocation'] = '{namespace} {schema}'.format(
namespace=self.output_format.get_namespace(),
schema=self.output_format.get_xsd_url())
datacite_dict['resource']['@xmlns'] = '{namespace}'.format(namespace=self.output_format.get_namespace())
datacite_dict['resource']['@xmlns:xsi'] = 'http://www.w3.org/2001/XMLSchema-instance'
# Identifier*
datacite_identifier_tag = 'identifier'
datacite_dict['resource'][datacite_identifier_tag] = {
'#text': self._get_single_mapped_value(datacite_identifier_tag, resource_dict, metadata_resource_map),
'@identifierType': 'DOI'}
# Titles*
datacite_titles_tag = 'titles'
datacite_title_tag = 'title'
datacite_xml_lang_tag = 'xml:lang'
datacite_dict['resource'][datacite_titles_tag] = {datacite_title_tag: []}
datacite_title_type_tag = 'titleType'
ckan_titles = self._get_complex_mapped_value(datacite_titles_tag, datacite_title_tag,
['', datacite_title_type_tag, datacite_xml_lang_tag],
resource_dict, metadata_resource_map)
for ckan_title in ckan_titles:
datacite_title = {'#text': ckan_title.get(datacite_title_tag, ''),
'@' + datacite_xml_lang_tag: ckan_title.get(
self._joinTags([datacite_title_tag, datacite_xml_lang_tag]), 'en-us')}
if ckan_title.get(self._joinTags([datacite_title_tag, datacite_title_type_tag]), ''):
ckan_title_type = ckan_title.get(self._joinTags([datacite_title_tag, datacite_title_type_tag]),
'other')
datacite_title['@' + datacite_title_type_tag] = self._valueToDataciteCV(ckan_title_type,
datacite_title_type_tag)
datacite_dict['resource'][datacite_titles_tag][datacite_title_tag] += [datacite_title]
            # Alternate Identifier (CKAN URL): decide which one is the landing page, the resource or the package
ckan_resource_url = config.get('ckan.site_url', '') + url_for(controller='dataset_resource',
action='read',
id=resource_dict.get('package_id',
''),
resource_id=resource_dict.get('id',
''))
datacite_dict['resource']['alternateIdentifiers'] = {
'alternateIdentifier': [{'#text': ckan_resource_url, '@alternateIdentifierType': 'URL'}]}
# Sizes (not defined in scheming, taken from default CKAN resource)
datacite_size_group_tag = 'sizes'
datacite_size_tag = 'size'
datacite_sizes = []
log.debug('** SIZE *** ' + resource_dict.get('resource_size', ''))
if resource_dict.get('size', ''):
datacite_sizes += [{'#text': str(resource_dict.get('size', ' ')) + ' bytes'}]
elif resource_dict.get('resource_size', ''):
resource_size = resource_dict.get('resource_size', '')
try:
resource_size_obj = json.loads(resource_size)
datacite_sizes += [{'#text': resource_size_obj.get('size_value', '0') + ' ' + resource_size_obj.get(
'size_unit', 'KB').upper()}]
                except (ValueError, AttributeError):  # malformed JSON or unexpected type in resource_size
log.error('unparseable value at resource_size:' + str(resource_size))
if datacite_sizes:
datacite_dict['resource'][datacite_size_group_tag] = {datacite_size_tag: datacite_sizes}
# Formats (get from resources)
datacite_format_group_tag = 'formats'
datacite_format_tag = 'format'
datacite_formats = []
resource_format = self._get_single_mapped_value(
self._joinTags([datacite_format_group_tag, datacite_format_tag]),
resource_dict, metadata_resource_map,
default=resource_dict.get('mimetype', resource_dict.get('mimetype_inner', '')))
if resource_format:
datacite_formats += [{'#text': resource_format}]
if datacite_formats:
datacite_dict['resource'][datacite_format_group_tag] = {datacite_format_tag: datacite_formats}
# Version
datacite_version_tag = 'version'
datacite_version = self._get_single_mapped_value(datacite_version_tag, resource_dict, metadata_resource_map,
'')
if datacite_version:
datacite_dict['resource'][datacite_version_tag] = {'#text': datacite_version}
# Description
datacite_descriptions_tag = 'descriptions'
datacite_description_tag = 'description'
datacite_description_type_tag = 'descriptionType'
datacite_descriptions = []
ckan_descriptions = self._get_complex_mapped_value(datacite_descriptions_tag, datacite_description_tag,
['', datacite_xml_lang_tag,
datacite_description_type_tag], resource_dict,
metadata_resource_map)
for ckan_description in ckan_descriptions:
datacite_description = {'#text': ckan_description.get(datacite_description_tag, ''),
'@' + datacite_description_type_tag: ckan_description.get(
self._joinTags([datacite_description_tag, datacite_description_type_tag]),
'Abstract'),
'@' + datacite_xml_lang_tag: ckan_description.get(
self._joinTags([datacite_description_tag, datacite_xml_lang_tag]), 'en-us')}
datacite_descriptions += [datacite_description]
if datacite_descriptions:
datacite_dict['resource'][datacite_descriptions_tag] = {datacite_description_tag: datacite_descriptions}
# inherit from package
package_dict = resource_dict.get('package_dict')
if package_dict:
datacite_package_dict = parse(
super(Datacite43SchemingResourceConverter, self)._datacite_converter_schema(package_dict))
datacite_dict['resource'] = self._inherit_from_package(datacite_dict['resource'],
datacite_package_dict['resource'])
# Convert to xml
converted_package = unparse(datacite_dict, pretty=True)
except Exception as e:
log.exception(e)
return None
return converted_package
def _inherit_from_package(self, datacite_dict, datacite_package_dict):
def merge_dict_lists(dict1, dict2):
for key in dict1.keys():
if type(dict1[key]) is list:
list1 = dict1[key]
list2 = dict2.get(key, [])
if type(dict2.get(key, [])) is not list:
list2 = [list2]
for item in list2:
if item not in list1:
dict1[key] += [item]
return dict1
try:
# values from the resource are added or replace the package
replace = ['identifier', 'sizes', 'version', 'formats', 'resourceType', 'alternateIdentifiers']
for key in datacite_dict.keys():
if (key in replace) or (type(datacite_dict[key]) is not dict):
datacite_package_dict[key] = datacite_dict[key]
else:
datacite_package_dict[key] = merge_dict_lists(datacite_dict[key],
datacite_package_dict.get(key, {}))
return (datacite_package_dict)
except Exception as e:
log.exception(e)
return datacite_dict
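# Illustrative sketch (not part of the original module), assuming a CKAN environment
# in which the converter can be instantiated: it exercises the inheritance rule above,
# where keys such as 'identifier' are taken from the resource while list-valued entries
# like 'titles' are merged. The dictionaries below are hypothetical example data.
if __name__ == '__main__':
    converter = Datacite43SchemingResourceConverter()
    resource_level = {'identifier': {'#text': '10.123/resource', '@identifierType': 'DOI'},
                      'titles': {'title': [{'#text': 'Resource title'}]}}
    package_level = {'identifier': {'#text': '10.123/package', '@identifierType': 'DOI'},
                     'titles': {'title': [{'#text': 'Package title'}]}}
    merged = converter._inherit_from_package(resource_level, package_level)
    # merged['identifier'] now holds the resource DOI and merged['titles']['title'] both titles.
    print(merged)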
| agpl-3.0 | -4,915,734,971,167,741,000 | 56.309942 | 120 | 0.522143 | false |
opennode/nodeconductor-assembly-waldur | src/waldur_slurm/base.py | 1 | 5476 | import abc
import logging
import subprocess # noqa: S404
from django.utils.functional import cached_property
from .structures import Quotas
logger = logging.getLogger(__name__)
class BatchError(Exception):
pass
class BaseBatchClient(metaclass=abc.ABCMeta):
def __init__(self, hostname, key_path, username='root', port=22, use_sudo=False):
self.hostname = hostname
self.key_path = key_path
self.username = username
self.port = port
self.use_sudo = use_sudo
@abc.abstractmethod
def list_accounts(self):
"""
Get accounts list.
:return: list[structures.Account object]
"""
raise NotImplementedError()
@abc.abstractmethod
def get_account(self, name):
"""
Get account info.
:param name: [string] batch account name
:return: [structures.Account object]
"""
raise NotImplementedError()
@abc.abstractmethod
def create_account(self, name, description, organization, parent_name=None):
"""
Create account.
:param name: [string] account name
:param description: [string] account description
:param organization: [string] account organization name
:param parent_name: [string] account parent name. Optional.
:return: None
"""
raise NotImplementedError()
@abc.abstractmethod
def delete_account(self, name):
"""
Delete account.
:param name: [string] account name
:return: None
"""
raise NotImplementedError()
@abc.abstractmethod
def set_resource_limits(self, account, quotas):
"""
Set account limits.
:param account: [string] account name
:param quotas: [structures.Quotas object] limits
:return: None
"""
raise NotImplementedError()
@abc.abstractmethod
def get_association(self, user, account):
"""
        Get the association between a user and an account.
:param user: [string] user name
:param account: [string] account name
:return: [structures.Association object]
"""
raise NotImplementedError()
@abc.abstractmethod
def create_association(self, username, account, default_account=None):
"""
        Create an association between a user and an account.
:param username: [string] user name
:param account: [string] account name
:param default_account: [string] default account name. Optional.
:return: None
"""
raise NotImplementedError()
@abc.abstractmethod
def delete_association(self, username, account):
"""
        Delete the association between a user and an account.
:param username: [string] user name
:param account: [string] account name
:return: None
"""
raise NotImplementedError()
@abc.abstractmethod
def get_usage_report(self, accounts):
"""
Get usages records.
:param accounts: list[string]
:return: list[BaseReportLine]
"""
raise NotImplementedError()
def execute_command(self, command):
server = '%s@%s' % (self.username, self.hostname)
port = str(self.port)
if self.use_sudo:
account_command = ['sudo']
else:
account_command = []
account_command.extend(command)
ssh_command = [
'ssh',
'-o',
'UserKnownHostsFile=/dev/null',
'-o',
'StrictHostKeyChecking=no',
server,
'-p',
port,
'-i',
self.key_path,
' '.join(account_command),
]
try:
logger.debug('Executing SSH command: %s', ' '.join(ssh_command))
return subprocess.check_output( # noqa: S603
ssh_command, stderr=subprocess.STDOUT, encoding='utf-8'
)
except subprocess.CalledProcessError as e:
logger.exception('Failed to execute command "%s".', ssh_command)
stdout = e.output or ''
lines = stdout.splitlines()
if len(lines) > 0 and lines[0].startswith('Warning: Permanently added'):
lines = lines[1:]
stdout = '\n'.join(lines)
raise BatchError(stdout)
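# Minimal sketch (not part of the original module) of a concrete client built on
# BaseBatchClient. The sacctmgr invocation and its parsing are illustrative
# placeholders rather than the real waldur_slurm SLURM client; the remaining
# abstract methods would be implemented the same way before the class can be
# instantiated.
class ExampleSlurmClient(BaseBatchClient):
    def list_accounts(self):
        # Run an accounting command over SSH and take one account name per line
        # from its pipe-separated output.
        output = self.execute_command(['sacctmgr', 'list', 'account', '--noheader', '--parsable2'])
        return [line.split('|')[0] for line in output.splitlines() if line.strip()]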
class BaseReportLine(metaclass=abc.ABCMeta):
@abc.abstractproperty
def account(self):
pass
@abc.abstractproperty
def user(self):
pass
@property
def cpu(self):
return 0
@property
def gpu(self):
return 0
@property
def ram(self):
return 0
@property
def duration(self):
return 0
@property
def charge(self):
return 0
@property
def node(self):
return 0
@cached_property
def quotas(self):
return Quotas(
self.cpu * self.duration * self.node,
self.gpu * self.duration * self.node,
self.ram * self.duration * self.node,
self.charge,
)
def __str__(self):
return (
"ReportLine: User=%s, Account=%s, CPU=%s, GPU=%s, RAM=%s, Duration=%s, Charge=%s, Node=%s"
% (
self.user,
self.account,
self.cpu,
self.gpu,
self.ram,
self.duration,
self.charge,
self.node,
)
)
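# Illustrative sketch (not part of the original module): a report line with fixed
# values. Its cached `quotas` property aggregates each resource as
# resource * duration * node count (here CPU usage is 4 * 10 * 2 = 80) and passes
# `charge` through unchanged.
class ExampleReportLine(BaseReportLine):
    account = 'physics'
    user = 'alice'
    cpu = 4
    duration = 10
    node = 2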
| mit | 5,122,500,161,127,197,000 | 25.843137 | 102 | 0.557524 | false |
PatrickLeonard/superlists | functional_tests/test_my_lists.py | 1 | 1696 | from .base import FunctionalTest
class MyListsTest(FunctionalTest):
def test_logged_in_users_lists_are_saved_as_my_lists(self):
# Sally is a logged-in user
self.create_pre_authenticated_session('[email protected]')
# Sally goes to the home page and starts a list
self.browser.get(self.server_url)
self.get_item_input_box().send_keys('Lorem ipsum\n')
self.get_item_input_box().send_keys('Sexy Riker ipsum\n')
first_list_url = self.browser.current_url
# She notices a "My lists" link, for the first time
self.browser.find_element_by_link_text('My lists').click()
# She sees that her list is in there, named according to its
# first list item
self.browser.find_element_by_link_text('Lorem ipsum').click()
self.wait_for(
lambda: self.assertEqual(self.browser.current_url, first_list_url)
)
# She decides to start another list, just to see
        self.browser.get(self.server_url)
self.get_item_input_box().send_keys('Icek Te Picard\n')
second_list_url = self.browser.current_url
# Under "My lists", her new list appears
self.browser.find_element_by_link_text('My lists').click()
self.browser.find_element_by_link_text('Icek Te Picard').click()
self.assertEqual(self.browser.current_url, second_list_url)
# She logs out. The "My lists" option disappears
self.browser.find_element_by_link_text('Log out').click()
self.wait_for(
lambda: self.assertEqual(
self.browser.find_elements_by_link_text('My lists'),
[]
))
| mit | -4,624,676,073,954,410,000 | 39.380952 | 78 | 0.623821 | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsversion.py | 1 | 2841 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsversion(base_resource) :
""" Configuration for version resource. """
#------- Read only Parameter ---------
def __init__(self) :
self._version = ""
self._mode = 0
@property
def version(self) :
"""Version.
"""
try :
return self._version
except Exception as e:
raise e
@property
def mode(self) :
"""Kernel mode (KMPE/VMPE).
"""
try :
return self._mode
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsversion_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsversion
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the nsversion resources that are configured on netscaler.
"""
try :
if not name :
obj = nsversion()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class nsversion_response(base_response) :
def __init__(self, length=1) :
self.nsversion = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsversion = [nsversion() for _ in range(length)]
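# Illustrative usage sketch (not part of the generated SDK file), assuming a reachable
# NetScaler appliance and the standard nitro_service client shipped with this SDK;
# the address and credentials below are placeholders.
if __name__ == '__main__':
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("192.0.2.10", "http")
    client.login("nsroot", "nsroot")
    for v in nsversion.get(client):
        print(v.version)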
| apache-2.0 | 9,031,897,375,595,689,000 | 27.69697 | 111 | 0.696586 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/operations/_frontend_endpoints_operations.py | 1 | 23630 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FrontendEndpointsOperations(object):
"""FrontendEndpointsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.frontdoor.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_front_door(
self,
resource_group_name, # type: str
front_door_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FrontendEndpointsListResult"]
"""Lists all of the frontend endpoints within a Front Door.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param front_door_name: Name of the Front Door which is globally unique.
:type front_door_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FrontendEndpointsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.frontdoor.models.FrontendEndpointsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendEndpointsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_front_door.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FrontendEndpointsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_front_door.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints'} # type: ignore
def get(
self,
resource_group_name, # type: str
front_door_name, # type: str
frontend_endpoint_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FrontendEndpoint"
"""Gets a Frontend endpoint with the specified name within the specified Front Door.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param front_door_name: Name of the Front Door which is globally unique.
:type front_door_name: str
:param frontend_endpoint_name: Name of the Frontend endpoint which is unique within the Front
Door.
:type frontend_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FrontendEndpoint, or the result of cls(response)
:rtype: ~azure.mgmt.frontdoor.models.FrontendEndpoint
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendEndpoint"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('FrontendEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}'} # type: ignore
def _enable_https_initial(
self,
resource_group_name, # type: str
front_door_name, # type: str
frontend_endpoint_name, # type: str
custom_https_configuration, # type: "_models.CustomHttpsConfiguration"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._enable_https_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(custom_https_configuration, 'CustomHttpsConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_enable_https_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/enableHttps'} # type: ignore
def begin_enable_https(
self,
resource_group_name, # type: str
front_door_name, # type: str
frontend_endpoint_name, # type: str
custom_https_configuration, # type: "_models.CustomHttpsConfiguration"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Enables a frontendEndpoint for HTTPS traffic.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param front_door_name: Name of the Front Door which is globally unique.
:type front_door_name: str
:param frontend_endpoint_name: Name of the Frontend endpoint which is unique within the Front
Door.
:type frontend_endpoint_name: str
:param custom_https_configuration: The configuration specifying how to enable HTTPS.
:type custom_https_configuration: ~azure.mgmt.frontdoor.models.CustomHttpsConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._enable_https_initial(
resource_group_name=resource_group_name,
front_door_name=front_door_name,
frontend_endpoint_name=frontend_endpoint_name,
custom_https_configuration=custom_https_configuration,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_enable_https.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/enableHttps'} # type: ignore
def _disable_https_initial(
self,
resource_group_name, # type: str
front_door_name, # type: str
frontend_endpoint_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._disable_https_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_disable_https_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/disableHttps'} # type: ignore
def begin_disable_https(
self,
resource_group_name, # type: str
front_door_name, # type: str
frontend_endpoint_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Disables a frontendEndpoint for HTTPS traffic.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param front_door_name: Name of the Front Door which is globally unique.
:type front_door_name: str
:param frontend_endpoint_name: Name of the Frontend endpoint which is unique within the Front
Door.
:type frontend_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._disable_https_initial(
resource_group_name=resource_group_name,
front_door_name=front_door_name,
frontend_endpoint_name=frontend_endpoint_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=80, min_length=1, pattern=r'^[a-zA-Z0-9_\-\(\)\.]*[^\.]$'),
'frontDoorName': self._serialize.url("front_door_name", front_door_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$'),
'frontendEndpointName': self._serialize.url("frontend_endpoint_name", frontend_endpoint_name, 'str', max_length=255, min_length=1, pattern=r'^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_disable_https.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/disableHttps'} # type: ignore
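# Illustrative usage sketch (not part of the generated SDK file): these operations are
# normally reached through FrontDoorManagementClient.frontend_endpoints rather than by
# instantiating this class directly. The resource names below are placeholders.
if __name__ == '__main__':
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.frontdoor import FrontDoorManagementClient

    client = FrontDoorManagementClient(DefaultAzureCredential(), '<subscription-id>')
    for endpoint in client.frontend_endpoints.list_by_front_door('my-rg', 'my-frontdoor'):
        print(endpoint.name)
    # begin_* methods return an LROPoller; result() blocks until the operation completes.
    client.frontend_endpoints.begin_disable_https('my-rg', 'my-frontdoor', 'my-endpoint').result()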
| mit | -8,208,874,811,439,488,000 | 53.073227 | 240 | 0.640711 | false |
hakujyo/chessplaying_robot | matchpicture.py | 1 | 1381 |
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
x = 0
y = 0
def makematchpicture(Grayurl):
img = cv2.imread(Grayurl, 0)
img2 = img.copy()
template = cv2.imread('test.png', 0)
w, h = template.shape[::-1]
methods = ['cv2.TM_SQDIFF']
for meth in methods:
img = img2.copy()
method = eval(meth)
# Apply template Matching
res = cv2.matchTemplate(img, template, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
print("模板匹配:",int(top_left[0] + w / 2), int(top_left[1] + h / 2))
cv2.rectangle(img, top_left, bottom_right, 255, 2)
plt.figure()
plt.subplot(121), plt.imshow(res, cmap='gray')
plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img, cmap='gray')
plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
plt.suptitle(meth)
global x
global y
x = int(top_left[0] + w / 2)
y = int(top_left[1] + h / 2)
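# Illustrative usage sketch (not part of the original script): run the template match on
# a grayscale board image (file name is a placeholder) and read the matched centre back
# from the module-level globals that makematchpicture() fills in.
if __name__ == '__main__':
    makematchpicture('board_gray.png')
    print('matched centre:', x, y)
    plt.show()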
| gpl-3.0 | 3,895,924,439,547,048,000 | 27.804348 | 73 | 0.547046 | false |
GoogleCloudPlatform/ml-on-gcp | tutorials/sklearn/hpsearch/gke_parallel.py | 1 | 16248 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
from helpers.gke_helper import get_cluster
from helpers.gcs_helper import pickle_and_upload, get_uri_blob, download_uri_and_unpickle
from helpers.kubernetes_helper import create_job, delete_jobs_pods
from copy import deepcopy
from itertools import product
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from skopt import BayesSearchCV
from skopt.space import Categorical, Integer, Real
class GKEParallel(object):
SUPPORTED_SEARCH = [
GridSearchCV,
RandomizedSearchCV,
BayesSearchCV
]
def __init__(self, search, project_id, zone, cluster_id, bucket_name, image_name, task_name=None):
"""Wraps around a SearchCV object and handles deploying `fit`
jobs to a GKE cluster.
"""
if type(search) not in self.SUPPORTED_SEARCH:
raise TypeError('Search type {} not supported. Only supporting {}.'.format(type(search), [s.__name__ for s in self.SUPPORTED_SEARCH]))
self.search = search
self.project_id = project_id
self.cluster_id = cluster_id
self.bucket_name = bucket_name
self.image_name = image_name
self.task_name = task_name
self.gcs_uri = None
self.cluster = get_cluster(project_id, zone, cluster_id)
self.n_nodes = self.cluster['currentNodeCount']
# For GridSearchCV
self.param_grids = {}
# For RandomizedSearchCV
self.param_distributions = None
self.n_iter = None
# For BayesSearchCV
self.search_spaces = {}
self.job_names = {}
self.output_uris = {}
self.output_without_estimator_uris = {}
self.dones = {}
self.results = {}
self.best_estimator_ = None
self.best_params_ = None
self.best_score_ = None
self.best_search_ = None
self._cancelled = False
self._done = False
def _make_job_name(self, worker_id):
return '{}.worker.{}'.format(self.task_name, worker_id)
def _make_job_body(self, worker_id, X_uri, y_uri):
body = {
'apiVersion': 'batch/v1',
'kind': 'Job',
'metadata': {
'name': self._make_job_name(worker_id)
},
'spec': {
'template': {
'spec': {
'containers': [
{
'image': 'gcr.io/{}/{}'.format(self.project_id, self.image_name),
'command': ['python'],
'args': ['worker.py', self.bucket_name, self.task_name, worker_id, X_uri, y_uri],
'name': 'worker'
}
],
'restartPolicy': 'OnFailure'}
}
}
}
return body
def _deploy_job(self, worker_id, X_uri, y_uri):
job_body = self._make_job_body(worker_id, X_uri, y_uri)
print('Deploying worker {}'.format(worker_id))
create_job(job_body)
def _partition_grid(self, param_grid_dict, partition_keys):
_param_grid_dict = deepcopy(param_grid_dict)
partition_lists = [_param_grid_dict.pop(key) for key in partition_keys]
partitioned = []
for prod in product(*partition_lists):
lists = [[element] for element in prod]
singleton = dict(zip(partition_keys, lists))
singleton.update(_param_grid_dict)
partitioned.append(singleton)
return partitioned
def _partition_param_grid(self, param_grid, target_n_partition=5):
"""Returns a list of param_grids whose union is the input
param_grid.
If param_grid is a dict:
The implemented strategy attempts to partition the param_grid
into at least target_n_partition smaller param_grids.
NOTE: The naive strategy implemented here does not distinguish
between different types of parameters nor their impact on the
running time. The user of this module is encouraged to
implement their own paritioning strategy based on their needs.
"""
if type(param_grid) == list:
# If the input is already a list of param_grids then just
# use it as is.
return param_grid
else:
# The strategy is to simply expand the grid fully with
# respect to a parameter:
# [1, 2, 3]x[4, 5] --> [1]x[4, 5], [2]x[4, 5], [3]x[4, 5]
# until the target number of partitions is reached.
partition_keys = []
n_partition = 1
for key, lst in param_grid.items():
partition_keys.append(key)
n_partition *= len(lst)
if n_partition >= target_n_partition:
break
partitioned = self._partition_grid(param_grid, partition_keys)
return partitioned
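    # Worked example of the strategy above (illustrative only): with
    #   param_grid = {'C': [1, 10, 100], 'kernel': ['rbf', 'linear']}
    # and target_n_partition=5, both keys are expanded (3 * 2 = 6 >= 5), giving six
    # single-combination grids such as {'C': [1], 'kernel': ['rbf']},
    # {'C': [1], 'kernel': ['linear']}, ..., each fitted by a separate worker.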
def _handle_grid_search(self, X_uri, y_uri):
param_grids = self._partition_param_grid(self.search.param_grid, self.n_nodes)
for i, param_grid in enumerate(param_grids):
worker_id = str(i)
self.param_grids[worker_id] = param_grid
self.job_names[worker_id] = self._make_job_name(worker_id)
self.output_uris[worker_id] = 'gs://{}/{}/{}/fitted_search.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.output_without_estimator_uris[worker_id] = 'gs://{}/{}/{}/fitted_search_without_estimator.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.dones[worker_id] = False
pickle_and_upload(param_grid, self.bucket_name, '{}/{}/param_grid.pkl'.format(self.task_name, worker_id))
self._deploy_job(worker_id, X_uri, y_uri)
def _handle_randomized_search(self, X_uri, y_uri):
self.param_distributions = self.search.param_distributions
self.n_iter = self.search.n_iter
n_iter = self.n_iter / self.n_nodes + 1
for i in xrange(self.n_nodes):
worker_id = str(i)
self.job_names[worker_id] = self._make_job_name(worker_id)
self.output_uris[worker_id] = 'gs://{}/{}/{}/fitted_search.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.output_without_estimator_uris[worker_id] = 'gs://{}/{}/{}/fitted_search_without_estimator.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.dones[worker_id] = False
pickle_and_upload(self.param_distributions, self.bucket_name, '{}/{}/param_distributions.pkl'.format(self.task_name, worker_id))
pickle_and_upload(n_iter, self.bucket_name, '{}/{}/n_iter.pkl'.format(self.task_name, worker_id))
self._deploy_job(worker_id, X_uri, y_uri)
def _partition_space(self, space):
"""Partitions the space into two subspaces. In the case of
Real and Integer, the subspaces are not disjoint, but
overlapping at an endpoint.
The argument `space` should be a dict whose values are
skopt.space's Categorical, Integer, or Real.
"""
partition_key = np.random.choice(space.keys())
dimension = space[partition_key]
if type(dimension) == Categorical:
categories = dimension.categories
prior = dimension.prior
transform = dimension.transform_
if len(categories) >= 2:
mid_index = len(categories) / 2
left_categories = categories[:mid_index]
right_categories = categories[mid_index:]
if prior is not None:
left_prior = prior[:mid_index]
left_weight = sum(left_prior)
left_prior = [p/left_weight for p in left_prior]
right_prior = prior[mid_index:]
right_weight = sum(right_prior)
right_prior = [p/right_weight for p in right_prior]
else:
left_prior = None
right_prior = None
left = Categorical(left_categories, prior=left_prior, transform=transform)
right = Categorical(right_categories, prior=right_prior, transform=transform)
else:
return [space]
elif type(dimension) == Integer:
low = dimension.low
high = dimension.high
transform = dimension.transform_
if low < high:
                mid = low + (high - low) / 2  # split at the midpoint of [low, high]
left = Integer(low, mid, transform=transform)
right = Integer(mid, high, transform=transform)
else:
return [space]
elif type(dimension) == Real:
low = dimension.low
high = dimension.high
prior = dimension.prior
transform = dimension.transform_
if low < high:
                mid = low + (high - low) / 2  # split at the midpoint of [low, high]
left = Real(low, mid, prior=prior, transform=transform)
right = Real(mid, high, prior=prior, transform=transform)
else:
return [space]
left_space = deepcopy(space)
left_space[partition_key] = left
right_space = deepcopy(space)
right_space[partition_key] = right
return [left_space, right_space]
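    # Worked example of the split above (illustrative only): if the randomly chosen
    # dimension is Real(1e-4, 1e+2) for 'learning_rate', it is split at the midpoint
    # into Real(1e-4, ~50.0) and Real(~50.0, 1e+2), while every other dimension is
    # copied unchanged into both resulting subspaces.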
def _partition_search_spaces(self, search_spaces, target_n_partition=5):
"""Returns a list of search_spaces whose union is the input
search_spaces.
If search_spaces is a dict:
The implemented strategy attempts to partition the search_spaces
into at least target_n_partition smaller search_spaces.
NOTE: The naive strategy implemented here does not distinguish
between different types of parameters nor their impact on the
running time. The user of this module is encouraged to
implement their own paritioning strategy based on their needs.
"""
if type(search_spaces[0]) == tuple:
# If the input is already a list of search_spaces then just
# use it as is.
return search_spaces.values()
else:
result = search_spaces.values()
while len(result) < target_n_partition:
space = result.pop()
partitioned = self._partition_space(space)
result.extend(partitioned)
return result
def _handle_bayes_search(self, X_uri, y_uri):
partitioned_search_spaces = self._partition_search_spaces(self.search.search_spaces_, self.n_nodes)
for i, search_spaces in enumerate(partitioned_search_spaces):
worker_id = str(i)
self.search_spaces[worker_id] = search_spaces
self.job_names[worker_id] = self._make_job_name(worker_id)
self.output_uris[worker_id] = 'gs://{}/{}/{}/fitted_search.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.output_without_estimator_uris[worker_id] = 'gs://{}/{}/{}/fitted_search_without_estimator.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.dones[worker_id] = False
pickle_and_upload(search_spaces, self.bucket_name, '{}/{}/search_spaces.pkl'.format(self.task_name, worker_id))
self._deploy_job(worker_id, X_uri, y_uri)
def _upload_data(self, X, y):
if type(X) == str and X.startswith('gs://'):
X_uri = X
else:
X_uri = pickle_and_upload(X, self.bucket_name, '{}/X.pkl'.format(self.task_name))
if type(y) == str and y.startswith('gs://'):
y_uri = y
else:
y_uri = pickle_and_upload(y, self.bucket_name, '{}/y.pkl'.format(self.task_name))
search_uri = pickle_and_upload(self.search, self.bucket_name, '{}/search.pkl'.format(self.task_name))
return X_uri, y_uri, search_uri
def fit(self, X, y):
"""Deploys `fit` jobs to each worker in the cluster.
"""
timestamp = str(int(time.time()))
self.task_name = self.task_name or '{}.{}.{}'.format(self.cluster_id, self.image_name, timestamp)
self._done = False
self._cancelled = False
X_uri, y_uri, _ = self._upload_data(X, y)
if type(self.search) == GridSearchCV:
handler = self._handle_grid_search
elif type(self.search) == RandomizedSearchCV:
handler = self._handle_randomized_search
elif type(self.search) == BayesSearchCV:
handler = self._handle_bayes_search
print('Fitting {}'.format(type(self.search)))
handler(X_uri, y_uri)
self.persist()
def persist(self):
"""Pickle and upload self to GCS, allowing recovering of parallel
search objects across experiments.
"""
self.gcs_uri = pickle_and_upload(self, self.bucket_name, '{}/gke_search.pkl'.format(self.task_name))
print('Persisted the GKEParallel instance: {}'.format(self.gcs_uri))
# Implement part of the concurrent.future.Future interface.
def done(self):
if not self._done:
for worker_id, output_uri in self.output_uris.items():
print('Checking if worker {} is done'.format(worker_id))
self.dones[worker_id] = get_uri_blob(output_uri).exists()
self._done = all(self.dones.values())
return self._done
def cancel(self):
"""Deletes the kubernetes jobs.
Persisted data and the cluster will not be deleted."""
if not self._cancelled:
delete_jobs_pods(self.job_names.values())
self._cancelled = True
def cancelled(self):
return self._cancelled
def result(self, download=False):
if not self.done():
            n_done = sum(1 for d in self.dones.values() if d)
print('Not done: {} out of {} workers completed.'.format(n_done, len(self.dones)))
return None
if not self.results or download:
for worker_id, output_uri in self.output_without_estimator_uris.items():
print('Getting result from worker {}'.format(worker_id))
self.results[worker_id] = download_uri_and_unpickle(output_uri)
self._aggregate_results(download)
self.persist()
return self.results
def _aggregate_results(self, download):
best_id = None
for worker_id, result in self.results.items():
if self.best_score_ is None or result.best_score_ > self.best_score_ or download:
self.best_score_ = result.best_score_
self.best_params_ = result.best_params_
best_id = worker_id
if download and self.best_estimator_ is None:
# Download only the best estimator among the workers.
print('Downloading the best estimator (worker {}).'.format(best_id))
output_uri = self.output_uris[best_id]
self.best_search_ = download_uri_and_unpickle(output_uri)
self.best_estimator_ = self.best_search_.best_estimator_
# Implement part of SearchCV interface by delegation.
def predict(self, *args, **kwargs):
return self.best_estimator_.predict(*args, **kwargs)
def predict_proba(self, *args, **kwargs):
return self.best_estimator_.predict_proba(*args, **kwargs)
def predict_log_proba(self, *args, **kwargs):
return self.best_estimator_.predict_log_proba(*args, **kwargs)
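# Illustrative usage sketch (not part of the original module), assuming a GKE cluster,
# a GCS bucket and a pushed worker image that were prepared as in the accompanying
# tutorial; project, zone, cluster, bucket and image names below are placeholders.
if __name__ == '__main__':
    from sklearn.datasets import load_digits
    from sklearn.ensemble import RandomForestClassifier

    digits = load_digits()
    search = GridSearchCV(RandomForestClassifier(),
                          param_grid={'n_estimators': [50, 100, 200], 'max_depth': [4, 8, None]},
                          cv=3)
    gke_search = GKEParallel(search, 'my-project', 'us-central1-b', 'my-cluster',
                             'my-bucket', 'my-worker-image')
    gke_search.fit(digits.data, digits.target)   # deploys one fit job per param_grid partition
    while not gke_search.done():                 # poll the workers' output blobs on GCS
        time.sleep(60)
    results = gke_search.result(download=True)   # aggregate and download the best estimator
    print('best params: {} best score: {}'.format(gke_search.best_params_, gke_search.best_score_))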
| apache-2.0 | -32,083,320,555,209,584 | 35.927273 | 163 | 0.586595 | false |
RayRuizhiLiao/ITK_4D | Examples/RegistrationITKv3/ImageRegistration3.py | 1 | 4570 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import itk
from sys import argv
#
# Check input parameters
# INPUTS(fixedImage): {BrainProtonDensitySliceBorder20.png}
# INPUTS(movingImage): {BrainProtonDensitySliceShifted13x17y.png}
#
if len(argv) < 4:
print 'Missing Parameters'
print 'Usage: ImageRegistration3.py fixedImageFile movingImageFile outputImagefile'
exit()
#
# Define data types
#
FixedImageType = itk.Image[itk.F, 2]
MovingImageType = itk.Image[itk.F, 2]
OutputImageType = itk.Image[itk.UC, 2]
TransformType = itk.TranslationTransform[itk.D, 2]
#
# Read the fixed and moving images using filenames
# from the command line arguments
#
fixedImageReader = itk.ImageFileReader[FixedImageType].New()
movingImageReader = itk.ImageFileReader[MovingImageType].New()
fixedImageReader.SetFileName( argv[1] )
movingImageReader.SetFileName( argv[2] )
fixedImageReader.Update()
movingImageReader.Update()
fixedImage = fixedImageReader.GetOutput()
movingImage = movingImageReader.GetOutput()
#
# Instantiate the classes for the registration framework
#
registration = itk.ImageRegistrationMethod[FixedImageType, MovingImageType].New()
imageMetric = itk.MeanSquaresImageToImageMetric[FixedImageType, MovingImageType].New()
transform = TransformType.New()
optimizer = itk.RegularStepGradientDescentOptimizer.New()
interpolator = itk.LinearInterpolateImageFunction[FixedImageType, itk.D].New()
registration.SetOptimizer( optimizer )
registration.SetTransform( transform )
registration.SetInterpolator( interpolator )
registration.SetMetric( imageMetric )
registration.SetFixedImage( fixedImage )
registration.SetMovingImage( movingImage )
registration.SetFixedImageRegion( fixedImage.GetBufferedRegion() )
transform.SetIdentity()
initialParameters = transform.GetParameters()
registration.SetInitialTransformParameters( initialParameters )
#
# Iteration Observer
#
def iterationUpdate():
currentParameter = transform.GetParameters()
print "M: %f P: %f %f " % ( optimizer.GetValue(),
currentParameter.GetElement(0),
currentParameter.GetElement(1) )
iterationCommand = itk.PyCommand.New()
iterationCommand.SetCommandCallable( iterationUpdate )
optimizer.AddObserver( itk.IterationEvent(), iterationCommand )
#
# Define optimizer parameters
#
optimizer.SetMaximumStepLength( 4.00 )
optimizer.SetMinimumStepLength( 0.01 )
optimizer.SetNumberOfIterations( 200 )
print "Starting registration"
#
# Start the registration process
#
registration.Update()
#
# Get the final parameters of the transformation
#
finalParameters = registration.GetLastTransformParameters()
print "Final Registration Parameters "
print "Translation X = %f" % (finalParameters.GetElement(0),)
print "Translation Y = %f" % (finalParameters.GetElement(1),)
#
# Now, we use the final transform for resampling the
# moving image.
#
resampler = itk.ResampleImageFilter[MovingImageType, FixedImageType].New()
resampler.SetTransform( transform )
resampler.SetInput( movingImage )
region = fixedImage.GetLargestPossibleRegion()
resampler.SetSize( region.GetSize() )
resampler.SetOutputSpacing( fixedImage.GetSpacing() )
resampler.SetOutputOrigin( fixedImage.GetOrigin() )
resampler.SetOutputDirection( fixedImage.GetDirection() )
resampler.SetDefaultPixelValue( 100 )
outputCast = itk.RescaleIntensityImageFilter[FixedImageType, OutputImageType].New()
outputCast.SetInput(resampler.GetOutput())
#
# Write the resampled image
#
writer = itk.ImageFileWriter[OutputImageType].New()
writer.SetFileName( argv[3] )
writer.SetInput( outputCast.GetOutput() )
writer.Update()
| apache-2.0 | -8,696,359,384,806,764,000 | 27.869281 | 88 | 0.715317 | false |
255BITS/HyperGAN | hypergan/trainers/multi_step_trainer.py | 1 | 2012 | import tensorflow as tf
import numpy as np
import hyperchamber as hc
import inspect
from hypergan.trainers.base_trainer import BaseTrainer
TINY = 1e-12
class MultiStepTrainer(BaseTrainer):
def __init__(self, gan, config, losses=[], var_lists=[], metrics=None):
self.losses = losses
self.var_lists = var_lists
self.metrics = metrics or [None for i in self.losses]
BaseTrainer.__init__(self, gan, config)
def _create(self):
gan = self.gan
config = self.config
losses = self.losses
optimizers = []
for i, _ in enumerate(losses):
loss = losses[i][1]
var_list = self.var_lists[i]
is_generator = 'generator' in losses[i][0]
if is_generator:
optimizer = self.build_optimizer(config, 'g_', config.g_trainer, self.g_lr, var_list, loss)
else:
optimizer = self.build_optimizer(config, 'd_', config.d_trainer, self.d_lr, var_list, loss)
optimizers.append(optimizer)
self.optimizers = optimizers
        if config.d_clipped_weights:
            # Clip the discriminator weights: gather every var_list that does not belong to a generator loss.
            d_vars = [v for i in range(len(losses)) if 'generator' not in losses[i][0] for v in self.var_lists[i]]
            self.clip = [tf.assign(d, tf.clip_by_value(d, -config.d_clipped_weights, config.d_clipped_weights)) for d in d_vars]
else:
self.clip = []
return None
def _step(self, feed_dict):
gan = self.gan
sess = gan.session
config = self.config
losses = self.losses
metrics = self.metrics
for i, _ in enumerate(losses):
loss = losses[i]
optimizer = self.optimizers[i]
metric = metrics[i]
if(metric):
metric_values = sess.run([optimizer] + self.output_variables(metric), feed_dict)[1:]
if self.current_step % 100 == 0:
print("loss " + str(i) + " "+ loss[0] + " " + self.output_string(metric) % tuple([self.current_step] + metric_values))
else:
_ = sess.run(optimizer, feed_dict)
| mit | -970,139,597,169,082,900 | 31.983607 | 139 | 0.564612 | false |
Reat0ide/plugin.video.pelisalacarta | channelselector.py | 1 | 11006 | # -*- coding: utf-8 -*-
import urlparse,urllib2,urllib,re
import os
import sys
from core import config
from core import logger
from core.item import Item
DEBUG = True
CHANNELNAME = "channelselector"
def getmainlist():
logger.info("channelselector.getmainlist")
itemlist = []
    # Get the language setting and its display string
idioma = config.get_setting("languagefilter")
logger.info("channelselector.getmainlist idioma=%s" % idioma)
langlistv = [config.get_localized_string(30025),config.get_localized_string(30026),config.get_localized_string(30027),config.get_localized_string(30028),config.get_localized_string(30029)]
try:
idiomav = langlistv[int(idioma)]
except:
idiomav = langlistv[0]
    # Add the channels that make up the main menu
itemlist.append( Item(title=config.get_localized_string(30118)+" ("+idiomav+")" , channel="channelselector" , action="channeltypes", thumbnail = urlparse.urljoin(get_thumbnail_path(),"channelselector.png") ) )
if "xbmceden" in config.get_platform():
itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"configuracion.png"), folder=False) )
else:
itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"configuracion.png")) )
return itemlist
def mainlist(params,url,category):
logger.info("channelselector.mainlist")
    # Check for updates only at the first level
if config.get_platform()!="boxee":
try:
from core import updater
except ImportError:
logger.info("channelselector.mainlist No disponible modulo actualizaciones")
else:
if config.get_setting("updatecheck2") == "true":
logger.info("channelselector.mainlist Verificar actualizaciones activado")
try:
updater.checkforupdates()
except:
import xbmcgui
dialog = xbmcgui.Dialog()
dialog.ok("No se puede conectar","No ha sido posible comprobar","si hay actualizaciones")
logger.info("channelselector.mainlist Fallo al verificar la actualización")
pass
else:
logger.info("channelselector.mainlist Verificar actualizaciones desactivado")
itemlist = getmainlist()
for elemento in itemlist:
logger.info("channelselector.mainlist item="+elemento.title)
addfolder(elemento.title , elemento.channel , elemento.action , thumbnail=elemento.thumbnail, folder=elemento.folder)
# Label (top-right)...
import xbmcplugin
xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category="" )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
if config.get_setting("forceview")=="true":
# Confluence - Thumbnail
import xbmc
xbmc.executebuiltin("Container.SetViewMode(500)")
def getchanneltypes():
logger.info("channelselector getchanneltypes")
itemlist = []
itemlist.append( Item( title=config.get_localized_string(30121) , channel="channelselector" , action="listchannels" , category="*" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"channelselector")))
itemlist.append( Item( title=config.get_localized_string(30122) , channel="channelselector" , action="listchannels" , category="F" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"peliculas")))
itemlist.append( Item( title=config.get_localized_string(30123) , channel="channelselector" , action="listchannels" , category="S" , thumbnail=urlparse.urljoin(get_thumbnail_path(),"series")))
return itemlist
def channeltypes(params,url,category):
logger.info("channelselector.mainlist channeltypes")
lista = getchanneltypes()
for item in lista:
addfolder(item.title,item.channel,item.action,item.category,item.thumbnail,item.thumbnail)
# Label (top-right)...
import xbmcplugin
xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category="" )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
if config.get_setting("forceview")=="true":
# Confluence - Thumbnail
import xbmc
xbmc.executebuiltin("Container.SetViewMode(500)")
def listchannels(params,url,category):
logger.info("channelselector.listchannels")
lista = filterchannels(category)
for channel in lista:
if channel.type=="xbmc" or channel.type=="generic":
if channel.channel=="personal":
thumbnail=config.get_setting("personalchannellogo")
elif channel.channel=="personal2":
thumbnail=config.get_setting("personalchannellogo2")
elif channel.channel=="personal3":
thumbnail=config.get_setting("personalchannellogo3")
elif channel.channel=="personal4":
thumbnail=config.get_setting("personalchannellogo4")
elif channel.channel=="personal5":
thumbnail=config.get_setting("personalchannellogo5")
else:
thumbnail=channel.thumbnail
if thumbnail == "":
thumbnail=urlparse.urljoin(get_thumbnail_path(),channel.channel+".png")
#thumbnail = "/Users/arturo/Library/Application Support/Kodi/addons/plugin.video.pelisalacarta/resources/images" + channel.channel+".png"
addfolder(channel.title , channel.channel , "mainlist" , channel.channel, thumbnail = thumbnail)
# Label (top-right)...
import xbmcplugin
xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
if config.get_setting("forceview")=="true":
# Confluence - Thumbnail
import xbmc
xbmc.executebuiltin("Container.SetViewMode(500)")
def filterchannels(category):
returnlist = []
if category=="NEW":
channelslist = channels_history_list()
for channel in channelslist:
channel.thumbnail = urlparse.urljoin(get_thumbnail_path(),channel.channel+".png")
channel.plot = channel.category.replace("VOS","Versión original subtitulada").replace("F","Películas").replace("S","Series").replace("D","Documentales").replace("A","Anime").replace(",",", ")
returnlist.append(channel)
else:
try:
idioma = config.get_setting("languagefilter")
logger.info("channelselector.filterchannels idioma=%s" % idioma)
langlistv = ["","ES","EN","IT","PT"]
idiomav = langlistv[int(idioma)]
logger.info("channelselector.filterchannels idiomav=%s" % idiomav)
except:
idiomav=""
channelslist = channels_list()
for channel in channelslist:
            # Skip if the user did not choose "all" and the channel is not in the chosen category
if category<>"*" and category not in channel.category:
#logger.info(channel[0]+" no entra por tipo #"+channel[4]+"#, el usuario ha elegido #"+category+"#")
continue
            # Skip if the user did not choose "all" and the channel does not match the chosen language
if channel.language<>"" and idiomav<>"" and idiomav not in channel.language:
#logger.info(channel[0]+" no entra por idioma #"+channel[3]+"#, el usuario ha elegido #"+idiomav+"#")
continue
if channel.thumbnail == "":
channel.thumbnail = urlparse.urljoin(get_thumbnail_path(),channel.channel+".png")
#channel.thumbnail = get_thumbnail_path(),channel.channel+".png"
channel.plot = channel.category.replace("VOS","Versión original subtitulada").replace("F","Películas").replace("S","Series").replace("D","Documentales").replace("A","Anime").replace(",",", ")
returnlist.append(channel)
return returnlist
def channels_list():
itemlist = []
#itemlist.append( Item( viewmode="movie", title="Play URL" , channel="tengourl" , language="" , category="F,S,D,A" , type="generic" ))
itemlist.append( Item( title="Itastreaming" , channel="itastreaming" , language="IT" , category="F,S,A" , type="generic" ))
itemlist.append( Item( title="Altadefinizione" , channel="altadefinizione" , language="IT" , category="F,S,D,A" , type="generic" ))
itemlist.append( Item( title="Altadefinizione NG" , channel="altadefinizione_NG" , language="IT" , category="F,S,D,A" , type="generic" ))
itemlist.append( Item( title="CB01" , channel="cb01" , language="IT" , category="F,S,D,A" , type="generic" ))
itemlist.append( Item( title="Griffin Ita" , channel="griffin" , language="IT" , category="F,S,D,A" , type="generic" ))
itemlist.append( Item( title="Itastreaming_test" , channel="itastreaming_test" , language="IT" , category="F,S,D,A" , type="generic" ))
return itemlist
def addfolder(nombre,channelname,accion,category="",thumbnailname="",thumbnail="",folder=True):
if category == "":
try:
category = unicode( nombre, "utf-8" ).encode("iso-8859-1")
except:
pass
import xbmc
import xbmcgui
import xbmcplugin
listitem = xbmcgui.ListItem( nombre , iconImage="DefaultFolder.png", thumbnailImage=thumbnail)
itemurl = '%s?channel=%s&action=%s&category=%s' % ( sys.argv[ 0 ] , channelname , accion , category )
xbmcplugin.addDirectoryItem( handle = int(sys.argv[ 1 ]), url = itemurl , listitem=listitem, isFolder=folder)
def get_thumbnail_path():
WEB_PATH = ""
thumbnail_type = config.get_setting("thumbnail_type")
if thumbnail_type=="":
thumbnail_type="2"
if thumbnail_type=="0":
WEB_PATH = "http://pelisalacarta.mimediacenter.info/posters/"
elif thumbnail_type=="1":
WEB_PATH = "http://pelisalacarta.mimediacenter.info/banners/"
elif thumbnail_type=="2":
WEB_PATH = "http://pelisalacarta.mimediacenter.info/squares/"
#WEB_PATH = "/Users/arturo/Library/Application\ Support/Kodi/addons/plugin.video.pelisalacarta/resources/images/"
return WEB_PATH
| gpl-3.0 | -8,419,260,920,260,160,000 | 47.309417 | 213 | 0.634231 | false |
HaloWang/SwiftFFNN | UseTF.py | 1 | 1374 | import json as json
import numpy as np
import tensorflow as tf
with open('data.json') as data_file:
data = json.load(data_file)
x = tf.placeholder(tf.float32, [None, 3])
W = tf.Variable(tf.zeros([3, 1]))
b = tf.Variable(tf.zeros([1]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder("float", [None, 1])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
userInfoData = np.empty((0, 3), dtype=np.float32)
resultData = np.empty((0, 1), dtype=np.float32)
for item in data:
age = np.float32(item["age"])
male = np.float32(item["male"])
single = np.float32(item["single"])
frequency = np.float32(item["frequency"])
inputArray = np.array([[age, male, single]])
answerArray = np.array([[frequency]])
userInfoData = np.append(userInfoData, inputArray, axis=0)
resultData = np.append(resultData, answerArray, axis=0)
print userInfoData.shape
print resultData.shape
sess.run(train_step, feed_dict={x: userInfoData, y_: resultData})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# TODO: Result Test Wrong!
print sess.run(accuracy, feed_dict={x: np.array([[23, 0, 2]]), y_: np.array([[-1]])})
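# Why the check above is marked wrong: with a single output unit, softmax always
# returns 1.0 and tf.argmax over axis 1 is always 0 for both y and y_, so the
# computed "accuracy" is trivially 1.0 regardless of the data. A regression-style
# loss/metric (e.g. mean squared error) would arguably fit this single-value
# target better.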
| mit | -1,653,505,655,851,942,000 | 26.48 | 85 | 0.678311 | false |
okolisny/integration_tests | cfme/web_ui/__init__.py | 1 | 143810 | """Provides a number of objects to help with managing certain elements in the CFME UI.
Specifically there are two categories of objects, organizational and elemental.
* **Organizational**
* :py:class:`Region`
* :py:mod:`cfme.web_ui.menu`
* **Elemental**
* :py:class:`AngularCalendarInput`
* :py:class:`AngularSelect`
* :py:class:`ButtonGroup`
* :py:class:`Calendar`
* :py:class:`ColorGroup`
* :py:class:`CheckboxTable`
* :py:class:`CheckboxSelect`
* :py:class:`DHTMLSelect`
* :py:class:`DriftGrid`
* :py:class:`DynamicTable`
* :py:class:`EmailSelectForm`
* :py:class:`Filter`
* :py:class:`Form`
* :py:class:`InfoBlock`
* :py:class:`Input`
* :py:class:`MultiFill`
* :py:class:`Quadicon`
* :py:class:`Radio`
* :py:class:`ScriptBox`
* :py:class:`Select`
* :py:class:`ShowingInputs`
* :py:class:`SplitCheckboxTable`
* :py:class:`SplitTable`
* :py:class:`StatusBox`
* :py:class:`Table`
* :py:class:`Tree`
* :py:mod:`cfme.web_ui.accordion`
* :py:mod:`cfme.web_ui.cfme_exception`
* :py:mod:`cfme.web_ui.expression_editor`
* :py:mod:`cfme.web_ui.flash`
* :py:mod:`cfme.web_ui.form_buttons`
* :py:mod:`cfme.web_ui.jstimelines`
* :py:mod:`cfme.web_ui.listaccordion`
* :py:mod:`cfme.web_ui.menu`
* :py:mod:`cfme.web_ui.mixins`
* :py:mod:`cfme.web_ui.paginator`
* :py:mod:`cfme.web_ui.search`
* :py:mod:`cfme.web_ui.tabstrip`
* :py:mod:`cfme.web_ui.toolbar`
"""
import atexit
import os
import re
import time
import types
from datetime import date
from collections import Sequence, Mapping, Callable, Iterable
from tempfile import NamedTemporaryFile
from xml.sax.saxutils import quoteattr, unescape
from cached_property import cached_property
from selenium.common import exceptions as sel_exceptions
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.remote.file_detector import LocalFileDetector
from multimethods import multimethod, multidispatch, Anything
from widgetastic.xpath import quote
import cfme.fixtures.pytest_selenium as sel
from cfme import exceptions, js
from cfme.fixtures.pytest_selenium import browser
# For backward compatibility with code that pulls in Select from web_ui instead of sel
from cfme.fixtures.pytest_selenium import Select
from cfme.utils import attributize_string, castmap, normalize_space, version
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from wait_for import TimedOutError, wait_for
class Selector(object):
"""
Special Selector object allowing object resolution on attr access
The Selector is a simple class which allows a 'super' widget to support multiple
implementations. This is achieved by the use of a ``decide`` method which accesses
attrs of the object set by the ``__init__`` of the child class. These attributes
are then used to decide which type of object is on a page. In some cases, this can
avoid a version pick if the information used to instantiate both old and new implementations
    can be identical. This is most notably the case when using an "id" which remains constant from
    implementation to implementation.
    As an example, imagine the normal "checkbox" is replaced with a fancy new web 2.0
checkbox. Both have an "input" element, and give it the same "id". When the decide method is
invoked, the "id" is inspected and used to determine if it is an old or a new style widget.
We then set a hidden attribute of the super widget and proxy all further attr requests to
that object.
This means that in order for things to behave as expect ALL implementations must also expose
the same "public" API.
"""
def __init__(self):
self._obj = None
def __getattr__(self, name):
if not self._obj:
self._obj = self.decide()
return getattr(self._obj, name)
def decide(self):
raise Exception('This widget does not have a "decide" method which is mandatory')
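# Illustrative sketch only -- the widget classes below are hypothetical and not part
# of this module. A Selector subclass stores its identifying data in __init__ and
# lets ``decide`` return whichever concrete implementation is present on the page:
#
#     class AnyCheckbox(Selector):
#         def __init__(self, input_id):
#             self.input_id = input_id
#             super(AnyCheckbox, self).__init__()
#
#         def decide(self):
#             if sel.is_displayed('//input[@id={}]'.format(quoteattr(self.input_id))):
#                 return OldCheckbox(self.input_id)
#             return NewStyleCheckbox(self.input_id)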
class Region(Pretty):
"""
Base class for all UI regions/pages
Args:
locators: A dict of locator objects for the given region
title: A string containing the title of the page,
or a versioned dict of page title strings
identifying_loc: Single locator key from locators used by :py:meth:`Region.is_displayed`
to check if the region is currently visible
Usage:
page = Region(locators={
'configuration_button': (By.CSS_SELECTOR, "div.dhx_toolbar_btn[title='Configuration']"),
'discover_button': (By.CSS_SELECTOR,
"tr[title='Discover Cloud Providers']>td.td_btn_txt>" "div.btn_sel_text")
},
title='Cloud Providers',
identifying_loc='discover_button'
)
The elements can then accessed like so::
page.configuration_button
Locator attributes will return the locator tuple for that particular element,
and can be passed on to other functions, such as :py:func:`element` and :py:func:`click`.
Note:
When specifying a region title, omit the "Cloudforms Management Engine: " or "ManageIQ: "
prefix. They are included on every page, and different for the two versions of the
appliance, and :py:meth:`is_displayed` strips them off before checking for equality.
"""
pretty_attrs = ['title']
def __getattr__(self, name):
if hasattr(self, 'locators') and name in self.locators:
locator = self.locators[name]
if isinstance(locator, dict):
return version.pick(locator)
else:
return locator
else:
raise AttributeError("Region has no attribute named " + name)
def __init__(self, locators=None, title=None, identifying_loc=None, **kwargs):
self.locators = locators
self.identifying_loc = identifying_loc
self._title = title
self.infoblock = InfoBlock # Legacy support
@property
def title(self):
# support title being a versioned dict
if isinstance(self._title, dict):
self._title = version.pick(self._title)
return self._title
def is_displayed(self):
"""
Checks to see if the region is currently displayed.
Returns: A boolean describing if the region is currently displayed
"""
if not self.identifying_loc and not self.title:
logger.warning("Region doesn't have an identifying locator or title, "
"can't determine if current page.")
return True
# All page titles have a prefix; strip it off
window_title = browser_title()
if self.identifying_loc and sel.is_displayed(
self.locators[self.identifying_loc], _no_deeper=True):
ident_match = True
else:
if not self.title:
logger.info('Identifying locator for region not found')
else:
logger.info('Identifying locator for region %s not found', self.title)
ident_match = False
if self.title is None:
# If we don't have a title we can't match it, and some Regions are multi-page
# so we can't have a title set.
title_match = True
elif self.title and window_title == self.title:
title_match = True
else:
logger.info("Title %s doesn't match expected title %s", window_title, self.title)
title_match = False
return title_match and ident_match
def get_context_current_page():
"""
Returns the current page name
Returns: A string containing the current page name
"""
url = browser().current_url()
    # str.lstrip strips characters, not a prefix; split off the scheme instead
    stripped = url.split('://', 1)[-1]
return stripped[stripped.find('/'):stripped.rfind('?')]
class CachedTableHeaders(object):
"""the internal cache of headers
    This allows columns to be moved and the Table updated: ``headers`` stores the header cell
    elements and ``indexes`` maps each attributized header name to its column index.
"""
def __init__(self, table):
self.headers = sel.elements('td | th', root=table.header_row)
self.indexes = {
attributize_string(cell.text): index
for index, cell in enumerate(self.headers)}
class Table(Pretty):
"""
Helper class for Table/List objects
Turns CFME custom Table/Lists into iterable objects using a generator.
Args:
table_locator: locator pointing to a table element with child thead and tbody elements
representing that table's header and body row containers
header_offset: In the case of a padding table row above the header, the row offset
can be used to skip rows in ``<thead>`` to locate the correct header row. This offset
is 1-indexed, not 0-indexed, so an offset of 1 is the first child row element
body_offset: In the case of a padding table row above the body rows, the row offset
            can be used to skip rows in ``<tbody>`` to locate the correct body row. This offset
is 1-indexed, not 0-indexed, so an offset of 1 is the first child row element
        hidden_locator: If the table can disappear, you probably want to set this param as it
instructs the table that if it cannot find the table on the page but the element
represented by ``hidden_locator`` is visible, it assumes no data and returns no rows.
Attributes:
header_indexes: A dict of header names related to their int index as a column.
Usage:
table = Table('//div[@id="prov_pxe_img_div"]//table')
The HTML code for the table looks something like this::
<div id="prov_pxe_img_div">
<table>
<thead>
<tr>
<th>Name</th>
<th>Animal</th>
<th>Size</th>
</tr>
</thead>
<tbody>
<tr>
<td>John</td>
<td>Monkey</td>
<td>Small</td>
</tr>
<tr>
<td>Mike</td>
<td>Tiger</td>
<td>Large</td>
</tr>
</tbody>
</table>
</div>
We can now click on an element in the list like so, by providing the column
name and the value that we are searching for::
table.click_cell('name', 'Mike')
We can also perform the same, by using the index of the column, like so::
table.click_cell(1, 'Tiger')
Additionally, the rows of a table can be iterated over, and that row's columns can be accessed
by name or index (left to right, 0-index)::
for row in table.rows()
# Get the first cell in the row
row[0]
# Get the row's contents for the column with header 'Row Name'
# All of these will work, though the first is preferred
row.row_name, row['row_name'], row['Row Name']
    When doing bulk operations, such as selecting rows in a table based on their content,
    the ``*_by_cells`` methods are able to find matching rows much more quickly than iterating,
as the work can be done with fewer selenium calls.
* :py:meth:`find_rows_by_cells`
* :py:meth:`find_row_by_cells`
* :py:meth:`click_rows_by_cells`
* :py:meth:`click_row_by_cells`
Note:
A table is defined by the containers of the header and data areas, and offsets to them.
This allows a table to include one or more padding rows above the header row. In
the example above, there is no padding row, as our offset values are set to 0.
"""
pretty_attrs = ['_loc']
def __init__(self, table_locator, header_offset=0, body_offset=0, hidden_locator=None):
self._headers = None
self._header_indexes = None
self._loc = table_locator
self.header_offset = int(header_offset)
self.body_offset = int(body_offset)
self.hidden_locator = hidden_locator
@property
def header_row(self):
"""Property representing the ``<tr>`` element that contains header cells"""
# thead/tr containing header data
# xpath is 1-indexed, so we need to add 1 to the offset to get the correct row
return sel.element('./thead/tr[{}]'.format(self.header_offset + 1), root=sel.element(self))
@property
def body(self):
"""Property representing the ``<tbody>`` element that contains body rows"""
# tbody containing body rows
return sel.element('./tbody', root=sel.element(self))
@cached_property
def _headers_cache(self):
return CachedTableHeaders(self)
def verify_headers(self):
"""Verifies whether the headers in the table correspond with the cached ones."""
current_headers = CachedTableHeaders(self)
cached_headers = self._headers_cache
if current_headers.indexes != cached_headers.indexes:
raise exceptions.UsingSharedTables(
('{cn} suspects that you are using shared tables! '
'That means you are using one {cn} instance to represent different UI tables. '
'This is not possible due to the header caching, but also wrong from the '
'design point of view. Please, create separate instances of {cn} for EACH table '
'in the user interface.').format(cn=type(self).__name__))
def _update_cache(self):
"""refresh the cache in case we know its stale"""
try:
del self._headers_cache
except AttributeError:
            pass  # it's not cached, don't try to be eager
else:
self._headers_cache
@property
def headers(self):
"""List of ``<td>`` or ``<th>`` elements in :py:attr:`header_row`
"""
return self._headers_cache.headers
@property
def header_indexes(self):
"""Dictionary of header name: column index for this table's rows
Derived from :py:attr:`headers`
"""
return self._headers_cache.indexes
def locate(self):
return sel.move_to_element(self._loc)
@property
def _root_loc(self):
return self.locate()
def rows(self):
"""A generator method holding the Row objects
This generator yields Row objects starting at the first data row.
Yields:
:py:class:`Table.Row` object corresponding to the next row in the table.
"""
try:
index = self.body_offset
row_elements = sel.elements('./tr', root=self.body)
for row_element in row_elements[index:]:
yield self.create_row_from_element(row_element)
except (exceptions.CannotScrollException, NoSuchElementException):
if self.hidden_locator is None:
# No hiding is documented here, so just explode
raise
elif not sel.is_displayed(self.hidden_locator):
# Hiding is documented but the element that signalizes that it is all right is not
# present so explode too.
raise
else:
# The table is not present but there is something that signalizes it is all right
# but no data.
return
def rows_as_list(self):
"""Returns rows as list"""
return [i for i in self.rows()]
def row_count(self):
"""Returns row count"""
return len(self.rows_as_list())
def find_row(self, header, value):
"""
Finds a row in the Table by iterating through each visible item.
Args:
header: A string or int, describing which column to inspect.
value: The value to be compared when trying to identify the correct row
to return.
Returns:
:py:class:`Table.Row` containing the requested cell, else ``None``.
"""
return self.find_row_by_cells({header: value})
def find_cell(self, header, value):
"""
        Finds an item in the Table by iterating through each visible item.
        This work used to be done by the :py:meth:`click_cell` method but
        has now been abstracted out so it can be called separately.
Args:
header: A string or int, describing which column to inspect.
value: The value to be compared when trying to identify the correct cell
to click.
Returns: WebElement of the element if item was found, else ``None``.
"""
matching_cell_rows = self.find_rows_by_cells({header: value})
try:
if isinstance(header, basestring):
return getattr(matching_cell_rows[0], header)
else:
return matching_cell_rows[0][header]
except IndexError:
return None
def find_rows_by_cells(self, cells, partial_check=False):
"""A fast row finder, based on cell content.
If you pass a regexp as a value, then it will be used with its ``.match()`` method.
Args:
cells: A dict of ``header: value`` pairs or a sequence of
nested ``(header, value)`` pairs.
partial_check: If to use the ``in`` operator rather than ``==``.
Returns: A list of containing :py:class:`Table.Row` objects whose contents
match all of the header: value pairs in ``cells``
"""
# accept dicts or supertuples
cells = dict(cells)
cell_text_loc = (
'.//td/descendant-or-self::*[contains(normalize-space(text()), "{}")]/ancestor::tr[1]')
matching_rows_list = list()
for value in cells.values():
# Get all td elements that contain the value text
matching_elements = sel.elements(cell_text_loc.format(value),
root=sel.move_to_element(self._root_loc))
if matching_elements:
matching_rows_list.append(set(matching_elements))
# Now, find the common row elements that matched all the input cells
# (though not yet matching values to headers)
if not matching_rows_list:
# If none matched, short out
return []
rows_elements = list(reduce(lambda set1, set2: set1 & set2, matching_rows_list))
# Convert them to rows
# This is slow, which is why we do it after reducing the row element pile,
# and not when building matching_rows_list, but it makes comparing header
# names and expected values easy
rows = [self.create_row_from_element(element) for element in rows_elements]
# Only include rows where the expected values are in the right columns
matching_rows = list()
def matching_row_filter(heading, value):
text = normalize_space(row[heading].text)
if isinstance(value, re._pattern_type):
return value.match(text) is not None
elif partial_check:
return value in text
else:
return text == value
for row in rows:
if all(matching_row_filter(*cell) for cell in cells.items()):
matching_rows.append(row)
return matching_rows
def find_row_by_cells(self, cells, partial_check=False):
"""Find the first row containing cells
Args:
cells: See :py:meth:`Table.find_rows_by_cells`
Returns: The first matching row found, or None if no matching row was found
"""
try:
rows = self.find_rows_by_cells(cells, partial_check=partial_check)
return rows[0]
except IndexError:
return None
def click_rows_by_cells(self, cells, click_column=None, partial_check=False):
"""Click the cell at ``click_column`` in the rows matched by ``cells``
Args:
cells: See :py:meth:`Table.find_rows_by_cells`
click_column: Which column in the row to click, defaults to None,
which will attempt to click the row element
Note:
The value of click_column can be a string or an int, and will be passed directly to
the item accessor (``__getitem__``) for :py:class:`Table.Row`
"""
rows = self.find_rows_by_cells(cells, partial_check=partial_check)
if click_column is not None:
rows = [row[click_column] for row in rows]
for row in rows:
if row is None:
self.verify_headers() # Suspected shared table use
sel.click(row)
def click_row_by_cells(self, cells, click_column=None, partial_check=False):
"""Click the cell at ``click_column`` in the first row matched by ``cells``
Args:
cells: See :py:meth:`Table.find_rows_by_cells`
click_column: See :py:meth:`Table.click_rows_by_cells`
"""
row = self.find_row_by_cells(cells, partial_check=partial_check)
if row is None:
raise NameError('No row matching {} found'.format(repr(cells)))
elif click_column is not None:
row = row[click_column]
if row is None:
self.verify_headers() # Suspected shared table use
sel.click(row)
def create_row_from_element(self, row_element):
"""Given a row element in this table, create a :py:class:`Table.Row`
Args:
row_element: A table row (``<tr>``) WebElement representing a row in this table.
Returns: A :py:class:`Table.Row` for ``row_element``
"""
return Table.Row(row_element, self)
def click_cells(self, cell_map):
"""Submits multiple cells to be clicked on
Args:
cell_map: A mapping of header names and values, representing cells to click.
As an example, ``{'name': ['wing', 'nut']}, {'age': ['12']}`` would click on
the cells which had ``wing`` and ``nut`` in the name column and ``12`` in
the age column. The yaml example for this would be as follows::
list_items:
name:
- wing
- nut
age:
- 12
Raises:
NotAllItemsClicked: If some cells were unable to be found.
"""
failed_clicks = []
for header, values in cell_map.items():
if isinstance(values, basestring):
values = [values]
for value in values:
res = self.click_cell(header, value)
if not res:
failed_clicks.append("{}:{}".format(header, value))
if failed_clicks:
raise exceptions.NotAllItemsClicked(failed_clicks)
def click_cell(self, header, value):
"""Clicks on a cell defined in the row.
Uses the header identifier and a value to determine which cell to click on.
Args:
header: A string or int, describing which column to inspect.
value: The value to be compared when trying to identify the correct cell
to click the cell in.
Returns: ``True`` if item was found and clicked, else ``False``.
"""
cell = self.find_cell(header, value)
if cell:
sel.click(cell)
return True
else:
# This *might* lead to the shared table. So be safe here.
self.verify_headers()
return False
class Row(Pretty):
"""An object representing a row in a Table.
        The Row object returns a dynamically addressable attribute space so that
        the table's headers are automatically generated.
Args:
row_element: A table row ``WebElement``
parent_table: :py:class:`Table` containing ``row_element``
Notes:
Attributes are dynamically generated. The index/key accessor is more flexible
than the attr accessor, as it can operate on int indices and header names.
"""
pretty_attrs = ['row_element', 'table']
def __init__(self, row_element, parent_table):
self.table = parent_table
self.row_element = row_element
@property
def columns(self):
"""A list of WebElements corresponding to the ``<td>`` elements in this row"""
return sel.elements('./td', root=self.row_element)
def __getattr__(self, name):
"""
Returns Row element by header name
"""
try:
return self.columns[self.table.header_indexes[attributize_string(name)]]
except (KeyError, IndexError):
# Suspected shared table use
self.table.verify_headers()
# If it did not fail at that time, reraise
raise
def __getitem__(self, index):
"""
Returns Row element by header index or name
"""
try:
return self.columns[index]
except TypeError:
# Index isn't an int, assume it's a string
return getattr(self, attributize_string(index))
except IndexError:
# Suspected shared table use
self.table.verify_headers()
# If it did not fail at that time, reraise
raise
def __str__(self):
return ", ".join(["'{}'".format(el.text) for el in self.columns])
def __eq__(self, other):
if isinstance(other, type(self)):
# Selenium elements support equality checks, so we can, too.
return self.row_element == other.row_element
else:
return id(self) == id(other)
def locate(self):
# table.create_row_from_element(row_instance) might actually work...
return sel.move_to_element(self.row_element)
class CAndUGroupTable(Table):
"""Type of tables used in C&U, not tested in others.
    Provides a ``.groups()`` generator which yields group objects. A group object consists of the
    rows that are located in the group plus the summary information. The main principle is that
    all the rows inside a group are stored in the group object's ``.rows`` and when the script
    encounters the end of the group, it stores the summary data after the data rows as attributes,
    so e.g. ``Totals:`` becomes ``group.totals``. All the rows are represented as dictionaries.
"""
class States:
NORMAL_ROWS = 0
GROUP_SUMMARY = 1
class Group(object):
def __init__(self, group_id, headers, rows, info_rows):
self.id = group_id
self.rows = [dict(zip(headers, row)) for row in rows]
info_headers = headers[1:]
for info_row in info_rows:
name = info_row[0]
rest = info_row[1:]
data = dict(zip(info_headers, rest))
group_attr = attributize_string(name)
setattr(self, group_attr, data)
def __repr__(self):
return '<CAndUGroupTable.Group {}'.format(repr(self.id))
def paginated_rows(self):
from cfme.web_ui import paginator
for page in paginator.pages():
for row in self.rows():
yield row
def find_group(self, group_id):
"""Finds a group by its group ID (the string that is alone on the line)"""
for group in self.groups():
if group.id == group_id:
return group_id
else:
raise KeyError('Group {} not found'.format(group_id))
def groups(self):
headers = map(sel.text, self.headers)
headers_length = len(headers)
rows = self.paginated_rows()
current_group_rows = []
current_group_summary_rows = []
current_group_id = None
state = self.States.NORMAL_ROWS
while True:
try:
row = rows.next()
except StopIteration:
if state == self.States.GROUP_SUMMARY:
row = None
else:
break
if state == self.States.NORMAL_ROWS:
if len(row.columns) == headers_length:
current_group_rows.append(tuple(map(sel.text, row.columns)))
else:
# Transition to the group summary
current_group_id = sel.text(row.columns[0]).strip()
state = self.States.GROUP_SUMMARY
elif state == self.States.GROUP_SUMMARY:
# row is None == we are at the end of the table so a slightly different behaviour
if row is not None:
fc_length = len(sel.text(row.columns[0]).strip())
if row is None or fc_length == 0:
# Done with group
yield self.Group(
current_group_id, headers, current_group_rows, current_group_summary_rows)
current_group_rows = []
current_group_summary_rows = []
current_group_id = None
state = self.States.NORMAL_ROWS
else:
current_group_summary_rows.append(tuple(map(sel.text, row.columns)))
else:
raise RuntimeError('This should never happen')
if current_group_id is not None or current_group_rows or current_group_summary_rows:
raise ValueError(
'GroupTable could not be parsed properly: {} {} {}'.format(
current_group_id, repr(current_group_rows), repr(current_group_summary_rows)))
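# Illustrative usage sketch -- the locator and the ``totals`` attribute are
# hypothetical and depend on the report being displayed:
#
#     candu_table = CAndUGroupTable('//div[@id="report_html_div"]/table')
#     for group in candu_table.groups():
#         logger.info('%s: %d rows, totals: %r', group.id, len(group.rows), group.totals)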
class SplitTable(Table):
""":py:class:`Table` that supports the header and body rows being in separate tables
Args:
header_data: A tuple, containing an element locator and an offset value.
These point to the container of the header row. The offset is used in case
there is a padding row above the header, or in the case that the header
and the body are contained inside the same table element.
body_data: A tuple, containing an element locator and an offset value.
These point to the container of the body rows. The offset is used in case
there is a padding row above the body rows, or in the case that the header
and the body are contained inside the same table element.
Usage:
table = SplitTable(header_data=('//div[@id="header_table"]//table/tbody', 0),
body_data=('//div[@id="body_table"]//table/tbody', 1))
The HTML code for a split table looks something like this::
<div id="prov_pxe_img_div">
<table id="header_table">
<tbody>
<tr>
<td>Name</td>
<td>Animal</td>
<td>Size</td>
</tr>
</tbody>
</table>
<table id="body_table">
<tbody>
<tr>
<td>Useless</td>
<td>Padding</td>
<td>Row</td>
</tr>
<tr>
<td>John</td>
<td>Monkey</td>
<td>Small</td>
</tr>
<tr>
<td>Mike</td>
<td>Tiger</td>
<td>Large</td>
</tr>
</tbody>
</table>
</div>
Note the use of the offset to skip the "Useless Padding Row" in ``body_data``. Most split
tables require an offset for both the heading and body rows.
"""
def __init__(self, header_data, body_data):
self._header_loc, header_offset = header_data
self._body_loc, body_offset = body_data
self.header_offset = int(header_offset)
self.body_offset = int(body_offset)
@property
def _root_loc(self):
return self._body_loc
@property
def header_row(self):
"""Property representing the ``<tr>`` element that contains header cells"""
# thead/tr containing header data
# xpath is 1-indexed, so we need to add 1 to the offset to get the correct row
return sel.element(
'tr[{}]'.format(self.header_offset + 1), root=sel.element(self._header_loc))
@property
def body(self):
"""Property representing the element that contains body rows"""
# tbody containing body rows
return sel.element(self._body_loc)
def locate(self):
# Use the header locator as the overall table locator
return sel.move_to_element(self._header_loc)
class SortTable(Table):
"""This table is the same as :py:class:`Table`, but with added sorting functionality."""
SORT_CELL = './th[./div/i[contains(@class, "fa-sort")] or contains(@class, "sorting_")]'
SORT_LINK = './th/a[normalize-space(.)={}]'
@property
def _sort_by_cell(self):
try:
return sel.element(self.SORT_CELL, root=self.header_row)
except NoSuchElementException:
return None
@property
def sorted_by(self):
"""Return column name what is used for sorting now.
"""
cell = self._sort_by_cell
if cell is None:
return None
return sel.text("./a", root=cell).encode("utf-8")
@property
def sort_order(self):
"""Return order.
Returns: 'ascending' or 'descending'
"""
cell = self._sort_by_cell
if cell is None:
return None
try:
# Newer type
el = sel.element('./div/i[contains(@class, "fa-sort")]', root=cell)
except NoSuchElementException:
# Older type
el = cell
cls = sel.get_attribute(el, "class")
if "fa-sort-asc" in cls or 'sorting_asc' in cls:
return "ascending"
elif "fa-sort-desc" in cls or 'sorting_desc' in cls:
return "descending"
else:
return None
def click_header_cell(self, text):
"""Clicks on the header to change sorting conditions.
Args:
text: Header cell text.
"""
sel.click(sel.element(self.SORT_LINK.format(quoteattr(text)), root=self.header_row))
def sort_by(self, header, order):
"""Sorts the table by given conditions
Args:
header: Text of the header cell to use for sorting.
order: ascending or descending
"""
order = order.lower().strip()
if header != self.sorted_by:
# Change column to order by
self.click_header_cell(header)
if self.sorted_by != header:
raise Exception(
"Detected malfunction in table ordering (wanted {}, got {})".format(
header, self.sorted_by))
if order != self.sort_order:
# Change direction
self.click_header_cell(header)
if self.sort_order != order:
raise Exception("Detected malfunction in table ordering (wanted {}, got {})".format(
order, self.sort_order))
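    # Illustrative usage -- the locator and column header are hypothetical:
    #
    #     sort_table = SortTable('//div[@id="list_grid"]//table')
    #     sort_table.sort_by('Name', 'ascending')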
class CheckboxTable(Table):
""":py:class:`Table` with support for checkboxes
Args:
table_locator: See :py:class:`cfme.web_ui.Table`
header_checkbox_locator: Locator of header checkbox (default `None`)
Specify in case the header checkbox is not part of the header row
body_checkbox_locator: Locator for checkboxes in body rows
header_offset: See :py:class:`cfme.web_ui.Table`
body_offset: See :py:class:`cfme.web_ui.Table`
"""
_checkbox_loc = ".//input[@type='checkbox']"
def __init__(self, table_locator, header_offset=0, body_offset=0,
header_checkbox_locator=None, body_checkbox_locator=None):
super(CheckboxTable, self).__init__(table_locator, header_offset, body_offset)
if body_checkbox_locator:
self._checkbox_loc = body_checkbox_locator
self._header_checkbox_loc = header_checkbox_locator
@property
def header_checkbox(self):
"""Checkbox used to select/deselect all rows"""
if self._header_checkbox_loc is not None:
return sel.element(self._header_checkbox_loc)
else:
return sel.element(self._checkbox_loc, root=self.header_row)
def select_all(self):
"""Select all rows using the header checkbox or one by one if not present"""
if self._header_checkbox_loc is None:
for row in self.rows():
self._set_row_checkbox(row, True)
else:
sel.uncheck(self.header_checkbox)
sel.check(self.header_checkbox)
def deselect_all(self):
"""Deselect all rows using the header checkbox or one by one if not present"""
if self._header_checkbox_loc is None:
for row in self.rows():
self._set_row_checkbox(row, False)
else:
sel.check(self.header_checkbox)
sel.uncheck(self.header_checkbox)
def _set_row_checkbox(self, row, set_to=False):
row_checkbox = sel.element(self._checkbox_loc, root=row.locate())
sel.checkbox(row_checkbox, set_to)
def _set_row(self, header, value, set_to=False):
""" Internal method used to select/deselect a row by column header and cell value
Args:
header: See :py:meth:`Table.find_row`
value: See :py:meth:`Table.find_row`
set_to: Select if `True`, deselect if `False`
"""
row = self.find_row(header, value)
if row:
self._set_row_checkbox(row, set_to)
return True
else:
return False
def select_rows_by_indexes(self, *indexes):
"""Select rows specified by row indexes (starting with 0)
"""
for i, row in enumerate(self.rows()):
if i in indexes:
self._set_row_checkbox(row, True)
def deselect_rows_by_indexes(self, *indexes):
"""Deselect rows specified by row indexes (starting with 0)
"""
for i, row in enumerate(self.rows()):
if i in indexes:
self._set_row_checkbox(row, False)
def select_row(self, header, value):
"""Select a single row specified by column header and cell value
Args:
header: See :py:meth:`Table.find_row`
value: See :py:meth:`Table.find_row`
Returns: `True` if successful, `False` otherwise
"""
return self._set_row(header, value, True)
def deselect_row(self, header, value):
"""Deselect a single row specified by column header and cell value
Args:
header: See :py:meth:`Table.find_row`
value: See :py:meth:`Table.find_row`
Returns: `True` if successful, `False` otherwise
"""
return self._set_row(header, value, False)
def _set_rows(self, cell_map, set_to=False):
""" Internal method used to select/deselect multiple rows
Args:
cell_map: See :py:meth:`Table.click_cells`
set_to: Select if `True`, deselect if `False`
"""
failed_selects = []
for header, values in cell_map.items():
if isinstance(values, basestring):
values = [values]
for value in values:
res = self._set_row(header, value, set_to)
if not res:
failed_selects.append("{}:{}".format(header, value))
if failed_selects:
raise exceptions.NotAllCheckboxesFound(failed_selects)
def select_rows(self, cell_map):
"""Select multiple rows
Args:
cell_map: See :py:meth:`Table.click_cells`
Raises:
NotAllCheckboxesFound: If some cells were unable to be found
"""
self._set_rows(cell_map, True)
def deselect_rows(self, cell_map):
"""Deselect multiple rows
Args:
cell_map: See :py:meth:`Table.click_cells`
Raises:
NotAllCheckboxesFound: If some cells were unable to be found
"""
self._set_rows(cell_map, False)
def _set_row_by_cells(self, cells, set_to=False, partial_check=False):
row = self.find_row_by_cells(cells, partial_check=partial_check)
if row:
self._set_row_checkbox(row, set_to)
else:
raise sel_exceptions.NoSuchElementException()
def select_row_by_cells(self, cells, partial_check=False):
"""Select the first row matched by ``cells``
Args:
cells: See :py:meth:`Table.find_rows_by_cells`
"""
self._set_row_by_cells(cells, True, partial_check)
def deselect_row_by_cells(self, cells, partial_check=False):
"""Deselect the first row matched by ``cells``
Args:
cells: See :py:meth:`Table.find_rows_by_cells`
"""
self._set_row_by_cells(cells, False, partial_check)
def _set_rows_by_cells(self, cells, set_to=False, partial_check=False):
rows = self.find_rows_by_cells(cells)
for row in rows:
self._set_row_checkbox(row, set_to)
def select_rows_by_cells(self, cells, partial_check=False):
"""Select the rows matched by ``cells``
Args:
cells: See :py:meth:`Table.find_rows_by_cells`
"""
self._set_rows_by_cells(cells, True, partial_check)
def deselect_rows_by_cells(self, cells, partial_check=False):
"""Deselect the rows matched by ``cells``
Args:
cells: See :py:meth:`Table.find_rows_by_cells`
"""
self._set_rows_by_cells(cells, False, partial_check)
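    # Illustrative usage -- the locator and the header/values are hypothetical:
    #
    #     chk_table = CheckboxTable('//div[@id="list_grid"]//table')
    #     chk_table.select_rows({'name': ['vm-01', 'vm-02']})
    #     chk_table.deselect_all()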
class SplitCheckboxTable(SplitTable, CheckboxTable):
""":py:class:`SplitTable` with support for checkboxes
Args:
header_data: See :py:class:`cfme.web_ui.SplitTable`
body_data: See :py:class:`cfme.web_ui.SplitTable`
header_checkbox_locator: See :py:class:`cfme.web_ui.CheckboxTable`
body_checkbox_locator: See :py:class:`cfme.web_ui.CheckboxTable`
header_offset: See :py:class:`cfme.web_ui.Table`
body_offset: See :py:class:`cfme.web_ui.Table`
"""
_checkbox_loc = './/img[contains(@src, "item_chk")]'
def __init__(self, header_data, body_data,
header_checkbox_locator=None, body_checkbox_locator=None):
# To limit multiple inheritance surprises, explicitly call out to SplitTable's __init__
SplitTable.__init__(self, header_data, body_data)
# ...then set up CheckboxTable's locators here
self._header_checkbox_loc = header_checkbox_locator
if body_checkbox_locator:
self._checkbox_loc = body_checkbox_locator
class PagedTable(Table):
""":py:class:`Table` with support for paginator
Args:
table_locator: See :py:class:`cfme.web_ui.Table`
header_offset: See :py:class:`cfme.web_ui.Table`
body_offset: See :py:class:`cfme.web_ui.Table`
"""
def find_row_on_all_pages(self, header, value):
from cfme.web_ui import paginator
for _ in paginator.pages():
sel.wait_for_element(self)
row = self.find_row(header, value)
if row is not None:
return row
def find_row_by_cell_on_all_pages(self, cells):
"""Find the first row containing cells on all pages
Args:
cells: See :py:meth:`Table.find_rows_by_cells`
Returns: The first matching row found on any page
"""
from cfme.web_ui import paginator
for _ in paginator.pages():
sel.wait_for_element(self)
row = self.find_row_by_cells(cells)
if row is not None:
return row
def table_in_object(table_title):
"""If you want to point to tables inside object view, this is what you want to use.
Works both on down- and upstream.
Args:
        table_title: Text in `p` element preceding the table
Returns: XPath locator for the desired table.
"""
return ("//table[(preceding-sibling::p[1] | preceding-sibling::h3[1])[normalize-space(.)={}]]"
.format(quoteattr(table_title)))
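# For example (the title is illustrative): Table(table_in_object('Relationships'))
# builds a Table scoped to the table that directly follows the "Relationships"
# heading on an object's summary page.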
@multimethod(lambda loc, value: (sel.tag(loc), sel.get_attribute(loc, 'type')))
def fill_tag(loc, value):
""" Return a tuple of function to do the filling, and a value to log."""
raise NotImplementedError("Don't know how to fill {} into this type: {}".format(value, loc))
@fill_tag.method(("select", Anything))
def fill_select_tag(select, value):
return (sel.select, value)
@fill_tag.method((Anything, 'text'))
@fill_tag.method((Anything, 'textarea'))
def fill_text(textbox, val):
return (sel.set_text, val)
@fill_tag.method((Anything, 'number'))
def fill_number(bmbox, val):
return (sel.set_text, val)
@fill_tag.method((Anything, 'password'))
def fill_password(pwbox, password):
return (sel.set_text, "********")
@fill_tag.method(('a', Anything))
@fill_tag.method(('img', Anything))
@fill_tag.method((Anything, 'image'))
@fill_tag.method((Anything, 'submit'))
def fill_click(el, val):
"""Click only when given a truthy value"""
def click_if(e, v):
if v:
sel.click(e)
return (click_if, val)
@fill_tag.method((Anything, 'file'))
def fill_file(fd, val):
return (sel.send_keys, val)
@fill_tag.method((Anything, 'checkbox'))
def fill_checkbox(cb, val):
return (sel.checkbox, bool(val))
@multidispatch
def fill(loc, content, **kwargs):
"""
Fills in a UI component with the given content.
Usage:
fill(textbox, "text to fill")
fill(myform, [ ... data to fill ...])
fill(radio, "choice to select")
Returns: True if any UI action was taken, False otherwise
"""
action, logval = fill_tag(loc, content)
if hasattr(loc, 'name'):
ident = loc.name
else:
ident = loc
logger.debug(' Filling in [%s], with value %s', ident, logval)
prev_state = action(loc, content)
sel.detect_observed_field(loc)
return prev_state
@fill.method((Mapping, Anything))
def _version_pick(m, a, **kwargs):
return fill(version.pick(m), a, **kwargs)
@fill.method((Table, Mapping))
def _sd_fill_table(table, cells):
""" How to fill a table with a value (by selecting the value as cells in the table)
See Table.click_cells
"""
table._update_cache()
logger.debug(' Clicking Table cell')
table.click_cells(cells)
return bool(cells)
@fill.method((CheckboxTable, object))
def _sd_fill_checkboxtable(table, cells):
""" How to fill a checkboxtable with a value (by selecting the right rows)
See CheckboxTable.select_by_cells
"""
table._update_cache()
logger.debug(' Selecting CheckboxTable row')
table.select_rows(cells)
return bool(cells)
@fill.method((Callable, object))
def fill_callable(f, val):
"""Fill in a Callable by just calling it with the value, allow for arbitrary actions"""
return f(val)
@fill.method((Select, types.NoneType))
@fill.method((Select, object))
def fill_select(slist, val):
logger.debug(' Filling in {} with value {}'.format(str(slist), val))
prev_sel = sel.select(slist, val)
slist.observer_wait()
return prev_sel
class Calendar(Pretty):
"""A CFME calendar form field
    Calendar fields are readonly, and managed by the dhtmlxCalendar widget. A Calendar field
will accept any object that can be coerced into a string, but the value may not match the format
expected by dhtmlxCalendar or CFME. For best results, either a ``datetime.date`` or
``datetime.datetime`` object should be used to create a valid date field.
Args:
name: "name" property of the readonly calendar field.
Usage:
calendar = web_ui.Calendar("miq_date_1")
web_ui.fill(calendar, date(2000, 1, 1))
web_ui.fill(calendar, '1/1/2001')
"""
def __init__(self, name):
self.name = name
def locate(self):
return sel.move_to_element(Input(self.name))
@fill.method((Calendar, object))
def _sd_fill_date(calendar, value):
input = sel.element(calendar)
if isinstance(value, date):
date_str = '{}/{}/{}'.format(value.month, value.day, value.year)
else:
date_str = str(value)
# need to write to a readonly field: resort to evil
if sel.get_attribute(input, 'ng-model') is not None:
sel.set_angularjs_value(input, date_str)
else:
sel.set_attribute(input, "value", date_str)
# Now when we set the value, we need to simulate a change event.
if sel.get_attribute(input, "data-date-autoclose"):
# New one
script = "$(\"#{}\").trigger('changeDate');"
else:
# Old one
script = (
"if(typeof $j == 'undefined') {var jq = $;} else {var jq = $j;} "
"jq(\"#{}\").change();")
try:
sel.execute_script(script.format(calendar.name))
except sel_exceptions.WebDriverException as e:
logger.warning(
"An exception was raised during handling of the Cal #{}'s change event:\n{}"
.format(calendar.name, str(e)))
sel.wait_for_ajax()
return True
@fill.method((object, types.NoneType))
@fill.method((types.NoneType, object))
def _sd_fill_none(*args, **kwargs):
""" Ignore a NoneType """
pass
class Form(Region):
"""
A class for interacting with Form elements on pages.
The Form class takes a set of locators and binds them together to create a
unified Form object. This Form object has a defined field order so that the
    user does not have to worry about the order in which the information is provided.
This enables the data to be provided as a dict meaning it can be passed directly
from yamls. It inherits the base Region class, meaning that locators can still be
    referenced in the same way a Region's locators can. You can also add a third element to a
    field tuple: a :py:class:`dict` of metadata, mostly determining field validity. See
    :py:meth:`field_valid`
Args:
fields: A list of field name/locator tuples. The argument not only defines
the order of the elements but also which elements comprise part of the form.
identifying_loc: A locator which should be present if the form is visible.
Usage:
provider_form = web_ui.Form(
fields=[
('type_select', "//*[@id='server_emstype']"),
('name_text', "//*[@id='name']"),
('hostname_text', "//*[@id='hostname']"),
('ipaddress_text', "//*[@id='ipaddress']"),
('amazon_region_select', "//*[@id='hostname']"),
('api_port', "//*[@id='port']"),
])
Forms can then be filled in like so.::
provider_info = {
'type_select': "OpenStack",
'name_text': "RHOS-01",
'hostname_text': "RHOS-01",
'ipaddress_text': "10.0.0.0",
'api_port': "5000",
}
web_ui.fill(provider_form, provider_info)
Note:
Using supertuples in a list, although ordered due to the properties of a List,
        will not override the field order defined in the Form.
"""
pretty_attrs = ['fields']
def __init__(self, fields=None, identifying_loc=None):
self.metadata = {}
self.locators = {}
fields_seen = set()
for field in fields:
try:
if field[0] in fields_seen:
raise ValueError('You cannot have duplicate field names in a Form ({})'.format(
field[0]))
self.locators[field[0]] = field[1]
if len(field) == 3:
self.metadata[field[0]] = field[2]
fields_seen.add(field[0])
except IndexError:
raise ValueError("fields= can be 2- or 3-tuples only! (name, loc[, metadata])")
self.fields = fields
self.identifying_loc = identifying_loc
def field_valid(self, field_name):
"""Add the validity constraints here."""
if field_name not in self.metadata:
return True
metadata = self.metadata[field_name]
if "removed_since" in metadata:
removed_since = metadata["removed_since"]
return version.current_version() < removed_since
if "appeared_in" in metadata:
appeared_in = metadata["appeared_in"]
return version.current_version() >= appeared_in
return True
def fill(self, fill_data):
fill(self, fill_data)
@fill.method((Form, Sequence))
def _fill_form_list(form, values, action=None, action_always=False):
"""Fills in field elements on forms
Takes a set of values in dict or supertuple format and locates form elements,
in the correct order, and fills them in.
Note:
        Currently supports text, textarea, select, checkbox, radio, password, ``a`` (anchor)
and Table objects/elements.
Args:
values: a dict or supertuple formatted set of data where
each key is the name of the form locator from the page model. Some
objects/elements, such as :py:class:`Table` objects, support providing
multiple values to be clicked on in a single call.
action: a locator which will be clicked when the form filling is complete
action_always: if True, perform the action even if none of the
values to be filled in required any UI
interaction (eg, text boxes already had the
text to be filled in, checkbox already checked,
etc)
"""
logger.info('Beginning to fill in form...')
sel.wait_for_ajax()
values = list(val for key in form.fields for val in values if val[0] == key[0])
res = []
for field, value in values:
if value is not None and form.field_valid(field):
loc = form.locators[field]
try:
sel.wait_for_element(loc, timeout=10)
except TypeError:
# TypeError - when loc is not resolvable to an element, elements() will yell
# vvv An alternate scenario when element is not resolvable, just wait a bit.
time.sleep(1)
except TimedOutError:
logger.warning("This element [{}] couldn't be waited for".format(loc))
logger.trace(' Dispatching fill for %s', field)
fill_prev = fill(loc, value) # re-dispatch to fill for each item
res.append(fill_prev != value) # note whether anything changed
elif value is None and isinstance(form.locators[field], Select):
fill_prev = fill(form.locators[field], None)
res.append(fill_prev != value)
else:
res.append(False)
if action and (any(res) or action_always): # only perform action if something changed
logger.debug(' Invoking end of form action')
fill(action, True) # re-dispatch with truthy value
logger.debug('Finished filling in form')
return any(res) or action_always
@fill.method((object, Mapping))
def _fill_form_dict(form, values, **kwargs):
"""Fill in a dict by converting it to a list"""
return _fill_form_list(form, values.items(), **kwargs)
class Input(Pretty):
"""Class designed to handle things about ``<input>`` tags that have name attr in one place.
    Also applies to ``textarea``, which is basically an input with multiple lines (if it has a name).
Args:
*names: Possible values (or) of the ``name`` attribute.
Keywords:
use_id: Whether to use ``id`` instead of ``name``. Useful if there is some input that does
not have ``name`` attribute present.
"""
pretty_attrs = ['_names', '_use_id']
def __init__(self, *names, **kwargs):
self._names = names
self._use_id = kwargs.pop("use_id", False)
@property
def names(self):
if len(self._names) == 1 and isinstance(self._names[0], dict):
return (version.pick(self._names[0]),)
else:
return self._names
def _generate_attr(self, name):
return "@{}={}".format("id" if self._use_id else "name", quoteattr(name))
def locate(self):
# If the end of the locator is changed, modify also the choice in Radio!!!
return '//*[(self::input or self::textarea) and ({})]'.format(
" or ".join(self._generate_attr(name) for name in self.names)
)
@property
def angular_help_block(self):
"""Returns the first visible angular helper text (like 'Required')."""
loc = (
'{0}/following-sibling::span[not(contains(@class, "ng-hide"))]'
'| {0}/following-sibling::div/span[not(contains(@class, "ng-hide"))]'
.format(self.locate()))
try:
return sel.text(loc).strip()
except NoSuchElementException:
return None
def __add__(self, string):
return self.locate() + string
def __radd__(self, string):
return string + self.locate()
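# Editor's hedged usage sketch (not part of the original module): an Input can match any of
# several possible name attributes, or an id when use_id=True. The attribute values below are
# assumptions for illustration only.
_example_name_input = Input('vm_name', 'miq_vm_name')        # //input or //textarea by name
_example_date_input = Input('retirement_date', use_id=True)  # looked up by id instead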
class FileInput(Input):
"""A file input handling widget.
    Accepts a string. If the string is a path to an existing local file, that file is uploaded.
    Otherwise a temporary file containing the string is generated and fed to the file input.
"""
pass
@fill.method((FileInput, Anything))
def _fill_file_input(i, a):
# TODO Upgrade selenium to 3.0.1+, this breaks in chrome at send_keys()
# https://github.com/SeleniumHQ/selenium/issues/2906
# Engage the selenium's file detector so we can reliably transfer the file to the browser
with browser().file_detector_context(LocalFileDetector):
# We need a raw element so we can send_keys to it
input_el = sel.element(i.locate())
if browser().file_detector.is_local_file(a) is None:
# Create a temp file
f = NamedTemporaryFile()
f.write(str(a))
f.flush()
input_el.send_keys(os.path.abspath(f.name))
atexit.register(f.close)
else:
# It already is a file ...
input_el.send_keys(a)
# Since we used raw selenium element, wait for ajax here ...
sel.wait_for_ajax()
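# Editor's hedged usage sketch (not part of the original module): filling a FileInput with a
# path to an existing local file uploads that file; any other string is first written to a
# temporary file. The input name below is an assumption for illustration only.
def _example_upload(path_or_content):
    """Illustrative only: upload a file or raw content through an assumed file input."""
    fill(FileInput('upload_file'), path_or_content)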
class Radio(Input):
""" A class for Radio button groups
Radio allows the usage of HTML radio elements without resorting to previous
practice of iterating over elements to find the value. The name of the radio
group is passed and then when choices are required, the locator is built.
Args:
name: The HTML elements ``name`` attribute that identifies a group of radio
buttons.
Usage:
radio = Radio("schedule__schedule_type")
A specific radio element can then be returned by running the following::
el = radio.choice('immediately')
click(el)
The :py:class:`Radio` object can be reused over and over with repeated calls to
the :py:func:`Radio.choice` method.
"""
def choice(self, val):
""" Returns the locator for a choice
Args:
val: A string representing the ``value`` attribute of the specific radio
element.
Returns: A string containing the XPATH of the specific radio element.
"""
# Ugly, but working - all the conditions are in parentheses
return re.sub(r"\]$", " and @value={}]".format(quoteattr(val)), self.locate())
def observer_wait(self, val):
sel.detect_observed_field(self.choice(val))
@fill.method((Radio, object))
def _fill_radio(radio, value):
"""How to fill a radio button group (by selecting the given value)"""
logger.debug(' Filling in Radio{} with value "{}"'.format(tuple(radio.names), value))
sel.click(radio.choice(value))
radio.observer_wait(value)
class BootstrapTreeview(object):
"""A class representing the Bootstrap treeview used in newer builds.
Implements ``expand_path``, ``click_path``, ``read_contents``. All are implemented in manner
very similar to the original :py:class:`Tree`.
Args:
tree_id: Id of the tree, the closest div to the root ``ul`` element.
"""
ROOT_ITEMS = './ul/li[not(./span[contains(@class, "indent")])]'
ROOT_ITEMS_WITH_TEXT = (
'./ul/li[not(./span[contains(@class, "indent")]) and contains(normalize-space(.), {text})]')
SELECTED_ITEM = './ul/li[contains(@class, "node-selected")]'
CHILD_ITEMS = (
'./ul/li[starts-with(@data-nodeid, {id})'
' and count(./span[contains(@class, "indent")])={indent}]')
CHILD_ITEMS_TEXT = (
'./ul/li[starts-with(@data-nodeid, {id})'
' and contains(normalize-space(.), {text})'
' and count(./span[contains(@class, "indent")])={indent}]')
ITEM_BY_NODEID = './ul/li[@data-nodeid={}]'
IS_EXPANDABLE = './span[contains(@class, "expand-icon")]'
IS_EXPANDED = './span[contains(@class, "expand-icon") and contains(@class, "fa-angle-down")]'
IS_CHECKABLE = './span[contains(@class, "check-icon")]'
IS_CHECKED = './span[contains(@class, "check-icon") and contains(@class, "fa-check-square-o")]'
IS_LOADING = './span[contains(@class, "expand-icon") and contains(@class, "fa-spinner")]'
INDENT = './span[contains(@class, "indent")]'
def __init__(self, tree_id):
self.tree_id = tree_id
@classmethod
def image_getter(cls, item):
"""Look up the image that is hidden in the style tag
Returns:
The name of the image without the hash, path and extension.
"""
try:
image_node = sel.element('./span[contains(@class, "node-image")]', root=item)
except NoSuchElementException:
return None
style = sel.get_attribute(image_node, 'style')
image_href = re.search(r'url\("([^"]+)"\)', style).groups()[0]
return re.search(r'/([^/]+)-[0-9a-f]+\.png$', image_href).groups()[0]
def locate(self):
return '#{}'.format(self.tree_id)
@property
def selected_item(self):
return sel.element(self.SELECTED_ITEM, root=self)
@classmethod
def indents(cls, item):
return len(sel.elements(cls.INDENT, root=item))
@classmethod
def is_expandable(cls, item):
return bool(sel.elements(cls.IS_EXPANDABLE, root=item))
@classmethod
def is_expanded(cls, item):
return bool(sel.elements(cls.IS_EXPANDED, root=item))
@classmethod
def is_checkable(cls, item):
return bool(sel.elements(cls.IS_CHECKABLE, root=item))
@classmethod
def is_checked(cls, item):
return bool(sel.elements(cls.IS_CHECKED, root=item))
@classmethod
def is_loading(cls, item):
return bool(sel.elements(cls.IS_LOADING, root=item))
@classmethod
def is_collapsed(cls, item):
return not cls.is_expanded(item)
@classmethod
def is_selected(cls, item):
return 'node-selected' in sel.classes(item)
@classmethod
def get_nodeid(cls, item):
return sel.get_attribute(item, 'data-nodeid')
@classmethod
def get_expand_arrow(cls, item):
return sel.element(cls.IS_EXPANDABLE, root=item)
def child_items(self, item=None):
if item is not None:
nodeid = unescape(quoteattr(self.get_nodeid(item) + '.'))
node_indents = self.indents(item) + 1
return sel.elements(self.CHILD_ITEMS.format(id=nodeid, indent=node_indents), root=self)
else:
return sel.elements(self.ROOT_ITEMS, root=self)
def child_items_with_text(self, item, text):
text = unescape(quoteattr(text))
if item is not None:
nodeid = unescape(quoteattr(self.get_nodeid(item) + '.'))
node_indents = self.indents(item) + 1
return sel.elements(
self.CHILD_ITEMS_TEXT.format(id=nodeid, text=text, indent=node_indents), root=self)
else:
return sel.elements(self.ROOT_ITEMS_WITH_TEXT.format(text=text), root=self)
def get_item_by_nodeid(self, nodeid):
nodeid_q = unescape(quoteattr(nodeid))
try:
return sel.element(self.ITEM_BY_NODEID.format(nodeid_q), root=self)
except NoSuchElementException:
raise exceptions.CandidateNotFound({
'message':
                    'Could not find the item with nodeid {} in Bootstrap tree {}'.format(
nodeid,
self.tree_id),
'path': '',
'cause': ''})
def expand_node(self, nodeid):
"""Expands a node given its nodeid. Must be visible
Args:
nodeid: ``nodeId`` of the node
Returns:
``True`` if it was possible to expand the node, otherwise ``False``.
"""
logger.trace('Expanding node %s on tree %s', nodeid, self.tree_id)
node = self.get_item_by_nodeid(nodeid)
if not self.is_expandable(node):
return False
if self.is_collapsed(node):
arrow = self.get_expand_arrow(node)
sel.click(arrow)
time.sleep(0.1)
wait_for(
lambda: not self.is_loading(self.get_item_by_nodeid(nodeid)),
delay=0.2, num_sec=30)
wait_for(
lambda: self.is_expanded(self.get_item_by_nodeid(nodeid)),
delay=0.2, num_sec=10)
return True
def collapse_node(self, nodeid):
"""Collapses a node given its nodeid. Must be visible
Args:
nodeid: ``nodeId`` of the node
Returns:
``True`` if it was possible to expand the node, otherwise ``False``.
"""
logger.trace('Collapsing node %s on tree %s', nodeid, self.tree_id)
node = self.get_item_by_nodeid(nodeid)
if not self.is_expandable(node):
return False
if self.is_expanded(node):
arrow = self.get_expand_arrow(node)
sel.click(arrow)
time.sleep(0.1)
wait_for(
lambda: self.is_collapsed(self.get_item_by_nodeid(nodeid)),
delay=0.2, num_sec=10)
return True
@classmethod
def _process_step(cls, step):
"""Steps can be plain strings or tuples when matching images"""
if isinstance(step, dict):
# Version pick and call again ...
return cls._process_step(version.pick(step))
if isinstance(step, tuple):
image = step[0]
step = step[1]
else:
image = None
if not isinstance(step, (basestring, re._pattern_type)):
step = str(step)
return image, step
@staticmethod
def _repr_step(image, step):
if isinstance(step, re._pattern_type):
# Make it look like r'pattern'
step_repr = 'r' + re.sub(r'^[^"\']', '', repr(step.pattern))
else:
step_repr = step
if image is None:
return step_repr
else:
return '{}[{}]'.format(step_repr, image)
@classmethod
def pretty_path(cls, path):
return '/'.join(cls._repr_step(*cls._process_step(step)) for step in path)
@classmethod
def validate_node(cls, node, matcher, image):
text = sel.text(node)
if isinstance(matcher, re._pattern_type):
match = matcher.match(text) is not None
else:
match = matcher == text
if not match:
return False
if image is not None and cls.image_getter(node) != image:
return False
return True
def expand_path(self, *path, **kwargs):
"""Expands given path and returns the leaf node.
The path items can be plain strings. In that case, exact string matching happens. Path items
can also be compiled regexps, where the ``match`` method is used to determine if the node
is the one we want. And finally, the path items can be 2-tuples, where the second item can
be the string or regular expression and the first item is the image to be matched using
:py:meth:`image_getter` method.
Args:
*path: The path (explained above)
Returns:
The leaf WebElement.
Raises:
:py:class:`exceptions.CandidateNotFound` when the node is not found in the tree.
"""
sel.wait_for_ajax()
logger.info('Expanding path %s on tree %s', self.pretty_path(path), self.tree_id)
node = None
steps_tried = []
for step in path:
steps_tried.append(step)
image, step = self._process_step(step)
if node is not None and not self.expand_node(self.get_nodeid(node)):
raise exceptions.CandidateNotFound({
'message':
                        'Could not find the item {} in Bootstrap tree {}'.format(
self.pretty_path(steps_tried),
self.tree_id),
'path': path,
'cause': 'Could not expand the {} node'.format(self._repr_step(image, step))})
if isinstance(step, basestring):
# To speed up the search when having a string to match, pick up items with that text
child_items = self.child_items_with_text(node, step)
else:
# Otherwise we need to go through all of them.
child_items = self.child_items(node)
for child_item in child_items:
if self.validate_node(child_item, step, image):
node = child_item
break
else:
try:
cause = 'Was not found in {}'.format(
self._repr_step(*self._process_step(steps_tried[-2])))
except IndexError:
# There is only one item, probably root?
cause = 'Could not find {}'.format(
self._repr_step(*self._process_step(steps_tried[0])))
raise exceptions.CandidateNotFound({
'message':
                        'Could not find the item {} in Bootstrap tree {}'.format(
self.pretty_path(steps_tried),
self.tree_id),
'path': path,
'cause': cause})
return node
def click_path(self, *path, **kwargs):
"""Expands the path and clicks the leaf node.
        See :py:meth:`expand_path` for more information about the accepted path format.
"""
node = self.expand_path(*path, **kwargs)
sel.click(node)
return node
def read_contents(self, nodeid=None, include_images=False, collapse_after_read=False):
if nodeid is not None:
item = self.get_item_by_nodeid(nodeid)
self.expand_node(nodeid)
else:
item = None
result = []
for child_item in self.child_items(item):
result.append(
self.read_contents(
nodeid=self.get_nodeid(child_item),
include_images=include_images,
collapse_after_read=collapse_after_read))
if collapse_after_read and nodeid is not None:
self.collapse_node(nodeid)
if include_images and item is not None:
this_item = (self.image_getter(item), sel.text(item))
elif item is not None:
this_item = sel.text(item)
else:
this_item = None
if result and this_item is not None:
return [this_item, result]
elif result:
return result
else:
return this_item
def check_uncheck_node(self, check, *path, **kwargs):
leaf = self.expand_path(*path, **kwargs)
if not self.is_checkable(leaf):
raise TypeError('Item with path {} in {} is not checkable'.format(
self.pretty_path(path), self.tree_id))
checked = self.is_checked(leaf)
if checked != check:
sel.click(sel.element(self.IS_CHECKABLE, root=leaf))
def check_node(self, *path, **kwargs):
"""Expands the passed path and checks a checkbox that is located at the node."""
return self.check_uncheck_node(True, *path, **kwargs)
def uncheck_node(self, *path, **kwargs):
"""Expands the passed path and unchecks a checkbox that is located at the node."""
return self.check_uncheck_node(False, *path, **kwargs)
def node_checked(self, *path, **kwargs):
"""Check if a checkbox is checked on the node in that path."""
leaf = self.expand_path(*path, **kwargs)
if not self.is_checkable(leaf):
return False
return self.is_checked(leaf)
def find_path_to(self, target, exact=False):
""" Method used to look up the exact path to an item we know only by its regexp or partial
description.
Expands whole tree during the execution.
Args:
target: Item searched for. Can be regexp made by
:py:func:`re.compile <python:re.compile>`,
otherwise it is taken as a string for `in` matching.
exact: Useful in string matching. If set to True, it matches the exact string.
Default is False.
Returns: :py:class:`list` with path to that item.
"""
if not isinstance(target, re._pattern_type):
if exact:
target = re.compile(r"^{}$".format(re.escape(str(target))))
else:
target = re.compile(r".*?{}.*?".format(re.escape(str(target))))
def _find_in_tree(t, p=None):
if t is None:
return
if p is None:
p = []
for item in t:
if isinstance(item, list):
if target.match(item[0]) is None:
subtree = _find_in_tree(item[1], p + [item[0]])
if subtree is not None:
return subtree
else:
return p + [item[0]]
else:
if target.match(item) is not None:
return p + [item]
else:
return
result = _find_in_tree(self.read_contents())
if result is None:
raise NameError("{} not found in tree".format(target.pattern))
else:
return result
@fill.method((BootstrapTreeview, Sequence))
def _fill_bstree_seq(tree, values):
if not values:
return None
try:
if isinstance(values[0], types.StringTypes):
tree.click_path(*values)
elif isinstance(values[0], Iterable):
for check in values:
tree.check_uncheck_node(check[1], *check[0])
except IndexError:
tree.click_path(*values)
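# Editor's hedged usage sketch (not part of the original module): expand_path/click_path
# accept plain strings, compiled regexps or (image, text) tuples as path steps. The tree id,
# node names and image name below are assumptions for illustration only.
def _example_click_tree_node(tree_id):
    """Illustrative only: mix the supported step forms in a single path."""
    tree = BootstrapTreeview(tree_id)
    return tree.click_path('Datastore', re.compile(r'^ManageIQ'), ('domain', 'System'))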
class Tree(Pretty):
""" A class directed at CFME Tree elements
The Tree class aims to deal with all kinds of CFME trees
Args:
locator: This is a locator object pointing to the ``<ul>`` element which contains the rest
of the table.
Returns: A :py:class:`Tree` object.
A Tree object is set up by using a locator which contains the node elements. This element
will usually be a ``<ul>`` in the case of a Dynatree.
Usage:
tree = web_ui.Tree((By.XPATH, '//table//tr[@title="Datastore"]/../..'))
The path can then be navigated to return the last object in the path list, like so::
tree.click_path('Automation', 'VM Lifecycle Management (VMLifecycle)',
'VM Migrate (Migrate)')
Each path element will be expanded along the way, but will not be clicked.
When used in a :py:class:`Form`, a list of path tuples is expected in the form fill data.
The paths will be passed individually to :py:meth:`Tree.check_node`::
form = Form(fields=[
('tree_field', List(locator)),
])
form_fill_data = {
'tree_field': [
('Tree Node', 'Value'),
('Tree Node', 'Branch Node', 'Value'),
]
        }
Note: Dynatrees, rely on a ``<ul><li>`` setup. We class a ``<li>`` as a node.
"""
pretty_attrs = ['locator']
def __init__(self, locator):
self.locator = locator
@cached_property
def tree_id(self):
if isinstance(self.locator, basestring) and re.match(r"^[a-zA-Z0-9_-]+$", self.locator):
return self.locator
else:
el = sel.element(self.locator)
tag = sel.tag(el)
tree_id = None
if tag == "ul":
try:
parent = sel.element("..", root=el)
id_attr = sel.get_attribute(parent, "id")
if id_attr:
tree_id = id_attr
except sel.NoSuchElementException:
pass
elif tag == "div":
tree_id = sel.get_attribute(el, "id") or None
else:
raise ValueError("Unknown element ({}) passed to the Tree!".format(tag))
if tree_id is None:
raise ValueError("Could not retrieve the id for Tree {}".format(repr(tree_id)))
else:
return tree_id
def locate(self):
return "#{}".format(self.tree_id)
def root_el(self):
return sel.element(self)
def _get_tag(self):
if getattr(self, 'tag', None) is None:
self.tag = sel.tag(self)
return self.tag
def read_contents(self, by_id=False):
result = False
while result is False:
sel.wait_for_ajax()
result = sel.execute_script(
"{} return read_tree(arguments[0], arguments[1]);".format(js.read_tree),
self.locate(),
by_id)
return result
def expand_path(self, *path, **kwargs):
""" Exposes a path.
Args:
*path: The path as multiple positional string arguments denoting the course to take.
Keywords:
by_id: Whether to match ids instead of text.
Returns: The leaf web element.
"""
by_id = kwargs.pop("by_id", False)
result = False
# Ensure we pass str to the javascript. This handles objects that represent themselves
# using __str__ and generally, you should only pass str because that is what makes sense
path = castmap(str, path)
# We sometimes have to wait for ajax. In that case, JS function returns false
# Then we repeat and wait. It does not seem completely possible to wait for the data in JS
# as it runs on one thread it appears. So this way it will try to drill multiple times
# each time deeper and deeper :)
while result is False:
sel.wait_for_ajax()
try:
result = sel.execute_script(
"{} return find_leaf(arguments[0],arguments[1],arguments[2]);".format(
js.find_leaf),
self.locate(),
path,
by_id)
except sel.WebDriverException as e:
text = str(e)
match = re.search(r"TREEITEM /(.*?)/ NOT FOUND IN THE TREE", text)
if match is not None:
item = match.groups()[0]
raise exceptions.CandidateNotFound(
{'message': "{}: could not be found in the tree.".format(item),
'path': path,
'cause': e})
match = re.search(r"^CANNOT FIND TREE /(.*?)/$", text)
if match is not None:
tree_id = match.groups()[0]
raise exceptions.TreeNotFound(
"Tree {} / {} not found.".format(tree_id, self.locator))
# Otherwise ...
raise
return result
def click_path(self, *path, **kwargs):
""" Exposes a path and then clicks it.
Args:
*path: The path as multiple positional string arguments denoting the course to take.
Keywords:
by_id: Whether to match ids instead of text.
Returns: The leaf web element.
"""
# Ensure we pass str to the javascript. This handles objects that represent themselves
# using __str__ and generally, you should only pass str because that is what makes sense
path = castmap(str, path)
leaf = self.expand_path(*path, **kwargs)
logger.info("Path %r yielded menuitem %r", path, sel.text(leaf))
if leaf is not None:
sel.wait_for_ajax()
sel.click(leaf)
return leaf
@classmethod
def browse(cls, tree, *path):
"""Browse through tree via path.
If node not found, raises exception.
        If browsing reaches a leaf (a string), returns True if that was also the last step,
        otherwise False.
If the result of the path is a subtree, it is returned.
Args:
tree: List with tree.
*path: Path to browse.
"""
# Ensure we pass str to the javascript. This handles objects that represent themselves
# using __str__ and generally, you should only pass str because that is what makes sense
path = castmap(str, path)
current = tree
for i, step in enumerate(path, start=1):
for node in current:
if isinstance(node, list):
if node[0] == step:
current = node[1]
break
else:
if node == step:
return i == len(path)
else:
raise Exception("Could not find node {}".format(step))
return current
@classmethod
def flatten_level(cls, tree):
"""Extracts just node names from current tree (top).
It makes:
.. code-block:: python
["asd", "fgh", ("ijk", [...]), ("lmn", [...])]
to
.. code-block:: python
["asd", "fgh", "ijk", "lmn"]
        Useful for checking the contents of the current tree level.
"""
return map(lambda item: item[0] if isinstance(item, list) else item, tree)
def find_path_to(self, target, exact=False):
""" Method used to look up the exact path to an item we know only by its regexp or partial
description.
Expands whole tree during the execution.
Args:
target: Item searched for. Can be regexp made by
:py:func:`re.compile <python:re.compile>`,
otherwise it is taken as a string for `in` matching.
exact: Useful in string matching. If set to True, it matches the exact string.
Default is False.
Returns: :py:class:`list` with path to that item.
"""
if not isinstance(target, re._pattern_type):
if exact:
target = re.compile(r"^{}$".format(re.escape(str(target))))
else:
target = re.compile(r".*?{}.*?".format(re.escape(str(target))))
def _find_in_tree(t, p=None):
if p is None:
p = []
for item in t:
if isinstance(item, list):
if target.match(item[0]) is None:
subtree = _find_in_tree(item[1], p + [item[0]])
if subtree is not None:
return subtree
else:
return p + [item[0]]
else:
if target.match(item) is not None:
return p + [item]
else:
return None
result = _find_in_tree(self.read_contents())
if result is None:
raise NameError("{} not found in tree".format(target.pattern))
else:
return result
class CheckboxTree(Tree):
"""Tree that has a checkbox on each node, adds methods to check/uncheck them"""
node_checkbox = "../span[@class='dynatree-checkbox']"
def _is_checked(self, leaf):
return 'dynatree-selected' in \
sel.get_attribute(sel.element("..", root=leaf), 'class')
def _check_uncheck_node(self, path, check=False):
""" Checks or unchecks a node.
Args:
*path: The path as multiple positional string arguments denoting the course to take.
check: If ``True``, the node is checked, ``False`` the node is unchecked.
"""
leaf = self.expand_path(*path)
cb = sel.element(self.node_checkbox, root=leaf)
if check is not self._is_checked(leaf):
sel.click(cb)
def check_node(self, *path):
""" Convenience function to check a node
Args:
*path: The path as multiple positional string arguments denoting the course to take.
"""
self._check_uncheck_node(path, check=True)
def uncheck_node(self, *path):
""" Convenience function to uncheck a node
Args:
*path: The path as multiple positional string arguments denoting the course to take.
"""
self._check_uncheck_node(path, check=False)
@fill.method((Tree, Sequence))
def _fill_tree_seq(tree, values):
tree.click_path(*values)
@sel.select.method((CheckboxTree, Sequence))
@fill.method((CheckboxTree, Sequence))
def _select_chkboxtree_seq(cbtree, values):
"""values should be a list of tuple pairs, where the first item is the
path to select, and the second is whether to check or uncheck.
Usage:
select(cbtree, [(['Foo', 'Bar'], False),
(['Baz'], True)])
"""
for (path, to_select) in values:
if to_select:
cbtree.check_node(*path)
else:
cbtree.uncheck_node(*path)
class InfoBlock(Pretty):
DETAIL = "detail"
FORM = "form"
PF = "patternfly"
_TITLE_CACHE = {}
pretty_attrs = ["title"]
def __new__(cls, title, detail=None):
# Caching
if title not in cls._TITLE_CACHE:
cls._TITLE_CACHE[title] = super(InfoBlock, cls).__new__(cls)
cls._TITLE_CACHE[title].__init__(title)
instance = cls._TITLE_CACHE[title]
if detail is None:
return instance
else:
return instance.member(detail)
def __init__(self, title):
if all(map(lambda a: hasattr(self, a), ["title", "_type", "_member_cache"])):
return
self.title = title
self._type = None
self._member_cache = {}
@property
def type(self):
if self._type is None:
self.root # To retrieve it
return self._type
@property
def root(self):
possible_locators = [
# Detail type
'//table//th[contains(normalize-space(.), "{}")]/../../../..'.format(
self.title),
# Form type
(
'//*[p[@class="legend"][contains(normalize-space(.), "{}")] and table/tbody/tr/td['
'contains(@class, "key")]]'.format(self.title)
),
# Newer Form type (master.20150311020845_547fd06 onwards)
(
'//*[h3[contains(normalize-space(.), "{}")] and table/tbody/tr/td['
'contains(@class, "key")]]'.format(self.title)
),
# Newer Form type used in AC tagging:
(
'//h3[contains(normalize-space(.), "{}")]/following-sibling::div/table/tbody/tr/td['
'contains(@class, "key")]/../../../..'.format(self.title)
),
# The root element must contain table element because listaccordions were caught by the
# locator. It used to be fieldset but it seems it can be really anything
# And here comes a new one, this time no table. (eg. 5.5.0.7 Configuration/About)
(
'//*[h3[contains(normalize-space(.), "{}")] and '
'div[contains(@class, "form-horizontal")]/div/label]'.format(self.title)
)
]
found = sel.elements("|".join(possible_locators))
if not found:
raise exceptions.BlockTypeUnknown("The block type requested is unknown")
root_el = found[0]
if sel.elements("./table/tbody/tr/td[contains(@class, 'key')]", root=root_el):
self._type = self.FORM
elif sel.elements("./div[contains(@class, 'form-horizontal')]/div/label", root=root_el):
self._type = self.PF
else:
self._type = self.DETAIL
return root_el
def member(self, name):
if name not in self._member_cache:
self._member_cache[name] = self.Member(self, name)
return self._member_cache[name]
def by_member_icon(self, icon):
"""In case you want to find the item by icon in the value field (like OS infra diff.)"""
if self._type == self.PF:
raise NotImplementedError(
"I haven't implemented icons+patternfly infoblock yet, so fix me if you see this.")
l = ".//table/tbody/tr/td[2]/img[contains(@src, {})]/../../td[1]".format(quoteattr(icon))
return self.member(sel.text(l))
def __call__(self, member):
"""A present for @smyers"""
return self.member(member)
##
#
# Shortcuts for old-style access
#
@classmethod
def text(cls, *args, **kwargs):
return cls(*args, **kwargs).text
@classmethod
def element(cls, *args, **kwargs):
return cls(*args, **kwargs).element
@classmethod
def elements(cls, *args, **kwargs):
return cls(*args, **kwargs).elements
@classmethod
def icon_href(cls, *args, **kwargs):
return cls(*args, **kwargs).icon_href
@classmethod
    def container(cls, *args, **kwargs):
try:
return sel.element(cls(*args, **kwargs).container)
except sel_exceptions.NoSuchElementException:
raise exceptions.ElementOrBlockNotFound(
"Either the element of the block could not be found")
class Member(Pretty):
pretty_attrs = "name", "ib"
def __init__(self, ib, name):
self.ib = ib
self.name = name
@property
def pair_locator(self):
if self.ib.type == InfoBlock.DETAIL:
return './/table/tbody/tr/td[1][@class="label"][normalize-space(.)="{}"]/..'.format(
self.name)
elif self.ib.type == InfoBlock.FORM:
return './/table/tbody/tr/td[1][@class="key"][normalize-space(.)="{}"]/..'.format(
self.name)
elif self.ib.type == InfoBlock.PF:
return (
'./div[contains(@class, "form-horizontal")]'
'/div[label[normalize-space(.)="{}"]]/div'.format(self.name))
@property
def pair(self):
return sel.element(self.pair_locator, root=self.ib.root)
@property
def container(self):
if self.ib.type == InfoBlock.PF:
# Because we get the element directly, not the two tds
return self.pair
else:
return sel.element("./td[2]", root=self.pair)
def locate(self):
return self.container
@property
def elements(self):
return sel.elements("./*", root=self.container)
@property
def element(self):
return self.elements[0]
@property
def text(self):
return sel.text(self.container).encode("utf-8").strip()
@property
def icon_href(self):
try:
return sel.get_attribute(sel.element("./img", root=self.container), "src")
except sel_exceptions.NoSuchElementException:
return None
@property
def title(self):
return sel.get_attribute(self.pair, "title") or None
@fill.method((InfoBlock, Sequence))
def _ib_seq(ib, i):
for item in i:
sel.click(ib.member(item))
@fill.method((InfoBlock, basestring))
def _ib_str(ib, s):
fill([s])
@fill.method((InfoBlock.Member, bool))
def _ib_m_seq(member, b):
if b:
sel.click(member)
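# Editor's hedged usage sketch (not part of the original module): the InfoBlock shortcuts
# read a single summary value by block title and row label. Both strings below are
# assumptions for illustration only.
def _example_read_summary_value():
    """Illustrative only: return the text of one member of a titled block."""
    return InfoBlock.text('Relationships', 'Cloud Networks')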
class Quadicon(Pretty):
"""
Represents a single quadruple icon in the CFME UI.
A Quadicon contains multiple quadrants. These are accessed via attributes.
The qtype is currently one of the following and determines which attribute names
are present. They are mapped internally and can be reassigned easily if the UI changes.
A Quadicon is used by defining the name of the icon and the type. After that, it can be used
to obtain the locator of the Quadicon, or query its quadrants, via attributes.
Args:
name: The label of the icon.
qtype: The type of the quad icon. By default it is ``None``, therefore plain quad without any
retrievable data usable for selecting/clicking.
Usage:
qi = web_ui.Quadicon('hostname.local', 'host')
qi.creds
click(qi)
.. rubric:: Known Quadicon Types and Attributes
* **host** - *from the infra/host page* - has quads:
* a. **no_vm** - Number of VMs
* b. **state** - The current state of the host
* c. **vendor** - The vendor of the host
* d. **creds** - If the creds are valid
* **infra_prov** - *from the infra/providers page* - has quads:
* a. **no_host** - Number of hosts
* b. *Blank*
* c. **vendor** - The vendor of the provider
* d. **creds** - If the creds are valid
* **vm** - *from the infra/virtual_machines page* - has quads:
* a. **os** - The OS of the vm
* b. **state** - The current state of the vm
* c. **vendor** - The vendor of the vm's host
* d. **no_snapshot** - The number of snapshots
* g. **policy** - The state of the policy
* **cloud_prov** - *from the cloud/providers page* - has quads:
* a. **no_instance** - Number of instances
* b. **no_image** - Number of machine images
* c. **vendor** - The vendor of the provider
* d. **creds** - If the creds are valid
* **instance** - *from the cloud/instances page* - has quads:
* a. **os** - The OS of the instance
* b. **state** - The current state of the instance
* c. **vendor** - The vendor of the instance's host
* d. **no_snapshot** - The number of snapshots
* g. **policy** - The state of the policy
* **datastore** - *from the infra/datastores page* - has quads:
* a. **type** - File system type
* b. **no_vm** - Number of VMs
* c. **no_host** - Number of hosts
* d. **avail_space** - Available space
* **cluster** - *from the infra/cluster page* - has no quads
* **resource_pool** - *from the infra/resource_pool page* - has no quads
* **stack** - *from the clouds/stacks page* - has no quads
Returns: A :py:class:`Quadicon` object.
"""
pretty_attrs = ['_name', '_qtype']
QUADS = {
"host": {
"no_vm": ("a", 'txt'),
"state": ("b", 'img'),
"vendor": ("c", 'img'),
"creds": ("d", 'img'),
},
"infra_prov": {
"no_host": ("a", 'txt'),
"vendor": ("c", 'img'),
"creds": ("d", 'img'),
},
"vm": {
"os": ("a", 'img'),
"state": ("b", 'img'),
"vendor": ("c", 'img'),
"no_snapshot": ("d", 'txt'),
"policy": ("g", 'img'),
},
"cloud_prov": {
"no_vm": ("a", 'txt'),
"no_image": ("b", 'txt'),
"vendor": ("b", 'img'),
"creds": ("d", 'img'),
},
"instance": {
"os": ("a", 'img'),
"state": ("b", 'img'),
"vendor": ("c", 'img'),
"no_snapshot": ("d", 'txt'),
"policy": ("g", 'img'),
},
"stack": {},
"datastore": {
"type": ("a", 'img'),
"no_vm": ("b", 'txt'),
"no_host": ("c", 'txt'),
"avail_space": ("d", 'img'),
},
"cluster": {},
"resource_pool": {},
"template": {
"os": ("a", 'img'),
"state": ("b", 'img'),
"vendor": ("c", 'img'),
"no_snapshot": ("d", 'txt'),
},
"image": {
"os": ("a", 'img'),
"state": ("b", 'img'),
"vendor": ("c", 'img'),
"no_snapshot": ("d", 'txt'),
},
"middleware": {}, # Middleware quads have no fields
"object_store": {},
None: {}, # If you just want to find the quad and not mess with data
}
def __init__(self, name, qtype=None):
self._name = name
self.qtype = qtype
def __repr__(self):
return '{}({!r}, {!r})'.format(type(self).__name__, self._name, self.qtype)
@property
def qtype(self):
return self._qtype
@qtype.setter
def qtype(self, value):
assert value in self.QUADS
self._qtype = value
@property
def _quad_data(self):
return self.QUADS[self.qtype]
def checkbox(self):
""" Returns: a locator for the internal checkbox for the quadicon"""
return "//input[@type='checkbox' and ../../..//a[{}]]".format(self.a_cond)
@property
def exists(self):
try:
self.locate()
return True
except sel.NoSuchElementException:
return False
@property
def a_cond(self):
if self.qtype == "middleware":
return "contains(normalize-space(@title), {name})"\
.format(name=quoteattr('Name: {}'.format(self._name)))
else:
return "@title={name} or @data-original-title={name}".format(name=quoteattr(self._name))
def locate(self):
""" Returns: a locator for the quadicon anchor"""
try:
return sel.move_to_element(
'div/a',
root="//div[contains(@id, 'quadicon') and ../../..//a[{}]]".format(self.a_cond))
except sel.NoSuchElementException:
quads = sel.elements("//div[contains(@id, 'quadicon')]/../../../tr/td/a")
if not quads:
raise sel.NoSuchElementException("Quadicon {} not found. No quads present".format(
self._name))
else:
quad_names = [self._get_title(quad) for quad in quads]
raise sel.NoSuchElementException(
"Quadicon {} not found. These quads are present:\n{}".format(
self._name, ", ".join(quad_names)))
def _locate_quadrant(self, corner):
""" Returns: a locator for the specific quadrant"""
return "//div[contains(@class, {}) and ../../../..//a[{}]]".format(
quoteattr("{}72".format(corner)), self.a_cond)
def __getattr__(self, name):
""" Queries the quadrants by name
Args:
name: The name of the quadrant identifier, as defined above.
Returns: A string containing a representation of what is in the quadrant.
"""
if name in self._quad_data:
corner, rtype = self._quad_data[name]
locator = self._locate_quadrant(corner)
# We have to have a try/except here as some quadrants
# do not exist if they have no data, e.g. current_state in a host
# with no credentials.
try:
el = sel.element(locator)
except sel_exceptions.NoSuchElementException:
return None
if rtype == 'txt':
return el.text
if rtype == 'img':
try:
img_el = sel.element(
'.//img|.//div[contains(@style, "background-image")]',
root=el)
except sel_exceptions.NoSuchElementException:
raise NoSuchElementException(
('Could not find the image field in quadrant {} of {!r}. '
'This may be an error or a UI change.').format(corner, self))
tag = sel.tag(img_el)
if tag == 'img':
img_name = sel.get_attribute(img_el, 'src')
elif tag == 'div':
style = sel.get_attribute(img_el, 'style')
match = re.search(r'background-image:\s*url\("([^"]+)"\)', style)
if not match:
raise ValueError(
'Could not find the image url in style {!r} of {!r} quadrant {}'.format(
style, self, corner))
img_name = match.groups()[0]
else:
raise ValueError(
'Unknown tag <{}> when parsing quadicon {!r}, quadrant {}'.format(
tag, self, corner))
path, filename = os.path.split(img_name)
root, ext = os.path.splitext(filename)
return root
else:
return object.__getattribute__(self, name)
def __str__(self):
return self.locate()
@classmethod
def _get_title(cls, el):
title = sel.get_attribute(el, "title")
if title is not None:
return title
else:
return sel.get_attribute(el, "data-original-title")
@classmethod
def all(cls, qtype=None, this_page=False):
"""Allows iteration over Quadicons.
Args:
qtype: Quadicon type. Refer to the constructor for reference.
this_page: Whether to look for Quadicons only on current page (do not list pages).
Returns: :py:class:`list` of :py:class:`Quadicon`
"""
from cfme.web_ui import paginator # Prevent circular imports
if this_page:
            pages = (None, )  # Single, current page. Since we don't care about the value, using None
else:
pages = paginator.pages()
for page in pages:
for href in sel.elements("//div[contains(@id, 'quadicon')]/../../../tr/td/a"):
yield cls(cls._get_title(href), qtype)
@classmethod
def first(cls, qtype=None):
return cls(cls.get_first_quad_title(), qtype=qtype)
@staticmethod
def select_first_quad():
fill("//div[contains(@id, 'quadicon')]/../..//input", True)
@staticmethod
def get_first_quad_title():
first_quad = "//div[contains(@id, 'quadicon')]/../../../tr/td/a"
title = sel.get_attribute(first_quad, "title")
if title:
return title
else:
return sel.get_attribute(first_quad, "data-original-title") or "" # To ensure str
@classmethod
def any_present(cls):
try:
cls.get_first_quad_title()
except NoSuchElementException:
return False
except AttributeError:
# This is needed so that if there is no browser, we fail nicely, this in turn is
# needed to make the docs not error.
return False
else:
return True
@property
def name(self):
""" Returns name of the quadicon."""
return self._name
@property
def check_for_single_quadrant_icon(self):
""" Checks if the quad icon is a single quadrant icon."""
for quadrant_name in self._quad_data.iterkeys():
            # These quadrants will be displayed if it is a regular quad
quadrant_id = self._quad_data[quadrant_name][0] # It is a tuple
if sel.is_displayed(self._locate_quadrant(quadrant_id)):
return False
return sel.is_displayed(self._locate_quadrant("e")) # Image has only 'e'
@property
def href(self):
return self.locate().get_attribute('href')
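# Editor's hedged usage sketch (not part of the original module): iterate the quadicons on
# the current page and read one quadrant attribute. The attribute names come from the QUADS
# mapping above; the qtype 'host' is used for illustration.
def _example_host_quad_summary():
    """Illustrative only: map each host name to the text of its 'no_vm' quadrant."""
    return {quad.name: quad.no_vm for quad in Quadicon.all(qtype='host', this_page=True)}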
class DHTMLSelect(Select):
"""
A special Select object for CFME's icon enhanced DHTMLx Select elements.
Args:
loc: A locator.
Returns a :py:class:`cfme.web_ui.DHTMLSelect` object.
"""
@staticmethod
def _log(meth, val=None):
        val_string = " with value {}".format(val) if val else ""
        logger.debug('Filling in DHTMLSelect using (%s)%s', meth, val_string)
def _get_select_name(self):
""" Get's the name reference of the element from its hidden attribute.
"""
root_el = sel.element(self)
el = sel.element("div/input[2]", root=root_el)
name = sel.get_attribute(el, 'name')
return name
@property
def all_selected_options(self):
""" Returns all selected options.
Note: Since the DHTML select can only have one option selected at a time, we
            simply return the first element (the only element).
Returns: A Web element.
"""
return [self.first_selected_option]
@property
def first_selected_option(self):
""" Returns the first selected option in the DHTML select
Note: In a DHTML select, there is only one option selectable at a time.
Returns: A webelement.
"""
name = self._get_select_name()
return browser().execute_script(
            'return {}.getOptionByIndex({}.getSelectedIndex()).content'.format(name, name))
@property
def options(self):
""" Returns a list of options of the select as webelements.
Returns: A list of Webelements.
"""
name = self._get_select_name()
return browser().execute_script('return {}.DOMlist.children'.format(name))
def select_by_index(self, index, _cascade=None):
""" Selects an option by index.
Args:
index: The select element's option by index.
"""
name = self._get_select_name()
if index is not None:
if not _cascade:
self._log('index', index)
browser().execute_script('{}.selectOption({})'.format(name, index))
def select_by_visible_text(self, text):
""" Selects an option by visible text.
Args:
text: The select element option's visible text.
"""
name = self._get_select_name()
if text is not None:
self._log('visible_text', text)
value = browser().execute_script(
'return {}.getOptionByLabel("{}").value'.format(name, text))
self.select_by_value(value, _cascade=True)
def select_by_value(self, value, _cascade=None):
""" Selects an option by value.
Args:
value: The select element's option value.
"""
name = self._get_select_name()
if value is not None:
if not _cascade:
self._log('value', value)
index = browser().execute_script('return {}.getIndexByValue("{}")'.format(name, value))
self.select_by_index(index, _cascade=True)
def locate(self):
return sel.move_to_element(self._loc)
@sel.select.method((DHTMLSelect, basestring))
def select_dhtml(dhtml, s):
dhtml.select_by_visible_text(s)
class Filter(Form):
""" Filters requests pages
This class inherits Form as its base and adds a few methods to assist in filtering
request pages.
Usage:
f = Filter(fields=[
('type', Select('//select[@id="type_choice"]')),
('approved', Input("state_choice__approved")),
            ('denied', Input("state_choice__denied")),
('pending_approval', Input("state_choice__pending_approval")),
('date', Select('//select[@id="time_period"]')),
('reason', Input("reason_text")),
])
f.apply_filter(type="VM Clone", approved=False,
pending_approval=False, date="Last 24 Hours", reason="Just Because")
"""
buttons = {
'default_off': '//div[@id="buttons_off"]/li/a/img[@alt="Set filters to default"]',
'default_on': '//div[@id="buttons_on"]/li/a/img[@alt="Set filters to default"]',
'apply': '//div[@id="buttons_on"]//a[@title="Apply the selected filters"]',
'reset': '//div[@id="buttons_on"]//a[@title="Reset filter changes"]'
}
def default_filter(self):
""" Method to reset the filter back to defaults.
"""
sel.click(self.buttons['default_off'])
sel.click(self.buttons['default_on'])
def reset_filter(self):
""" Method to reset the changes to the filter since last applying.
"""
sel.click(self.buttons['reset'])
def apply_filter(self, **kwargs):
""" Method to apply a filter.
First resets the filter to default and then applies the filter.
Args:
**kwargs: A dictionary of form elements to fill and their values.
"""
self.default_filter()
self.fill(kwargs)
sel.click(self.buttons['apply'])
class MultiSelect(Region):
"""Represents a UI widget where there are two select boxes, one with
possible selections, and another with selected items. Has two
arrow buttons to move items between the two"""
def __init__(self,
available_select=None,
selected_select=None,
select_arrow=None,
deselect_arrow=None):
self.available_select = available_select
self.selected_select = selected_select
self.select_arrow = select_arrow
self.deselect_arrow = deselect_arrow
@sel.select.method((MultiSelect, Sequence))
def select_multiselect(ms, values):
sel.select(ms.available_select, values)
sel.click(ms.select_arrow)
@fill.method((MultiSelect, Sequence))
def fill_multiselect(ms, items):
sel.select(ms, items)
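# Editor's hedged usage sketch (not part of the original module): a MultiSelect is filled by
# selecting values in the "available" box and clicking the select arrow. All locators below
# are assumptions for illustration only.
def _example_pick_report_columns(columns):
    """Illustrative only: move the given column names into the selected box."""
    column_picker = MultiSelect(
        available_select=Select('//select[@id="available_fields"]', multi=True),
        selected_select=Select('//select[@id="selected_fields"]', multi=True),
        select_arrow='//a[@title="Move selected fields right"]/img',
        deselect_arrow='//a[@title="Move selected fields left"]/img')
    fill(column_picker, columns)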
class UpDownSelect(Region):
"""Multiselect with two arrows (up/down) next to it. Eg. in AE/Domain priority selection.
Args:
select_loc: Locator for the select box (without Select element wrapping)
up_loc: Locator of the Move Up arrow.
down_loc: Locator with Move Down arrow.
"""
def __init__(self, select_loc, up_loc, down_loc):
super(UpDownSelect, self).__init__(locators=dict(
select=Select(select_loc, multi=True),
up=up_loc,
down=down_loc,
))
def get_items(self):
return map(lambda el: el.text.encode("utf-8"), self.select.options)
def move_up(self, item):
item = str(item)
assert item in self.get_items()
self.select.deselect_all()
sel.select(self.select, item)
sel.click(self.up)
def move_down(self, item):
item = str(item)
assert item in self.get_items()
self.select.deselect_all()
sel.select(self.select, item)
sel.click(self.down)
def move_top(self, item):
item = str(item)
assert item in self.get_items()
self.select.deselect_all()
while item != self.get_items()[0]:
sel.select(self.select, item)
sel.click(self.up)
def move_bottom(self, item):
item = str(item)
assert item in self.get_items()
self.select.deselect_all()
while item != self.get_items()[-1]:
sel.select(self.select, item)
sel.click(self.down)
@fill.method((UpDownSelect, Sequence))
def _fill_uds_seq(uds, seq):
seq = map(str, seq)
for item in reversed(seq): # reversed because every new item at top pushes others down
uds.move_top(item)
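# Editor's hedged usage sketch (not part of the original module): filling an UpDownSelect
# with a sequence leaves the items ordered top-to-bottom as given, because each one is moved
# to the top in reverse order. The locators below are assumptions for illustration only.
def _example_order_items(names_in_priority_order):
    """Illustrative only: reorder an assumed priority list."""
    uds = UpDownSelect('//select[@id="seq_fields"]',
                       '//img[@alt="Move selected fields up"]',
                       '//img[@alt="Move selected fields down"]')
    fill(uds, names_in_priority_order)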
class ScriptBox(Pretty):
"""Represents a script box as is present on the customization templates pages.
This box has to be activated before keys can be sent. Since this can't be done
until the box element is visible, and some dropdowns change the element, it must
be activated "inline".
    Args:
        name: Name of the underlying JS editor object; if not given, it is picked by version.
        ta_locator: Locator of the textarea that backs the script box.
    """
pretty_attrs = ['locator']
def __init__(self, name=None, ta_locator="//textarea[contains(@id, 'method_data')]"):
self._name = name
self.ta_loc = ta_locator
@property
def name(self):
if not self._name:
self._name = version.pick({
version.LOWEST: 'miqEditor',
'5.5': 'ManageIQ.editor'})
return self._name
def get_value(self):
script = sel.execute_script('return {}.getValue();'.format(self.name))
script = script.replace('\\"', '"').replace("\\n", "\n")
return script
def workaround_save_issue(self):
# We need to fire off the handlers manually in some cases ...
sel.execute_script(
"{}._handlers.change.map(function(handler) {{ handler() }});".format(self.name))
sel.wait_for_ajax()
@fill.method((ScriptBox, Anything))
def fill_scriptbox(sb, script):
"""This function now clears and sets the ScriptBox.
"""
logger.info("Filling ScriptBox {} with\n{}".format(sb.name, script))
sel.execute_script('{}.setValue(arguments[0]);'.format(sb.name), script)
sel.wait_for_ajax()
sel.execute_script('{}.save();'.format(sb.name))
sel.wait_for_ajax()
class CheckboxSelect(Pretty):
"""Class used for filling those bunches of checkboxes I (@mfalesni) always hated to search for.
Can fill by values, text or both. To search the text for the checkbox, you have 2 choices:
    * If the text can be taken from the parent's tag (like
      `<div><input type="checkbox">blablabla</div>` where blablabla is the checkbox's
      description being looked up), you can leave the `text_access_func` unfilled.
* If there is more complicated layout and you don't mind a bit slower operation, you can pass
the text_access_func, which should be like `lambda checkbox_el: get_text_of(checkbox_el)`.
The checkbox `WebElement` is passed to it and the description text is the expected output
of the function.
Args:
search_root: Root element for checkbox search
text_access_func: Function returning descriptive text about passed CB element.
"""
pretty_attrs = ['_root']
def __init__(self, search_root, text_access_func=None):
self._root = search_root
self._access_func = text_access_func
@property
def checkboxes(self):
"""All checkboxes."""
return set(sel.elements(".//input[@type='checkbox']", root=sel.element(self._root)))
@property
def selected_checkboxes(self):
"""Only selected checkboxes."""
return {cb for cb in self.checkboxes if cb.is_selected()}
@property
def selected_values(self):
"""Only selected checkboxes' values."""
return {sel.get_attribute(cb, "value") for cb in self.selected_checkboxes}
@property
def unselected_checkboxes(self):
"""Only unselected checkboxes."""
return {cb for cb in self.checkboxes if not cb.is_selected()}
@property
def unselected_values(self):
"""Only unselected checkboxes' values."""
return {sel.get_attribute(cb, "value") for cb in self.unselected_checkboxes}
def checkbox_by_id(self, id):
"""Find checkbox's WebElement by id."""
return sel.element(
".//input[@type='checkbox' and @id='{}']".format(id), root=sel.element(self._root)
)
def select_all(self):
"""Selects all checkboxes."""
for cb in self.unselected_checkboxes:
sel.check(cb)
def unselect_all(self):
"""Unselects all checkboxes."""
for cb in self.selected_checkboxes:
sel.uncheck(cb)
def checkbox_by_text(self, text):
"""Returns checkbox's WebElement by searched by its text."""
if self._access_func is not None:
for cb in self.checkboxes:
txt = self._access_func(cb)
if txt == text:
return cb
else:
raise NameError("Checkbox with text {} not found!".format(text))
else:
# Has to be only single
return sel.element(
".//*[contains(., '{}')]/input[@type='checkbox']".format(text),
root=sel.element(self._root)
)
def check(self, values):
"""Checking function.
Args:
values: Dictionary with key=CB name, value=bool with status.
Look in the function to see.
"""
for name, value in values.iteritems():
if isinstance(name, sel.ByText):
sel.checkbox(self.checkbox_by_text(str(name)), value)
else:
sel.checkbox(self.checkbox_by_id(name), value)
@fill.method((CheckboxSelect, bool))
def fill_cb_select_bool(select, all_state):
if all_state is True:
return select.select_all()
else:
return select.unselect_all()
@fill.method((CheckboxSelect, list))
@fill.method((CheckboxSelect, set))
def fill_cb_select_set(select, names):
return select.check({k: True for k in names})
@fill.method((CheckboxSelect, Mapping))
def fill_cb_select_dictlist(select, dictlist):
return select.check(dictlist)
@fill.method((CheckboxSelect, basestring))
@fill.method((CheckboxSelect, sel.ByText))
def fill_cb_select_string(select, cb):
return fill(select, {cb})
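# Editor's hedged usage sketch (not part of the original module): check() takes a mapping of
# checkbox id (or sel.ByText label) to the desired state. The root locator, id and label
# below are assumptions, as is the sel.ByText(text) constructor form.
def _example_set_checkboxes(root_locator):
    """Illustrative only: set one box by id and clear another by its visible text."""
    boxes = CheckboxSelect(root_locator)
    boxes.check({
        'send_email': True,                    # looked up by checkbox id
        sel.ByText('Send SNMP trap'): False,   # looked up by the description text
    })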
class ShowingInputs(Pretty):
"""This class abstracts out as a container of inputs, that appear after preceeding was filled.
Args:
*locators: In-order-of-display specification of locators.
Keywords:
min_values: How many values are required (Default: 0)
"""
pretty_attrs = ['locators', 'min_values']
def __init__(self, *locators, **kwargs):
self._locators = locators
self._min = kwargs.get("min_values", 0)
def zip(self, with_values):
if len(with_values) < self._min:
raise ValueError("Not enough values provided ({}, expected {})".format(
len(with_values), self._min)
)
if len(with_values) > len(self._locators):
raise ValueError("Too many values provided!")
return zip(self._locators, with_values)
def __getitem__(self, i):
"""To delegate access to the separate locators"""
return self._locators[i]
@fill.method((ShowingInputs, Sequence))
def _fill_showing_inputs_seq(si, i):
for loc, val in si.zip(i):
fill(loc, val)
@fill.method((ShowingInputs, basestring))
def _fill_showing_inputs_str(si, s):
fill(si, [s])
class MultiFill(object):
"""Class designed to fill the same value to multiple fields
Args:
*fields: The fields where the value will be mirrored
"""
def __init__(self, *fields):
self.fields = fields
@fill.method((MultiFill, object))
def _fill_multi_obj(mf, o):
for field in mf.fields:
fill(field, o)
class DriftGrid(Pretty):
""" Class representing the table (grid) specific to host drift analysis comparison page
"""
def __init__(self, loc="//div[@id='compare-grid']"):
self.loc = loc
def get_cell(self, row_text, col_index):
""" Finds cell element of the grid specified by column index and row text
Args:
row_text: Title text of the cell's row
col_index: Column index of the cell, starting with 0 for 1st data-containing column
Note:
`col_index` of 0 is used for the 2nd actual column in the drift grid, because
the 1st column does not contain headers, only row descriptions.
Returns:
Selenium element of the cell.
"""
self.expand_all_sections()
cell_loc = ".//th[contains(normalize-space(.), '{}')]/../td[{}]".format(row_text,
col_index + 1)
cell = sel.element(cell_loc, root=self.loc)
return cell
def cell_indicates_change(self, row_text, col_index):
""" Finds out if a cell, specified by column index and row text, indicates change
Args:
row_text: Title text of the cell's row
col_index: Column index of the cell
Note:
`col_index` of 0 is used for the 2nd actual column in the drift grid, because
the 1st column does not contain headers, only row descriptions.
Returns:
``True`` if there is a change present, ``False`` otherwise
"""
cell = self.get_cell(row_text, col_index)
# Cell either contains an image
try:
cell_img = sel.element(".//i | .//img", root=cell)
return sel.get_attribute(cell_img, "title") == 'Changed from previous'
# or text
except NoSuchElementException:
if 'color: rgb(33, 160, 236)' in sel.get_attribute(cell, 'style'):
return True
return False
def expand_all_sections(self):
""" Expands all sections to make the row elements found therein available
"""
while True:
# We need to do this one by one because the DOM changes on every expansion
try:
el = sel.element(
'.//div/span[contains(@class, "toggle") and contains(@class, "expand")]',
root=self.loc)
sel.click(el)
except NoSuchElementException:
break
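# Editor's hedged usage sketch (not part of the original module): column index 0 addresses
# the first data column (the grid's second visual column), as the docstrings above note.
# The row title is an assumption for illustration only.
def _example_firmware_drifted():
    """Illustrative only: report whether an assumed row changed in the first data column."""
    return DriftGrid().cell_indicates_change('Firmware Version', 0)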
class ButtonGroup(object):
def __init__(self, key, fieldset=None):
""" A ButtonGroup is a set of buttons next to each other, as is used on the DefaultViews
page.
Args:
            key: The name of the key field text before the button group.
            fieldset: Optional heading text of the fieldset that contains the group; used to
                scope the locator to that fieldset.
        """
self.key = key
self.fieldset = fieldset
@property
def _icon_tag(self):
if version.current_version() >= 5.6:
return 'i'
else:
return 'img'
@property
def _state_attr(self):
if version.current_version() >= 5.6:
return 'title'
else:
return 'alt'
@property
def locator(self):
attr = re.sub(r"&", "&", quoteattr(self.key)) # We don't need it in xpath
path = './/label[contains(@class, "control-label") and ' \
'normalize-space(.)={}]/..'.format(attr)
if self.fieldset:
fieldset = quoteattr(self.fieldset)
path = '//fieldset[./h3[normalize-space(.)={}]]/'.format(fieldset) + path
return path
def locate(self):
""" Moves to the element """
# Use the header locator as the overall table locator
return sel.move_to_element(self.locator)
@property
def locator_base(self):
if version.current_version() < "5.5":
return self.locator + "/td[2]"
else:
return self.locator + "/div"
@property
def active(self):
""" Returns the alt tag text of the active button in thr group. """
loc = sel.element(self.locator_base + '/ul/li[@class="active"]/{}'.format(self._icon_tag))
return loc.get_attribute(self._state_attr)
def status(self, alt):
""" Returns the status of the button identified by the Alt Text of the image. """
active_loc = self.locator_base + '/ul/li/{}[@{}="{}"]'.format(
self._icon_tag, self._state_attr, alt)
try:
sel.element(active_loc)
return True
except NoSuchElementException:
pass
inactive_loc = self.locator_base + '/ul/li/a/{}[@alt="{}"]'.format(self._icon_tag, alt)
try:
sel.element(inactive_loc)
return False
except NoSuchElementException:
pass
def choose(self, alt):
""" Sets the ButtonGroup to select the button identified by the alt text. """
if not self.status(alt):
inactive_loc = self.locator_base + '/ul/li/a/{}[@alt="{}"]'.format(self._icon_tag, alt)
sel.click(inactive_loc)
@fill.method((ButtonGroup, basestring))
def _fill_showing_button_group(tb, s):
tb.choose(s)
class ColorGroup(object):
def __init__(self, key):
""" A ColourGroup is a set of colour buttons next to each other, as is used on the DefaultViews
page.
Args:
key: The name of the key field text before the button group.
"""
self.key = key
self.locator = '//td[@class="key" and text()="{}"]/..'.format(self.key)
def locate(self):
""" Moves to the element """
# Use the header locator as the overall table locator
return sel.move_to_element(self.locator)
@property
def active(self):
""" Returns the alt tag text of the active button in thr group. """
loc = sel.element(self.locator + '/td[2]/div[contains(@title, "selected")]')
color = re.search('The (.*?) theme', loc.get_attribute('title')).groups()[0]
return color
def status(self, color):
""" Returns the status of the color button identified by the Title Text of the image. """
active_loc = self.locator + '/td[2]/div[contains(@title, "{}")' \
'and contains(@title, "selected")]'.format(color)
try:
sel.element(active_loc)
return True
except NoSuchElementException:
pass
inactive_loc = self.locator + '/td[2]/div[contains(@title, "{}")' \
'and contains(@title, "Click")]'.format(color)
try:
sel.element(inactive_loc)
return False
except NoSuchElementException:
pass
def choose(self, color):
""" Sets the ColorGroup to select the button identified by the title text. """
if not self.status(color):
inactive_loc = self.locator + '/td[2]/div[contains(@title, "{}")' \
'and contains(@title, "Click")]'.format(color)
sel.click(inactive_loc)
@fill.method((ColorGroup, basestring))
def _fill_showing_color_group(tb, s):
tb.choose(s)
class DynamicTable(Pretty):
"""A table that can add or remove the rows.
"""
pretty_attrs = "root_loc", "default_row_item"
ROWS = ".//tbody/tr[not(contains(@id, 'new_tr'))]"
DELETE_ALL = {
version.LOWEST: ".//tbody/tr/td/img[@alt='Delete']",
'5.6': './/tbody/tr/td/button/i[contains(@class, "minus")]'
}
def __init__(self, root_loc, default_row_item=None):
self.root_loc = root_loc
self.default_row_item = default_row_item
@property
def rows(self):
return map(lambda r_el: self.Row(self, r_el), sel.elements(self.ROWS, root=self.root_loc))
@cached_property
def header_names(self):
return map(sel.text, sel.elements(".//thead/tr/th", root=self.root_loc))
def click_add(self):
sel.click(sel.element(
".//tbody/tr[@id='new_tr']/td//img | .//tbody/tr[@id='new_tr']/td//i |"
" ./tbody/tr[@id='new_tr']/td/button",
root=self.root_loc))
def click_save(self):
if version.current_version() < "5.6":
sel.click(sel.element(
".//tbody/tr[@id='new_tr']/td//input[@type='image']", root=self.root_loc))
else:
# 5.6+ uses the same button.
self.click_add()
def delete_row(self, by):
pass
def clear(self):
while True:
buttons = sel.elements(self.DELETE_ALL)
if not buttons:
break
sel.click(buttons[0])
def add_row(self, data):
self.click_add()
editing_row = self.Row(self, ".//tbody/tr[@id='new_tr']")
fill(editing_row, data)
self.click_save()
class Row(object):
def __init__(self, table, root):
self.table = table
self.root = root
@property
def values(self):
cells = sel.elements("./td", root=self.root)
return dict(zip(self.table.header_names, map(sel.text, cells)))
@property
def inputs(self):
result = []
for cell in sel.elements("./td", root=self.root):
inputs = sel.elements("./input", root=cell)
if not inputs:
result.append(None)
else:
result.append(inputs[0])
return result
@property
def inputs_for_filling(self):
return dict(zip(self.table.header_names, self.inputs))
@fill.method((DynamicTable.Row, Mapping))
def _fill_dt_row_map(dtr, m):
for name, input in dtr.inputs_for_filling.iteritems():
fill(input, m.get(name))
@fill.method((DynamicTable.Row, Anything))
def _fill_dt_row_other(dtr, anything):
mapping_fields = [name for name in dtr.table.header_names if name.strip()]
if isinstance(anything, (list, tuple)) and len(anything) == len(mapping_fields):
# Create the dict and fill by dict
fill(dtr, dict(zip(mapping_fields, anything)))
else:
# Use the default field
if dtr.table.default_row_item is None:
raise Exception("Cannot fill table row with anything when we dont know the def. field")
fill(dtr, {dtr.table.default_row_item: anything})
@fill.method((DynamicTable, list))
def _fill_dt_list(dt, l, clear_before=False):
if clear_before:
dt.clear()
for item in l:
dt.add_row(item)
@fill.method((DynamicTable, Anything))
def _fill_dt_anything(dt, anything, **kwargs):
fill(dt, [anything], **kwargs)
fill.prefer((DynamicTable, Anything), (object, Mapping))
fill.prefer((DynamicTable.Row, Anything), (object, Mapping))
fill.prefer((Select, types.NoneType), (object, types.NoneType))
fill.prefer((DHTMLSelect, types.NoneType), (object, types.NoneType))
fill.prefer((object, types.NoneType), (Select, object))
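# Hypothetical usage sketch (for illustration only). The table locator, column
# names and values below are assumptions; the point is the flow: build the
# widget, clear it, then let ``fill`` call add_row() once per mapping in the list.
def _example_fill_dynamic_table():
    table = DynamicTable("//div[@id='form_div']//table", default_row_item='Value')
    table.clear()                                   # drop any pre-existing rows
    fill(table, [{'Name': 'owner', 'Value': 'alice'},
                 {'Name': 'env', 'Value': 'qa'}])
    return [row.values for row in table.rows]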
class AngularSelect(Pretty):
BUTTON = "//button[@data-id='{}']"
pretty_attrs = ['_loc', 'none', 'multi', 'exact']
def __init__(self, loc, none=None, multi=False, exact=False):
self.none = none
if isinstance(loc, AngularSelect):
self._loc = loc._loc
else:
self._loc = self.BUTTON.format(loc)
self.multi = multi
self.exact = exact
def locate(self):
return sel.move_to_element(self._loc)
@property
def select(self):
return Select('select#{}'.format(self.did), multi=self.multi)
@property
def did(self):
return sel.element(self._loc).get_attribute('data-id')
@property
def is_broken(self):
return sel.is_displayed(self) and sel.is_displayed(self.select)
@property
def is_open(self):
el = sel.element(self._loc)
return el.get_attribute('aria-expanded') == "true"
def open(self):
sel.click(self._loc)
def select_by_visible_text(self, text):
if not self.is_open:
self.open()
if self.exact:
new_loc = self._loc + '/../div/ul/li/a[normalize-space(.)={}]'.format(
unescape(quoteattr(text)))
else:
new_loc = self._loc + '/../div/ul/li/a[contains(normalize-space(.), {})]'.format(
unescape(quoteattr(text)))
e = sel.element(new_loc)
sel.execute_script("arguments[0].scrollIntoView();", e)
sel.click(new_loc)
def select_by_value(self, value):
value = str(value) # Because what we read from the page is a string
options_map = [a.value for a in self.select.all_options]
index = options_map.index(value)
if not self.is_open:
self.open()
new_loc = self._loc + '/../div/ul/li[@data-original-index={}]'.format(index)
e = sel.element(new_loc)
sel.execute_script("arguments[0].scrollIntoView();", e)
sel.click(new_loc)
@property
def all_options(self):
return self.select.all_options
@property
def classes(self):
"""Combines class from the button and from select."""
return sel.classes(self) | sel.classes("select#{}".format(self.did))
@property
def options(self):
return self.select.options
@property
def first_selected_option(self):
new_loc = self._loc + '/span'
e = sel.element(new_loc)
text = e.text
for option in self.all_options:
if option.text == text:
return option
return None
@property
def first_selected_option_text(self):
new_loc = self._loc + '/span'
e = sel.element(new_loc)
text = e.text
return text
@fill.method((AngularSelect, sel.ByText))
@fill.method((AngularSelect, basestring))
def _fill_angular_string(obj, s):
if s:
obj.select_by_visible_text(s)
else:
return
@fill.method((AngularSelect, sel.ByValue))
def _fill_angular_value(obj, s):
if s.value:
obj.select_by_value(s.value)
else:
return
@fill.method((AngularSelect, list))
def _fill_angular_list(obj, l):
for i in l:
fill(obj, i)
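# Hypothetical usage sketch (for illustration only): the data-id and the option
# text/value are assumptions. Filling with a plain string goes through
# select_by_visible_text(); wrapping the value in sel.ByValue selects by the
# underlying option value instead.
def _example_fill_angular_select():
    provider_type = AngularSelect('emstype', exact=True)
    fill(provider_type, 'OpenStack')             # select by visible text
    fill(provider_type, sel.ByValue('rhevm'))    # select by option value
    return provider_type.first_selected_option_text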
class AngularCalendarInput(Pretty):
pretty_attrs = "input_name", "click_away_element"
def __init__(self, input_name, click_away_element):
self.input_name = input_name
self.click_away_element = click_away_element
@property
def input(self):
return Input(self.input_name, use_id=True)
@property
def clear_button(self):
return sel.element("../a/img", root=self.input)
def locate(self):
return self.input.locate()
def fill(self, value):
if isinstance(value, date):
value = '{}/{}/{}'.format(value.month, value.day, value.year)
else:
value = str(value)
try:
sel.click(self.input)
sel.set_text(self.input, value)
finally:
# To ensure the calendar itself is closed
sel.click(self.click_away_element)
def clear(self):
if sel.text(self.input).strip():
sel.click(self.clear_button)
@fill.method((AngularCalendarInput, Anything))
def _fill_angular_calendar_input(obj, a):
return obj.fill(a)
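# Hypothetical usage sketch (for illustration only): the input name and the
# click-away locator are assumptions. date objects are rendered as
# month/day/year before being typed, and the click away closes the calendar.
def _example_fill_angular_calendar():
    from datetime import date
    retirement = AngularCalendarInput('retirement_date',
                                      "//label[contains(., 'Retirement')]")
    fill(retirement, date(2024, 1, 31))   # typed as '1/31/2024'
    retirement.clear()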
class EmailSelectForm(Pretty):
"""Class encapsulating the e-mail selector, eg. in Control/Alarms editing."""
fields = Region(locators=dict(
from_address=Input('from'),
user_emails={
version.LOWEST: Select("//select[@id='user_email']"),
"5.5": AngularSelect("user_email")},
manual_input=Input('email'),
add_email_manually={
version.LOWEST: "(//img | //i)[@title='Add' and contains(@onclick, 'add_email')]",
"5.5": "//div[@alt='Add']/i"}
))
@property
def to_emails(self):
"""Returns list of e-mails that are selected"""
return [
sel.text(el)
for el
in sel.elements("//a[contains(@href, 'remove_email')]")
]
@property
def user_emails(self):
"""Returns list of e-mail that users inside CFME have so that they can be selected"""
try:
return [
sel.get_attribute(el, "value")
for el
in self.fields.user_emails.options
if len(sel.get_attribute(el, "value").strip()) > 0
]
except NoSuchElementException: # It disappears when empty
return []
def remove_email(self, email):
"""Remove specified e-mail
Args:
email: E-mail to remove
"""
if email in self.to_emails:
sel.click("//a[contains(@href, 'remove_email')][normalize-space(.)='{}']".format(email))
return email not in self.to_emails
else:
return True
@to_emails.setter
def to_emails(self, emails):
"""Function for filling e-mails
Args:
emails: List of e-mails that should be filled. Any existing e-mails that are not in this
variable will be deleted.
"""
if isinstance(emails, basestring):
emails = [emails]
# Delete e-mails that have nothing to do here
for email in self.to_emails:
if email not in emails:
assert self.remove_email(email), "Could not remove e-mail '{}'".format(email)
# Add new
for email in emails:
if email in self.to_emails:
continue
if email in self.user_emails:
sel.select(self.fields.user_emails, sel.ByValue(email))
else:
fill(self.fields.manual_input, email)
sel.click(self.fields.add_email_manually)
assert email in self.to_emails, "Adding e-mail '{}' manually failed!".format(email)
@fill.method((EmailSelectForm, basestring))
@fill.method((EmailSelectForm, list))
@fill.method((EmailSelectForm, set))
@fill.method((EmailSelectForm, tuple))
def fill_email_select_form(form, emails):
form.to_emails = emails
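# Hypothetical usage sketch (for illustration only): the addresses are invented.
# Assigning the recipient list (directly or via ``fill``) removes stale entries,
# picks known users from the drop-down and types the rest in manually, as
# implemented in the to_emails setter above.
def _example_fill_email_select_form():
    form = EmailSelectForm()
    fill(form, ['admin@example.com', 'qa@example.com'])
    return form.to_emails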
class BootstrapSwitch(object):
def __init__(self, input_id):
"""A Bootstrap On/Off switch
Args:
input_id: The HTML ID of the input element associated with the checkbox
"""
self.input_id = input_id
self.loc_container = "//input[@id={}]/..".format(quoteattr(self.input_id))
self.on_off = "{}/span[contains(@class, 'bootstrap-switch-handle-{}')]".format(
self.loc_container, '{}')
def fill(self, val):
"""Convenience function"""
if val:
self.check()
else:
self.uncheck()
def check(self):
"""Checks the bootstrap box"""
el = sel.element(self.on_off.format("off"))
sel.click(el)
def uncheck(self):
"""Unchecks the bootstrap box"""
el = sel.element(self.on_off.format("on"))
sel.click(el)
def is_selected(self):
if sel.is_displayed("//div[contains(@class, 'bootstrap-switch-on')]{}"
.format(self.loc_container)):
return True
else:
return False
@property
def angular_help_block(self):
"""Returns the first visible angular helper text (like 'Required')."""
loc = ("{}/../../../../..//div[contains(@class, 'form-group has-error')]"
.format(self.loc_container))
try:
return sel.text(loc).strip()
except NoSuchElementException:
return None
@fill.method((BootstrapSwitch, bool))
def fill_bootstrap_switch(bs, val):
bs.fill(val)
class OldCheckbox(object):
def __init__(self, input_id):
"""An original HTML checkbox element
Args:
input_id: The HTML ID of the input element associated with the checkbox
"""
self.input_id = input_id
self.locator = "//input[@id={}]".format(quoteattr(input_id))
def fill(self, val):
"""
Checks or unchecks
Args:
value: The value the checkbox should represent as a bool (or None to do nothing)
Returns: Previous state of the checkbox
"""
if val is not None:
selected = self.is_selected()
if selected is not val:
logger.debug("Setting checkbox {} to {}".format(str(self.locator), str(val)))
sel.click(self._el)
return selected
def check(self):
"""Convenience function"""
self.fill(True)
def uncheck(self):
"""Convenience function"""
self.fill(False)
def _el(self):
return sel.move_to_element(self.locator)
def is_selected(self):
return self._el().is_selected()
@fill.method((OldCheckbox, bool))
def fill_oldcheckbox_switch(ob, val):
ob.fill(val)
class CFMECheckbox(Selector):
def __init__(self, input_id):
self.input_id = input_id
super(CFMECheckbox, self).__init__()
def decide(self):
ref_loc = "//input[@id={}]/../span" \
"[contains(@class, 'bootstrap-switch-label')]".format(quoteattr(self.input_id))
if sel.is_displayed(ref_loc):
return BootstrapSwitch(self.input_id)
else:
return OldCheckbox(self.input_id)
@fill.method((CFMECheckbox, bool))
def fill_cfmecheckbox_switch(ob, val):
ob.fill(val)
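# Hypothetical usage sketch (for illustration only): the input id is an
# assumption. CFMECheckbox decides at runtime between BootstrapSwitch and
# OldCheckbox, so callers only deal with a single fill()/check()/uncheck() surface.
def _example_fill_cfme_checkbox():
    smartproxy = CFMECheckbox('server_roles_smartproxy')
    fill(smartproxy, True)    # lands in BootstrapSwitch.check() or OldCheckbox.fill(True)
    fill(smartproxy, False)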
def breadcrumbs():
"""Returns a list of breadcrumbs names if names==True else return as elements.
Returns:
:py:class:`list` of breadcrumbs if they are present, :py:class:`NoneType` otherwise.
"""
elems = sel.elements('//ol[contains(@class, "breadcrumb")]/li')
return elems if elems else None
def breadcrumbs_names():
elems = breadcrumbs()
if elems:
return map(sel.text_sane, elems)
SUMMARY_TITLE_LOCATORS = [
'//h1'
]
SUMMARY_TITLE_LOCATORS = '|'.join(SUMMARY_TITLE_LOCATORS)
def summary_title():
"""Returns a title of the page.
Returns:
:py:class:`str` if present, :py:class:`NoneType` otherwise.
"""
try:
return sel.text_sane(SUMMARY_TITLE_LOCATORS)
except sel.NoSuchElementException:
return None
def browser_title():
"""Returns a title of the page.
Returns:
:py:class:`str` if present, :py:class:`NoneType` otherwise.
"""
try:
return browser().title.split(': ', 1)[1]
except IndexError:
return None
def controller_name():
"""Returns a title of the page.
Returns:
:py:class:`str` if present, :py:class:`NoneType` otherwise.
"""
return sel.execute_script('return ManageIQ.controller;')
def match_location(controller=None, title=None, summary=None):
"""Does exact match of passed data
Returns:
:py:class:`bool`
"""
result = []
if controller:
result.append(controller_name() == controller)
if title:
result.append(browser_title() == title)
if summary:
result.append((summary_title() == summary) or
(sel.is_displayed('//h3[normalize-space(.) = {}]'.format(quote(summary)))))
return all(result)
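# Hypothetical usage sketch (for illustration only): the expected
# controller/title/summary values are assumptions. Only the keyword arguments
# that are passed take part in the comparison, so partial checks are possible.
def _example_match_location():
    return match_location(controller='ems_infra',
                          title='Infrastructure Providers',
                          summary='Infrastructure Providers')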
class StatusBox(object):
""" Status box as seen in containers overview page
Status box modelling.
Args:
name: The name of the status box as it appears in CFME, e.g. 'Nodes'
Returns: A StatusBox instance.
"""
def __init__(self, name):
self.name = name
def value(self):
if "_" in self.name:
self.name = self.name.split('_', 1)[-1]
elem_text = sel.text(
"//span[contains(@class,'card-pf-aggregate-status-count')]"
"/../../../../../div[contains(@status, 'objectStatus.{}')]".format(self.name.lower()))
match = re.search(r'\d+', elem_text)
return int(match.group())
| gpl-2.0 | 137,997,576,260,013,500 | 33.59466 | 103 | 0.573597 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/introspection.py | 1 | 4641 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.postgresql_psycopg2.introspection import \
DatabaseIntrospection
class GeoIntrospectionError(Exception):
pass
class PostGISIntrospection(DatabaseIntrospection):
# Reverse dictionary for PostGIS geometry types not populated until
# introspection is actually performed.
postgis_types_reverse = {}
ignored_tables = DatabaseIntrospection.ignored_tables + [
'geography_columns',
'geometry_columns',
'raster_columns',
'spatial_ref_sys',
'raster_overviews',
]
def get_postgis_types(self):
"""
Returns a dictionary with keys that are the PostgreSQL object
identification integers for the PostGIS geometry and/or
geography types (if supported).
"""
field_types = [
('geometry', 'GeometryField'),
# The value for the geography type is actually a tuple
# to pass in the `geography=True` keyword to the field
# definition.
('geography', ('GeometryField', {'geography': True})),
]
postgis_types = {}
# The OID integers associated with the geometry type may
# be different across versions; hence, this is why we have
# to query the PostgreSQL pg_type table corresponding to the
# PostGIS custom data types.
oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
cursor = self.connection.cursor()
try:
for field_type in field_types:
cursor.execute(oid_sql, (field_type[0],))
for result in cursor.fetchall():
postgis_types[result[0]] = field_type[1]
finally:
cursor.close()
return postgis_types
def get_field_type(self, data_type, description):
if not self.postgis_types_reverse:
# If the PostGIS types reverse dictionary is not populated, do so
# now. In order to prevent unnecessary requests upon connection
# initialization, the `data_types_reverse` dictionary is not updated
# with the PostGIS custom types until introspection is actually
# performed -- in other words, when this function is called.
self.postgis_types_reverse = self.get_postgis_types()
self.data_types_reverse.update(self.postgis_types_reverse)
return super(PostGISIntrospection, self).get_field_type(data_type, description)
def get_geometry_type(self, table_name, geo_col):
"""
The geometry type OID used by PostGIS does not indicate the particular
type of field that a geometry column is (e.g., whether it's a
PointField or a PolygonField). Thus, this routine queries the PostGIS
        metadata tables to determine the geometry type.
"""
cursor = self.connection.cursor()
try:
try:
                # First, check whether this geometry column is in the `geometry_columns` table
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise GeoIntrospectionError
except GeoIntrospectionError:
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geography_columns" '
'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
| mit | -7,231,171,851,057,337,000 | 40.577982 | 95 | 0.563887 | false |
emdodds/LCAversions | timing.py | 1 | 3230 | #This file will time various versions of LCA
from __future__ import division
import numpy as np
import sklearn.preprocessing as skp
from timeit import default_timer as timer
from LCAnumpy import lca as lcan
from LCAfortran import lca as lcaf
from LCAnumbaprog import lca as lcag
def main():
"""Profiles various versions of LCA."""
nshort = 6
tshort = 2
nmed = 3
tmed = 6
nlong = 1
#Setup variables for inference
numDict = int(2048)
numBatch = int(128)
dataSize = int(256)
dictsIn = np.random.randn(numDict,dataSize)
# LCA requires that dictionary be unit norm
dictsIn = skp.normalize(dictsIn, axis=1)
stimuli = np.random.randn(numBatch,dataSize)
batchCoeffs = np.random.randn(numBatch,numDict)
coeffs = np.zeros((numBatch, numDict))
eta = .01
lamb = .05
nIter = 300
adapt = .99
softThresh = 0
thresh = np.random.randn(numBatch)
#LCA
params = """Parameters:
numDict: """+str(numDict)+"""
numBatch: """+str(numBatch)+"""
dataSize: """+str(dataSize)+"""
nIter: """+str(nIter)+"""\n"""
print params
start = timer()
lcan.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcan.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = dt+timer()-start
dt = dt/(n_times)
print '---------------Numpy based LCA----------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
dictsIn = np.array(dictsIn,order='F')
stimuli = np.array(stimuli,order='F')
coeffs = np.array(coeffs,order='F')
batchCoeffs = np.array(batchCoeffs,order='F')
thresh = np.array(thresh,order='F')
start = timer()
lcaf.lca(dictsIn,stimuli,eta,lamb,nIter,softThresh,adapt,coeffs,batchCoeffs,thresh,numDict,numBatch,dataSize)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcaf.lca(dictsIn,stimuli,eta,lamb,nIter,softThresh,adapt,coeffs,batchCoeffs,thresh,numDict,numBatch,dataSize)
dt = dt+timer()-start
dt = dt/(n_times)
print '---------------Fortran based LCA--------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
dictsIn = np.array(dictsIn,dtype=np.float32,order='F')
stimuli = np.array(stimuli,dtype=np.float32,order='F')
start = timer()
lcag.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcag.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = dt+timer()-start
dt = dt/(n_times)
print '----------------GPU based LCA-----------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
if __name__ == '__main__':
main()
| mit | -9,159,149,339,073,999,000 | 28.907407 | 117 | 0.586997 | false |
stcioc/localdocindex | python/scrape_iasi.py | 1 | 5682 | # -------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Stefan
#
# Created: 30.07.2017
# Copyright: (c) Stefan 2017
# Licence: <your licence>
# -------------------------------------------------------------------------------
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from scrape_interface import ScrapeProcessor
import re
# number of entity for which we download
_ENTITY = 4540712
# the only public method
def extractdata(sp):
print("Start processing entity " + str(_ENTITY))
_process_pvpage(sp)
_process_main(sp)
print("End processing entity " + str(_ENTITY))
def _process_main(sp):
# get the iframe address
href = "http://www.primaria-iasi.ro/portal-iasi/pmi/consiliul-local-al-municipiului-iasi/35/hotarari-adoptate"
html = ScrapeProcessor.download_page(href)
soup = BeautifulSoup(html, 'html.parser')
iframe = soup.find("iframe")
iframe_src = iframe["src"]
startHot = 0
while True:
endHot = _process_page(sp, iframe_src, startHot)
if endHot == startHot:
break
startHot = endHot + 1
def _process_pvpage(sp):
href = "http://www.primaria-iasi.ro/portal-iasi/pmi/consiliul-local-al-municipiului-iasi/36/procese-verbale-si-minute-ale-sedintelor-consiliului-local"
html = ScrapeProcessor.download_page(href)
soup = BeautifulSoup(html, 'html.parser')
tbl = soup.find(id="fisierePMI")
for tr in tbl.find_all("tr"):
cells = tr.find_all("td")
if not cells:
continue
link = cells[0].find("a")
title = link.text
title = re.sub(r"\s+", ' ', title).strip()
if "MINUTA" in title.upper() or "M I N U T A" in title.upper():
continue
datetext = ScrapeProcessor.finddate(title)
if datetext == "":
print("ERROR|No date at pv " + title)
continue
year = datetext[:4]
number = ScrapeProcessor.dayinyear(datetext)
if int(year) < ScrapeProcessor.currentyear() and sp.get_processmode() in (ScrapeProcessor.ProcessMode.DELTA, ScrapeProcessor.ProcessMode.DELTA_DOWNLOAD):
return
if link["href"] is None:
print("ERROR|No pdf download link at pv " + title)
continue
link = link["href"].replace("\\", "/")
if not link.startswith("http"):
link = urljoin(href, link)
code, result = _process_doc(sp, year, number, title, "PRVB", datetext, link)
if code == "ERROR":
print("ERROR|" + result + " on " + title + "/" + str(year))
continue
def _process_page(sp, iframe_src, startHot):
src = iframe_src
if startHot > 0:
src = iframe_src + "&Start=" + (str(startHot))
processedHot = 0
print("------------------------------------ "+src + " -------------------------------------")
html = ScrapeProcessor.download_page(src)
soup = BeautifulSoup(html, 'html.parser')
mainDiv = soup.find(id="MainTableViewBody")
mainTable = mainDiv.find("table")
if mainTable is None:
return startHot
for tr in mainTable.find_all("tr"):
cells = tr.find_all("td")
if not cells:
continue
processedHot += 1
year = cells[0].text
number = cells[1].text
title = cells[2].text
title = re.sub(r"\s+", ' ', title).strip()
if int(year) < ScrapeProcessor.currentyear() and sp.get_processmode() in (ScrapeProcessor.ProcessMode.DELTA, ScrapeProcessor.ProcessMode.DELTA_DOWNLOAD):
break
pdflink = None
for a in cells[2].find_all("a"):
if "hotarari.nsf" not in a["href"]:
pdflink = a
if pdflink is None:
print("ERROR|No pdf download link at hot " + str(year) + " - " + str(number) + " " + title)
continue
if pdflink["href"] is None:
print("ERROR|No pdf download link at hot " + str(year) + " - " + str(number) + " " + title)
continue
link = pdflink["href"].replace("\\", "/")
if not link.startswith("http"):
link = urljoin(iframe_src, link)
code, result = _process_doc(sp, year, number, title, "HOTA", "", link)
if code == "ERROR":
print("ERROR|" + result + " on " + title + "/" + str(year))
continue
return startHot + processedHot
def _process_doc(sp, year, number, title, type, date, link):
# add the decision to server
code, result = sp.post_decision(type, number, year, _ENTITY, date, title)
if code == "ERROR":
return code, result
decisionid = result
# download file
code, result = sp.download_file(link)
if code == "ERROR":
sp.post_document("MAIN", decisionid, 0, "ERROR_DOWNLOAD", date, link)
return code, result
fname = result
code, result, filetype = sp.ocr_document(fname)
if code == "ERROR":
sp.post_document("MAIN", decisionid, 0, "ERROR_OCR", date, link)
return code, result
ocrfname = result
outstr, cssstr = ScrapeProcessor.preparehtml(ocrfname, filetype)
return sp.post_document("MAIN", decisionid, 0, outstr, cssstr, link)
if __name__ == '__main__':
localsp = ScrapeProcessor("http://192.168.56.10", "stefan_cioc", "parola1234")
localsp.set_folders("X:/hot/IASII", "X:/hot/IASIO")
localsp.set_processmode(ScrapeProcessor.ProcessMode.FULL)
extractdata(localsp) | mit | -447,915,198,326,569,400 | 33.748428 | 161 | 0.557198 | false |
driftyco/ionitron-issues | tasks/github_issue_submit.py | 1 | 8606 | import github_api
import util
from config.config import CONFIG_VARS as cvar
from datetime import datetime, timedelta
def flag_if_submitted_through_github(repo_username, repo_id, issue):
return False # temporarily disabling ionitron
"""
Flags any issue that is submitted through github's UI, and not the Ionic site.
Adds a label, as well as a comment, to force the issue through the custom form.
@return: whether or not the issue was flagged (bool)
"""
if not issue:
return False
number = issue.get('number')
if not number:
return False
user = issue.get('user')
if not user:
return False
if not issue.get('body'):
return False
if is_valid_issue_opened_source(repo_username, repo_id, issue):
return False
context = {
'issue': issue,
'user': user
}
msg = util.get_template('RESUBMIT_TEMPLATE', context)
github_api.create_issue_comment(repo_username, repo_id, number, msg)
return True
def is_valid_issue_opened_source(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID'], test_is_org_member=True):
if has_content_from_custom_submit_form(issue):
return True
if test_is_org_member:
if github_api.is_org_member(repo_username, issue['user']['login']):
return True
if has_needs_resubmit_content_id(repo_username, repo_id, issue, issue_comments=issue_comments, needs_resubmit_content_id=needs_resubmit_content_id):
return True
return False
def has_content_from_custom_submit_form(issue):
body = issue.get('body')
if body:
return 'is-issue-template' in body
return False
def has_needs_resubmit_content_id(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID']):
comment = get_needs_resubmit_comment(repo_username, repo_id, issue, issue_comments=issue_comments, needs_resubmit_content_id=needs_resubmit_content_id)
return not comment is None
def get_needs_resubmit_comment(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID']):
if issue_comments is None:
issue_comments = github_api.fetch_issue_comments(repo_username, repo_id, issue.get('number'))
if issue_comments and isinstance(issue_comments, list):
for issue_comment in issue_comments:
body = issue_comment.get('body')
if body and needs_resubmit_content_id in body:
return issue_comment
def remove_flag_if_submitted_through_github(repo_username, repo_id, issue, issue_comments=None, is_debug=cvar['DEBUG']):
"""
Removes the notice flag (automated comments and label) if the issue has been
resubmitted through the custom form on the Ionic site.
    @param issue: the issue dict that should be refreshed
@return: whether or not the flag was removed (bool)
"""
if not issue:
return False
number = issue.get('number')
if not number:
return False
if not has_content_from_custom_submit_form(issue):
return False
if not has_needs_resubmit_content_id(repo_username, repo_id, issue, issue_comments=issue_comments):
return False
if not is_debug:
github_api.delete_automated_issue_comments(repo_username, repo_id, number)
return True
def remove_flag_if_not_updated(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID'], remove_form_resubmit_comment_after=cvar['REMOVE_FORM_RESUBMIT_COMMENT_AFTER'], now=datetime.now(), is_debug=cvar['DEBUG']):
if not issue:
return False
number = issue.get('number')
if not number:
return False
if has_content_from_custom_submit_form(issue):
return False
comment = get_needs_resubmit_comment(repo_username, repo_id, issue, issue_comments=issue_comments, needs_resubmit_content_id=needs_resubmit_content_id)
if comment is None:
return False
created_at = util.get_date(comment.get('created_at'))
if created_at is None:
return False
remove_date = created_at + timedelta(days=remove_form_resubmit_comment_after)
if remove_date > now:
return False
if not is_debug:
github_api.delete_automated_issue_comments(repo_username, repo_id, number)
return True
def remove_flag_when_closed(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID'], is_debug=cvar['DEBUG']):
if not issue:
return False
number = issue.get('number')
if not number:
return False
if has_content_from_custom_submit_form(issue):
return False
comment = get_needs_resubmit_comment(repo_username, repo_id, issue, issue_comments=issue_comments, needs_resubmit_content_id=needs_resubmit_content_id)
if comment is None:
return False
comment_id = comment.get('id')
if comment_id is None:
return False
if not is_debug:
github_api.delete_issue_comment(repo_username, repo_id, comment_id, number=number)
return True
def add_label_from_content(repo_username, repo_id, issue):
add_labels = []
title = issue.get('title', '').lower().replace(':', ' ').replace('(', ' ').replace(')', ' ').replace('.', ' ').replace('@', ' ').replace('&', ' ').replace('!', ' ').replace('-', ' ').replace(';', ' ')
body = issue.get('body', '').lower()
body_cleaned = body.replace(' ', '').replace(':', '').replace('*', '').replace('#', '').replace('.', '').replace('(', '').replace(')', '').replace('&', '').replace('!', '').replace(';', '').replace('-', '').replace('<', '').replace('>', '').replace('/', '')
if not has_label(issue, 'docs') and (title.startswith('docs ') or '<span ionic-type>docs</span>' in body):
add_labels.append('docs')
elif not has_label(issue, 'feature') and '<span ionic-type>feat</span>' in body:
add_labels.append('feature')
if not has_label(issue, 'v2') and (title.startswith('v2 ') or (' v2 ' in title) or ('ionic2' in title) or ('ionic 2' in title) or ('ionicv2' in title) or ('ionic2' in body_cleaned) or ('ionicv2' in body_cleaned) or ('ionicversion2' in body_cleaned) or (' v2 ' in body)):
add_labels.append('v2')
elif not has_label(issue, 'v1') and (title.startswith('v1 ') or (' v1 ' in title) or ('ionic1' in title) or ('ionic 1' in title) or ('ionicv1' in title) or ('ionic1' in body_cleaned) or ('ionicv1' in body_cleaned) or ('ionicversion1' in body_cleaned) or (' v1 ' in body)):
add_labels.append('v1')
labels = {
'actionsheet': ['actionsheet', 'action-sheet', 'action sheet'],
'alert': ['alert', 'popup'],
'animation': ['animation', 'animate'],
'checkbox': ['checkbox'],
'footer': ['footer'],
'header': ['header'],
'infinitescroll': ['infinitescroll', 'infinite scroll', 'infinite-scroll'],
'keyboard': ['keyboard'],
'menus': ['menu'],
'modals': ['modal'],
'navigation': ['navigation'],
'platform:android': ['android', 'samsung', 'galaxy', 'moto', 'nexus', 'htc', 'amazon'],
'platform:ios': ['ios', 'iphone', 'ipad', 'ipod'],
'platform:windows': ['wp8', 'windows phone', 'wp10'],
'popover': ['popover'],
'refresher': ['refresher', 'pull-to-refresh', 'ptr', 'pull to refresh', 'pulltorefresh', 'ion-refresher', 'ionrefresher', 'ion refresher'],
'radio': ['radio'],
'range': ['range', 'slider'],
'slidebox': ['slidebox', 'swiper', 'ion-slides', 'ionslides', 'ion slides'],
'select': ['ion-select', 'ionselect', 'ion select'],
'toggle': ['ion-toggle', 'iontoggle', 'ion toggle'],
'virtualscroll': ['virtualscroll', 'virtual scroll', 'virtual-scroll', 'collectionrepeat', 'collection repeat', 'collection-repeat'],
}
for label, keywords in labels.iteritems():
for keyword in keywords:
if keyword in title or keyword in body:
add_labels.append(label)
break
return add_labels
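# Hypothetical example (issue payload invented for illustration): the matcher
# above is keyword based, so an issue shaped like this would typically come
# back with labels such as 'feature', 'v2', 'platform:android' and 'select'.
def _example_add_label_from_content():
    issue = {
        'title': 'v2: ion-select does not open on Android',
        'body': '<span ionic-type>feat</span> steps to reproduce ...',
        'labels': [],
    }
    return add_label_from_content('driftyco', 'ionic', issue)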
def has_label(issue, label_name):
if not issue:
return False
try:
labels = issue.get('labels')
if not labels or not len(labels):
return False
for label in labels:
if label_name == label.get('name'):
return True
except Exception as ex:
print 'has_label error: %s' % ex
return False | mit | -6,871,149,245,831,520,000 | 36.585153 | 276 | 0.635603 | false |
nvazquez/Turtlebots | setup.py | 1 | 13419 | #! /usr/bin/python
#
# Copyright (C) 2008 Red Hat, Inc.
# Copyright (C) 2013 Alan Aguiar
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import operator
import os
import sys
import shutil
import subprocess
import re
import gettext
from optparse import OptionParser
import logging
from fnmatch import fnmatch
from ConfigParser import ConfigParser
IGNORE_DIRS = ['dist', '.git', 'debian', '.pc', 'locale']
IGNORE_FILES = ['.gitignore', 'MANIFEST', '*.pyc', '*~', '*.bak', 'pseudo.po']
def list_files(base_dir, ignore_dirs=None, ignore_files=None):
result = []
base_dir = os.path.abspath(base_dir)
for root, dirs, files in os.walk(base_dir):
if ignore_files:
for pattern in ignore_files:
files = [f for f in files if not fnmatch(f, pattern)]
rel_path = root[len(base_dir) + 1:]
for f in files:
result.append(os.path.join(rel_path, f))
if ignore_dirs and root == base_dir:
for ignore in ignore_dirs:
if ignore in dirs:
dirs.remove(ignore)
return result
class Config(object):
def __init__(self, source_dir=None, dist_dir=None, dist_name=None):
self.source_dir = source_dir or os.getcwd()
self.dist_dir = dist_dir or os.path.join(self.source_dir, 'dist')
self.dist_name = dist_name
self.bundle = None
self.version = None
self.activity_name = None
self.bundle_id = None
self.bundle_name = None
self.bundle_root_dir = None
self.tar_root_dir = None
self.xo_name = None
self.tar_name = None
self.update()
def update(self):
cp = ConfigParser()
info_file = os.path.abspath('./activity/activity.info')
cp.read(info_file)
if cp.has_option('Activity', 'activity_version'):
self.version = cp.get('Activity', 'activity_version')
else:
print 'Activity bundle has invalid version number'
if cp.has_option('Activity', 'name'):
self.activity_name = cp.get('Activity', 'name')
else:
print 'Activity bundle does not specify a name'
if cp.has_option('Activity', 'bundle_id'):
self.bundle_id = cp.get('Activity', 'bundle_id')
else:
print 'Activity bundle does not specify a bundle id'
self.bundle_name = reduce(operator.add, self.activity_name.split())
self.bundle_root_dir = self.bundle_name + '.activity'
self.tar_root_dir = '%s-%s' % (self.bundle_name, self.version)
if self.dist_name:
self.xo_name = self.tar_name = self.dist_name
else:
self.xo_name = '%s-%s.xo' % (self.bundle_name, self.version)
self.tar_name = '%s-%s.tar.bz2' % (self.bundle_name, self.version)
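        # Example of the derived names (activity metadata assumed for
        # illustration): with name = "Turtle Blocks", activity_version = 42 and
        # no explicit dist_name, the fields above become
        #   bundle_name     = 'TurtleBlocks'
        #   bundle_root_dir = 'TurtleBlocks.activity'
        #   xo_name         = 'TurtleBlocks-42.xo'
        #   tar_name        = 'TurtleBlocks-42.tar.bz2'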
class Builder(object):
def __init__(self, config):
self.config = config
def build(self):
self.build_locale()
def build_locale(self):
po_dir = os.path.join(self.config.source_dir, 'po')
if not os.path.isdir(po_dir):
logging.warn('Missing po/ dir, cannot build_locale')
return
locale_dir = os.path.join(self.config.source_dir, 'locale')
if os.path.exists(locale_dir):
shutil.rmtree(locale_dir)
for f in os.listdir(po_dir):
if not f.endswith('.po') or f == 'pseudo.po':
continue
file_name = os.path.join(po_dir, f)
lang = f[:-3]
localedir = os.path.join(self.config.source_dir, 'locale', lang)
mo_path = os.path.join(localedir, 'LC_MESSAGES')
if not os.path.isdir(mo_path):
os.makedirs(mo_path)
mo_file = os.path.join(mo_path, '%s.mo' % self.config.bundle_id)
args = ['msgfmt', '--output-file=%s' % mo_file, file_name]
retcode = subprocess.call(args)
if retcode:
print 'ERROR - msgfmt failed with return code %i.' % retcode
exit (1)
cat = gettext.GNUTranslations(open(mo_file, 'r'))
translated_name = cat.gettext(self.config.activity_name)
linfo_file = os.path.join(localedir, 'activity.linfo')
f = open(linfo_file, 'w')
f.write('[Activity]\nname = %s\n' % translated_name)
f.close()
def get_files(self):
allfiles = list_files(self.config.source_dir,
IGNORE_DIRS, IGNORE_FILES)
return allfiles
class Packager(object):
def __init__(self, config):
self.config = config
self.package_path = None
if not os.path.exists(self.config.dist_dir):
os.mkdir(self.config.dist_dir)
def get_files_in_git(self):
git_ls = subprocess.Popen(['git', 'ls-files'], stdout=subprocess.PIPE,
cwd=self.config.source_dir)
stdout, _ = git_ls.communicate()
if git_ls.returncode:
# Fall back to filtered list
return list_files(self.config.source_dir,
IGNORE_DIRS, IGNORE_FILES)
# pylint: disable=E1103
files = [path.strip() for path in stdout.strip('\n').split('\n')]
        # remove possibly unnecessary tracked files
for pattern in IGNORE_FILES:
files = [f for f in files if not fnmatch(f, pattern)]
return files
class XOPackager(Packager):
def __init__(self, builder):
Packager.__init__(self, builder.config)
self.builder = builder
self.builder.build_locale()
self.package_path = os.path.join(self.config.dist_dir,
self.config.xo_name)
def package(self):
import zipfile
bundle_zip = zipfile.ZipFile(self.package_path, 'w',
zipfile.ZIP_DEFLATED)
for f in self.get_files_in_git():
bundle_zip.write(os.path.join(self.config.source_dir, f),
os.path.join(self.config.bundle_root_dir, f))
locale_dir = os.path.join(self.config.source_dir, 'locale')
locale_files = list_files(locale_dir, IGNORE_DIRS, IGNORE_FILES)
for f in locale_files:
bundle_zip.write(os.path.join(locale_dir, f),
os.path.join(self.config.bundle_root_dir,
'locale', f))
bundle_zip.close()
class SourcePackager(Packager):
def __init__(self, config):
Packager.__init__(self, config)
self.package_path = os.path.join(self.config.dist_dir,
self.config.tar_name)
def package(self):
import tarfile
tar = tarfile.open(self.package_path, 'w:bz2')
for f in self.get_files_in_git():
tar.add(os.path.join(self.config.source_dir, f),
os.path.join(self.config.tar_root_dir, f))
tar.close()
class Installer(object):
IGNORES = ['po/*', 'MANIFEST', 'AUTHORS']
def __init__(self, builder):
self.config = builder.config
self.builder = builder
def should_ignore(self, f):
for pattern in self.IGNORES:
if fnmatch(f, pattern):
return True
return False
def install(self, prefix):
self.builder.build()
activity_path = os.path.join(prefix, 'share', 'sugar', 'activities',
self.config.bundle_root_dir)
source_to_dest = {}
for f in self.builder.get_files():
if self.should_ignore(f):
pass
elif f.startswith('locale/') and f.endswith('.mo'):
source_to_dest[f] = os.path.join(prefix, 'share', f)
else:
source_to_dest[f] = os.path.join(activity_path, f)
for source, dest in source_to_dest.items():
print 'Install %s to %s.' % (source, dest)
path = os.path.dirname(dest)
if not os.path.exists(path):
os.makedirs(path)
shutil.copy(source, dest)
self.config.bundle.install_mime_type(self.config.source_dir)
def cmd_dev(config, args):
"""Setup for development"""
if args:
print 'ERROR - Wrong argument.'
print 'Usage: %prog dev'
exit (1)
bundle_path = os.path.expanduser('~/Activities')
if not os.path.isdir(bundle_path):
os.mkdir(bundle_path)
bundle_path = os.path.join(bundle_path, config.bundle_root_dir)
try:
os.symlink(config.source_dir, bundle_path)
except OSError:
if os.path.islink(bundle_path):
print 'ERROR - The bundle has been already setup for development.'
else:
print 'ERROR - A bundle with the same name is already installed.'
def cmd_dist_xo(config, args):
"""Create a xo bundle package"""
if args:
print 'ERROR - Wrong argument.'
print 'Usage: %prog dist_xo'
exit (1)
packager = XOPackager(Builder(config))
packager.package()
def cmd_fix_manifest(config, args):
'''Add missing files to the manifest (OBSOLETE)'''
print 'WARNING: The fix_manifest command is obsolete.'
print ' The MANIFEST file is no longer used in bundles,'
print ' please remove it.'
def cmd_dist_source(config, args):
"""Create a tar source package"""
if args:
print 'ERROR - Wrong argument.'
print 'Usage: %prog dist_source'
exit (1)
packager = SourcePackager(config)
packager.package()
def cmd_install(config, args):
"""Install the activity in the system"""
parser = OptionParser(usage='usage: %prog install [options]')
parser.add_option('--prefix', dest='prefix', default=sys.prefix,
help='Prefix to install files to')
(suboptions, subargs) = parser.parse_args(args)
if subargs:
print 'ERROR - Wrong argument.'
parser.print_help()
exit (1)
installer = Installer(Builder(config))
installer.install(suboptions.prefix)
def cmd_genpot(config, args):
"""Generate the gettext pot file"""
if args:
print 'ERROR - Wrong argument.'
print 'Usage: %prog genpot'
exit (1)
po_path = os.path.join(config.source_dir, 'po')
if not os.path.isdir(po_path):
os.mkdir(po_path)
python_files = []
for root, dirs_dummy, files in os.walk(config.source_dir):
for file_name in files:
if file_name.endswith('.py'):
file_path = os.path.relpath(os.path.join(root, file_name),
config.source_dir)
python_files.append(file_path)
# First write out a stub .pot file containing just the translated
# activity name, then have xgettext merge the rest of the
# translations into that. (We can't just append the activity name
# to the end of the .pot file afterwards, because that might
# create a duplicate msgid.)
pot_file = os.path.join('po', '%s.pot' % config.bundle_name)
escaped_name = re.sub('([\\\\"])', '\\\\\\1', config.activity_name)
f = open(pot_file, 'w')
f.write('#: activity/activity.info:2\n')
f.write('msgid "%s"\n' % escaped_name)
f.write('msgstr ""\n')
f.close()
args = ['xgettext', '--join-existing', '--language=Python',
'--keyword=_', '--add-comments=TRANS:', '--output=%s' % pot_file]
args += python_files
retcode = subprocess.call(args)
if retcode:
print 'ERROR - xgettext failed with return code %i.' % retcode
exit (1)
def cmd_build(config, args):
"""Build generated files"""
if args:
print 'ERROR - Wrong argument.'
print 'Usage: %prog build'
exit (1)
builder = Builder(config)
builder.build()
def print_commands():
print 'Available commands:\n'
for name, func in globals().items():
if name.startswith('cmd_'):
print '%-20s %s' % (name.replace('cmd_', ''), func.__doc__)
print '\n(Type "./setup.py <command> --help" for help about a ' \
          'particular command\'s options.)'
def start(bundle_name=None):
if bundle_name:
logging.warn('bundle_name deprecated, now comes from activity.info')
parser = OptionParser(usage='[action] [options]')
parser.disable_interspersed_args()
(options_, args) = parser.parse_args()
config = Config()
try:
globals()['cmd_' + args[0]](config, args[1:])
except (KeyError, IndexError):
print 'ERROR - Wrong command or argument.'
print_commands()
exit (1)
if __name__ == '__main__':
start()
| mit | -4,032,114,253,760,926,700 | 30.5 | 78 | 0.581042 | false |
Dunkas12/BeepBoopBot | lib/youtube_dl/extractor/go.py | 1 | 6977 | # coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..utils import (
int_or_none,
determine_ext,
parse_age_limit,
urlencode_postdata,
ExtractorError,
)
class GoIE(AdobePassIE):
_SITE_INFO = {
'abc': {
'brand': '001',
'requestor_id': 'ABC',
},
'freeform': {
'brand': '002',
'requestor_id': 'ABCFamily',
},
'watchdisneychannel': {
'brand': '004',
'requestor_id': 'Disney',
},
'watchdisneyjunior': {
'brand': '008',
'requestor_id': 'DisneyJunior',
},
'watchdisneyxd': {
'brand': '009',
'requestor_id': 'DisneyXD',
}
}
_VALID_URL = r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/(?:[^/]+/)*(?:vdka(?P<id>\w+)|season-\d+/\d+-(?P<display_id>[^/?#]+))' % '|'.join(_SITE_INFO.keys())
_TESTS = [{
'url': 'http://abc.go.com/shows/castle/video/most-recent/vdka0_g86w5onx',
'info_dict': {
'id': '0_g86w5onx',
'ext': 'mp4',
'title': 'Sneak Peek: Language Arts',
'description': 'md5:7dcdab3b2d17e5217c953256af964e9c',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://abc.go.com/shows/after-paradise/video/most-recent/vdka3335601',
'only_matching': True,
}]
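    # How _VALID_URL breaks down the URLs above (taken from the tests here and
    # the comment in _real_extract, shown only for reference):
    #   http://abc.go.com/shows/castle/video/most-recent/vdka0_g86w5onx
    #       -> sub_domain='abc', id='0_g86w5onx', display_id=None
    #   http://freeform.go.com/shows/shadowhunters/episodes/season-2/1-this-guilty-blood
    #       -> sub_domain='freeform', id=None, display_id='this-guilty-blood'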
def _real_extract(self, url):
sub_domain, video_id, display_id = re.match(self._VALID_URL, url).groups()
if not video_id:
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
# There may be inner quotes, e.g. data-video-id="'VDKA3609139'"
# from http://freeform.go.com/shows/shadowhunters/episodes/season-2/1-this-guilty-blood
r'data-video-id=["\']*VDKA(\w+)', webpage, 'video id')
site_info = self._SITE_INFO[sub_domain]
brand = site_info['brand']
video_data = self._download_json(
'http://api.contents.watchabc.go.com/vp2/ws/contents/3000/videos/%s/001/-1/-1/-1/%s/-1/-1.json' % (brand, video_id),
video_id)['video'][0]
title = video_data['title']
formats = []
for asset in video_data.get('assets', {}).get('asset', []):
asset_url = asset.get('value')
if not asset_url:
continue
format_id = asset.get('format')
ext = determine_ext(asset_url)
if ext == 'm3u8':
video_type = video_data.get('type')
data = {
'video_id': video_data['id'],
'video_type': video_type,
'brand': brand,
'device': '001',
}
if video_data.get('accesslevel') == '1':
requestor_id = site_info['requestor_id']
resource = self._get_mvpd_resource(
requestor_id, title, video_id, None)
auth = self._extract_mvpd_auth(
url, video_id, requestor_id, resource)
data.update({
'token': auth,
'token_type': 'ap',
'adobe_requestor_id': requestor_id,
})
else:
self._initialize_geo_bypass(['US'])
entitlement = self._download_json(
'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json',
video_id, data=urlencode_postdata(data), headers=self.geo_verification_headers())
errors = entitlement.get('errors', {}).get('errors', [])
if errors:
for error in errors:
if error.get('code') == 1002:
self.raise_geo_restricted(
error['message'], countries=['US'])
error_message = ', '.join([error['message'] for error in errors])
raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
asset_url += '?' + entitlement['uplynkData']['sessionKey']
formats.extend(self._extract_m3u8_formats(
asset_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False))
else:
f = {
'format_id': format_id,
'url': asset_url,
'ext': ext,
}
if re.search(r'(?:/mp4/source/|_source\.mp4)', asset_url):
f.update({
'format_id': ('%s-' % format_id if format_id else '') + 'SOURCE',
'preference': 1,
})
else:
mobj = re.search(r'/(\d+)x(\d+)/', asset_url)
if mobj:
height = int(mobj.group(2))
f.update({
'format_id': ('%s-' % format_id if format_id else '') + '%dP' % height,
'width': int(mobj.group(1)),
'height': height,
})
formats.append(f)
self._sort_formats(formats)
subtitles = {}
for cc in video_data.get('closedcaption', {}).get('src', []):
cc_url = cc.get('value')
if not cc_url:
continue
ext = determine_ext(cc_url)
if ext == 'xml':
ext = 'ttml'
subtitles.setdefault(cc.get('lang'), []).append({
'url': cc_url,
'ext': ext,
})
thumbnails = []
for thumbnail in video_data.get('thumbnails', {}).get('thumbnail', []):
thumbnail_url = thumbnail.get('value')
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('longdescription') or video_data.get('description'),
'duration': int_or_none(video_data.get('duration', {}).get('value'), 1000),
'age_limit': parse_age_limit(video_data.get('tvrating', {}).get('rating')),
'episode_number': int_or_none(video_data.get('episodenumber')),
'series': video_data.get('show', {}).get('title'),
'season_number': int_or_none(video_data.get('season', {}).get('num')),
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 | 7,284,084,372,982,987,000 | 39.32948 | 161 | 0.457933 | false |
xmnlab/minilab | arch/socket_arch/util.py | 1 | 2721 | # -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict
def extract_devices(sensors):
_DEVICE = defaultdict(dict)
for sensors_group in sensors:
if not sensors[sensors_group]['acquisition_mode']:
continue
# analog channels
for item in sensors[sensors_group]['channels']:
for channel in sorted(item, key=lambda i: item[i]):
device_name = channel.split('/')[0]
if not _DEVICE[device_name]:
_DEVICE[device_name] = {
'name': device_name,
'analog': [],
'digital': [],
'rate': sensors[sensors_group]['rate'],
'minv': sensors[sensors_group]['minv'],
'maxv': sensors[sensors_group]['maxv'],
'seconds_to_acquire': (
sensors[sensors_group]['seconds_to_acquire']
)
}
_DEVICE[device_name]['analog'] += [channel]
# digital channels
if not sensors[sensors_group]['trigger']:
continue
trigger_name = sensors[sensors_group]['trigger']
device_name = trigger_name.split('/')[0]
if not _DEVICE[device_name]:
_DEVICE[device_name] = {
'name': device_name,
'analog': [],
'digital': [],
'rate': 1000,
'minv': -10,
'maxv': +10,
'seconds_to_acquire': 1.0
}
        _DEVICE[device_name]['digital'] += [trigger_name]
return _DEVICE
def extract_channels(sensors):
_SENSORS_GROUPS = defaultdict(dict)
for sensors_name in sensors:
if sensors_name == 'temperature':
continue
if not sensors[sensors_name]['acquisition_mode']:
continue
# analog channels
for item in sensors[sensors_name]['channels']:
for channel in sorted(item, key=lambda i: item[i]):
if not _SENSORS_GROUPS[sensors_name]:
_SENSORS_GROUPS[sensors_name] = []
_SENSORS_GROUPS[sensors_name] += [channel]
# digital channels
if sensors[sensors_name]['trigger']:
_SENSORS_GROUPS[sensors_name] += [sensors[sensors_name]['trigger']]
if (
sensors[sensors_name]['temperature_channels']
):
for channels in sensors[sensors_name]['temperature_channels']:
for channel in channels:
_SENSORS_GROUPS[sensors_name] += [channel]
return _SENSORS_GROUPS
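if __name__ == '__main__':
    # Minimal illustration (sensor configuration invented for this sketch) of
    # how the two helpers above reshape a config dict: one NI device 'Dev1'
    # with two analog channels and one digital trigger line.
    _sensors = {
        'emg': {
            'acquisition_mode': True,
            'rate': 1000, 'minv': -10, 'maxv': 10, 'seconds_to_acquire': 1.0,
            'channels': [{'Dev1/ai0': 0, 'Dev1/ai1': 1}],
            'trigger': 'Dev1/port0/line0',
            'temperature_channels': [],
        },
    }
    print(dict(extract_devices(_sensors)))
    print(dict(extract_channels(_sensors)))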
| gpl-3.0 | 4,599,358,786,626,185,700 | 32.592593 | 79 | 0.502021 | false |
RyanSkraba/beam | sdks/python/apache_beam/runners/interactive/interactive_environment_test.py | 1 | 10454 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apache_beam.runners.interactive.interactive_environment."""
from __future__ import absolute_import
import importlib
import sys
import unittest
import apache_beam as beam
from apache_beam.runners import runner
from apache_beam.runners.interactive import cache_manager as cache
from apache_beam.runners.interactive import interactive_environment as ie
# TODO(BEAM-8288): clean up the work-around of nose tests using Python2 without
# unittest.mock module.
try:
from unittest.mock import call, patch
except ImportError:
from mock import call, patch
# The module name is also a variable in the module.
_module_name = 'apache_beam.runners.interactive.interactive_environment_test'
@unittest.skipIf(sys.version_info < (3, 6),
'The tests require at least Python 3.6 to work.')
class InteractiveEnvironmentTest(unittest.TestCase):
def setUp(self):
self._p = beam.Pipeline()
self._var_in_class_instance = 'a var in class instance'
ie.new_env()
def assertVariableWatched(self, variable_name, variable_val):
self.assertTrue(self._is_variable_watched(variable_name, variable_val))
def assertVariableNotWatched(self, variable_name, variable_val):
self.assertFalse(self._is_variable_watched(variable_name, variable_val))
def _is_variable_watched(self, variable_name, variable_val):
return any([(variable_name, variable_val) in watching for watching in
ie.current_env().watching()])
def _a_function_with_local_watched(self):
local_var_watched = 123 # pylint: disable=possibly-unused-variable
ie.current_env().watch(locals())
def _a_function_not_watching_local(self):
local_var_not_watched = 456 # pylint: disable=unused-variable
def test_watch_main_by_default(self):
self.assertTrue('__main__' in ie.current_env()._watching_set)
# __main__ module has variable __name__ with value '__main__'
self.assertVariableWatched('__name__', '__main__')
def test_watch_a_module_by_name(self):
self.assertFalse(
_module_name in ie.current_env()._watching_set)
self.assertVariableNotWatched('_module_name', _module_name)
ie.current_env().watch(_module_name)
self.assertTrue(
_module_name in
ie.current_env()._watching_set)
self.assertVariableWatched('_module_name', _module_name)
def test_watch_a_module_by_module_object(self):
module = importlib.import_module(_module_name)
self.assertFalse(module in ie.current_env()._watching_set)
self.assertVariableNotWatched('_module_name', _module_name)
ie.current_env().watch(module)
self.assertTrue(module in ie.current_env()._watching_set)
self.assertVariableWatched('_module_name', _module_name)
def test_watch_locals(self):
self.assertVariableNotWatched('local_var_watched', 123)
self.assertVariableNotWatched('local_var_not_watched', 456)
self._a_function_with_local_watched()
self.assertVariableWatched('local_var_watched', 123)
self._a_function_not_watching_local()
self.assertVariableNotWatched('local_var_not_watched', 456)
def test_watch_class_instance(self):
self.assertVariableNotWatched('_var_in_class_instance',
self._var_in_class_instance)
ie.current_env().watch(self)
self.assertVariableWatched('_var_in_class_instance',
self._var_in_class_instance)
def test_fail_to_set_pipeline_result_key_not_pipeline(self):
class NotPipeline(object):
pass
with self.assertRaises(AssertionError) as ctx:
ie.current_env().set_pipeline_result(NotPipeline(),
runner.PipelineResult(
runner.PipelineState.RUNNING))
    self.assertTrue('pipeline must be an instance of apache_beam.Pipeline '
                    'or its subclass' in str(ctx.exception))
def test_fail_to_set_pipeline_result_value_not_pipeline_result(self):
class NotResult(object):
pass
with self.assertRaises(AssertionError) as ctx:
ie.current_env().set_pipeline_result(self._p, NotResult())
    self.assertTrue('result must be an instance of '
                    'apache_beam.runners.runner.PipelineResult or its '
                    'subclass' in str(ctx.exception))
def test_set_pipeline_result_successfully(self):
class PipelineSubClass(beam.Pipeline):
pass
class PipelineResultSubClass(runner.PipelineResult):
pass
pipeline = PipelineSubClass()
pipeline_result = PipelineResultSubClass(runner.PipelineState.RUNNING)
ie.current_env().set_pipeline_result(pipeline, pipeline_result)
self.assertIs(ie.current_env().pipeline_result(pipeline), pipeline_result)
def test_determine_terminal_state(self):
for state in (runner.PipelineState.DONE,
runner.PipelineState.FAILED,
runner.PipelineState.CANCELLED,
runner.PipelineState.UPDATED,
runner.PipelineState.DRAINED):
ie.current_env().set_pipeline_result(self._p, runner.PipelineResult(
state))
self.assertTrue(ie.current_env().is_terminated(self._p))
for state in (runner.PipelineState.UNKNOWN,
runner.PipelineState.STARTING,
runner.PipelineState.STOPPED,
runner.PipelineState.RUNNING,
runner.PipelineState.DRAINING,
runner.PipelineState.PENDING,
runner.PipelineState.CANCELLING,
runner.PipelineState.UNRECOGNIZED):
ie.current_env().set_pipeline_result(self._p, runner.PipelineResult(
state))
self.assertFalse(ie.current_env().is_terminated(self._p))
def test_evict_pipeline_result(self):
pipeline_result = runner.PipelineResult(runner.PipelineState.DONE)
ie.current_env().set_pipeline_result(self._p, pipeline_result)
self.assertIs(ie.current_env().evict_pipeline_result(self._p),
pipeline_result)
self.assertIs(ie.current_env().pipeline_result(self._p), None)
def test_pipeline_result_is_none_when_pipeline_absent(self):
self.assertIs(ie.current_env().pipeline_result(self._p), None)
self.assertIs(ie.current_env().is_terminated(self._p), True)
self.assertIs(ie.current_env().evict_pipeline_result(self._p), None)
@patch('atexit.register')
def test_no_cleanup_when_cm_none(self,
mocked_atexit):
ie.new_env(None)
mocked_atexit.assert_not_called()
@patch('atexit.register')
def test_cleanup_when_cm_not_none(self,
mocked_atexit):
ie.new_env(cache.FileBasedCacheManager())
mocked_atexit.assert_called_once()
@patch('atexit.register')
@patch('atexit.unregister')
def test_cleanup_unregistered_when_not_none_cm_cleared(self,
mocked_unreg,
mocked_reg):
ie.new_env(cache.FileBasedCacheManager())
mocked_reg.assert_called_once()
mocked_unreg.assert_not_called()
ie.current_env().set_cache_manager(None)
mocked_reg.assert_called_once()
mocked_unreg.assert_called_once()
@patch('atexit.register')
@patch('atexit.unregister')
def test_cleanup_reregistered_when_cm_changed(self,
mocked_unreg,
mocked_reg):
ie.new_env(cache.FileBasedCacheManager())
mocked_unreg.assert_not_called()
ie.current_env().set_cache_manager(cache.FileBasedCacheManager())
mocked_unreg.assert_called_once()
mocked_reg.assert_has_calls([call(ie.current_env().cleanup),
call(ie.current_env().cleanup)])
@patch('apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.cleanup')
def test_cleanup_invoked_when_new_env_replace_not_none_env(self,
mocked_cleanup):
ie._interactive_beam_env = None
ie.new_env(cache.FileBasedCacheManager())
mocked_cleanup.assert_not_called()
ie.new_env(cache.FileBasedCacheManager())
mocked_cleanup.assert_called_once()
@patch('apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.cleanup')
def test_cleanup_invoked_when_cm_changed(self,
mocked_cleanup):
ie._interactive_beam_env = None
ie.new_env(cache.FileBasedCacheManager())
ie.current_env().set_cache_manager(cache.FileBasedCacheManager())
mocked_cleanup.assert_called_once()
@patch('atexit.register')
@patch('atexit.unregister')
def test_cleanup_registered_when_none_cm_changed(self,
mocked_unreg,
mocked_reg):
ie.new_env(None)
mocked_reg.assert_not_called()
mocked_unreg.assert_not_called()
ie.current_env().set_cache_manager(cache.FileBasedCacheManager())
mocked_reg.assert_called_once()
mocked_unreg.assert_not_called()
@patch('atexit.register')
@patch('atexit.unregister')
def test_noop_when_cm_is_not_changed(self,
mocked_unreg,
mocked_reg):
cache_manager = cache.FileBasedCacheManager()
ie.new_env(cache_manager)
mocked_unreg.assert_not_called()
mocked_reg.assert_called_once()
ie.current_env().set_cache_manager(cache_manager)
mocked_unreg.assert_not_called()
mocked_reg.assert_called_once()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 5,558,994,031,803,054,000 | 40.320158 | 79 | 0.65946 | false |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/Precond/MHDstabtest.py | 1 | 11477 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
m = 7
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
# parameters["form_compiler"]["quadrature_degree"] = 6
# parameters = CP.ParameterSetup()
mesh = UnitSquareMesh(nn,nn)
order = 1
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "DG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1.0
Mu_m =1e1
MU = 1.0
IterType = 'Full'
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
# MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, 1e-6)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG")
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
# pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
# plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"DG")
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"DG")
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
# u = b.duplicate()
# P = CP.Assemble(PP)
u_is = PETSc.IS().createGeneral(range(FSpaces[0].dim()))
b_is = PETSc.IS().createGeneral(range(FSpaces[0].dim()+FSpaces[1].dim(),FSpaces[0].dim()+FSpaces[1].dim()+FSpaces[2].dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-3
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
tic()
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
A,b = CP.Assemble(AA,bb)
# if iter == 1
if iter == 1:
u = b.duplicate()
F = A.getSubMatrix(u_is,u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
F = A.getSubMatrix(u_is,u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
# if iter == 1:
if iter == 1:
u = b.duplicate()
print ("{:40}").format("MHD assemble, time: "), " ==> ",("{:4f}").format(toc()), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
print "Inititial guess norm: ", u.norm()
stime = time.time()
# ksp.solve(b, u)
u,it1,it2 = S.solve(A,b,u,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol,HiptmairMatrices,KSPlinearfluids,kspF,Fp,MatrixLinearFluids,kspFp)
Soltime = time.time()- stime
NSits += it1
Mits +=it2
SolutionTime = SolutionTime +Soltime
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
# if eps > 100 and iter > 3:
# print 22222
# break
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
# iter = 10000
# u_k,b_k,epsu,epsb=Iter.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
ue =u0
pe = p0
be = b0
re = r0
# ExactSolution = [ue,pe,be,re]
# errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim, "DG")
# if xx > 1:
# l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
# H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
# l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
import pandas as pd
# LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
# LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
# LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
# pd.set_option('precision',3)
# LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
# print LatexTable
# print "\n\n Magnetic convergence"
# MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
# MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
# MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
# print MagneticTable
# print "\n\n Lagrange convergence"
# LagrangeTitles = ["l","SolTime","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
# LagrangeValues = np.concatenate((level,SolTime,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
# LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
# pd.set_option('precision',3)
# LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
# print LagrangeTable
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(interpolate(ue,Velocity))
# plot(p_k)
# plot(interpolate(pe,Pressure))
# plot(b_k)
# plot(interpolate(be,Magnetic))
# plot(r_k)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
| mit | -7,340,128,029,880,071,000 | 31.979885 | 168 | 0.626906 | false |
timlinux/inasafe | safe/gui/gui_utilities.py | 1 | 2330 | # coding=utf-8
"""GUI utilities for the dock and the multi Exposure Tool."""
from PyQt4.QtCore import Qt
from qgis.core import QgsMapLayerRegistry
from safe.utilities.i18n import tr
def layer_from_combo(combo):
"""Get the QgsMapLayer currently selected in a combo.
Obtain QgsMapLayer id from the userrole of the QtCombo and return it as a
QgsMapLayer.
:returns: The currently selected map layer a combo.
:rtype: QgsMapLayer
"""
index = combo.currentIndex()
if index < 0:
return None
layer_id = combo.itemData(index, Qt.UserRole)
layer = QgsMapLayerRegistry.instance().mapLayer(layer_id)
return layer
def add_ordered_combo_item(
combo, text, data=None, count_selected_features=None, icon=None):
"""Add a combo item ensuring that all items are listed alphabetically.
Although QComboBox allows you to set an InsertAlphabetically enum
this only has effect when a user interactively adds combo items to
an editable combo. This we have this little function to ensure that
combos are always sorted alphabetically.
:param combo: Combo box receiving the new item.
:type combo: QComboBox
:param text: Display text for the combo.
:type text: str
:param data: Optional UserRole data to be associated with the item.
:type data: QVariant, str
:param count_selected_features: A count to display if the layer has some
selected features. Default to None, nothing will be displayed.
:type count_selected_features: None, int
:param icon: Icon to display in the combobox.
:type icon: QIcon
"""
if count_selected_features is not None:
text += ' (' + tr('{count} selected features').format(
count=count_selected_features) + ')'
size = combo.count()
for combo_index in range(0, size):
item_text = combo.itemText(combo_index)
# see if text alphabetically precedes item_text
if cmp(text.lower(), item_text.lower()) < 0:
if icon:
combo.insertItem(combo_index, icon, text, data)
else:
combo.insertItem(combo_index, text, data)
return
# otherwise just add it to the end
if icon:
combo.insertItem(size, icon, text, data)
else:
combo.insertItem(size, text, data)
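# --- Illustrative usage sketch (not part of the original module) ---
# Populates a combo box from an iterable of QgsMapLayer objects so the entries
# stay alphabetical; the helper name and call pattern below are assumptions.
def _example_populate_layer_combo(combo, layers):
    """Fill `combo` with layer names, storing each layer id as item data."""
    for layer in layers:
        add_ordered_combo_item(combo, layer.name(), data=layer.id())
    # The selection can later be resolved back to a layer with:
    # selected_layer = layer_from_combo(combo)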
| gpl-3.0 | -2,640,115,686,903,604,700 | 31.361111 | 77 | 0.672103 | false |
krig/jamaendo | jamaui/ossohelper.py | 1 | 4468 | # ossohelper.py - Helper to osso functions
#
# Copyright (c) 2008 INdT - Instituto Nokia de Tecnologia
#
# This file is part of carman-python.
# Modified for inclusion in Panucci (June 2009).
#
# carman-python is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# carman-python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
__log = logging.getLogger(__name__)
try:
import osso
__osso_imported__ = True
except ImportError:
__log.warning('osso module not found - are you running on a desktop?')
__osso_imported__ = False
DEVICE_STATE_NORMAL = "normal"
OSSO_DEVSTATE_MODE_FILE = "/tmp/.libosso_device_mode_cache"
__osso_context__ = None
__osso_application__ = None
__osso_device_state__ = None
def application_init(app_name, app_version):
"""
Osso application init.
"""
global __osso_context__, __osso_device_state__, __osso_application__
if __osso_imported__:
if has_osso():
__log.warning('osso application was already called. Ignoring...')
return
try:
__osso_context__ = osso.Context(app_name, app_version, False)
except Exception, err:
__log.warning('osso module found but could not be initialized: %s',
err)
__osso_context__ = None
return
try:
__osso_application__ = osso.Application(__osso_context__)
except Exception, err:
__log.warning('error creating osso application: %s' % err)
__osso_application__ = None
__log.debug( 'osso application init sent - %s v%s', app_name,
app_version)
__osso_device_state__ = osso.DeviceState(__osso_context__)
# application_init
def application_exit():
"""
Osso application exit.
"""
#if __osso_application__ is not None and __osso_context__ is not None:
# try:
# #__osso_application__.close()
# __osso_context__.close()
# except Exception, err:
# __log.warning('application end could not be sent: %s' % err)
# __log.debug('osso application end sent')
# application_exit
def application_top(app_name):
"""
Osso application top.
"""
if __osso_imported__ and __osso_application__:
try:
__osso_application__.application_top(app_name)
except Exception, err:
__log.warning( "Error calling application top for %s: %s",
app_name, err)
__log.info('osso application top for %s sent', app_name)
# application_top
def has_osso():
"""
Return if the osso module was initialized and all objects were created
without any problem
"""
return __osso_imported__ and not None in ( __osso_context__,
__osso_device_state__,
__osso_application__ )
# has_osso
def display_on():
"""
Turn on the display
"""
if __osso_device_state__ is not None:
__osso_device_state__.display_state_on()
__osso_device_state__.display_blanking_pause()
__log.info('osso display on')
# display_on
def display_blanking_pause():
"""
Keep the backlight on. Should be called every 45 seconds.
"""
if __osso_device_state__ is not None:
__osso_device_state__.display_blanking_pause()
__log.debug('osso blanking screen')
#display_blanking_pause
def get_device_state():
if __osso_device_state__ is not None:
cache_file_name = OSSO_DEVSTATE_MODE_FILE + "-" + str(os.getuid())
try:
state = os.readlink(cache_file_name)
except:
state = None
if not state:
__log.debug( "Failure to read device state from %s",
cache_file_name)
state = DEVICE_STATE_NORMAL
return state
else:
return DEVICE_STATE_NORMAL
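# --- Illustrative usage sketch (not part of the original module) ---
# Typical call sequence for a media application that wants to keep the screen
# alive while playing; the application name/version are assumptions and
# display_blanking_pause() would normally be re-invoked every ~45 seconds.
def _example_keep_display_alive():
    application_init('com.example.player', '0.1')
    if has_osso() and get_device_state() == DEVICE_STATE_NORMAL:
        display_on()
        display_blanking_pause()
# _example_keep_display_alive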
| gpl-3.0 | -6,015,379,975,474,483,000 | 31.143885 | 79 | 0.596016 | false |
matmodlab/matmodlab2 | matmodlab2/ext_helpers/build_ext.py | 1 | 9765 | #!/usr/bin/env python
import os
import re
import sys
import glob
import shutil
import logging
import tempfile
from argparse import ArgumentParser
from subprocess import Popen, STDOUT
from contextlib import contextmanager
from matmodlab2.core.logio import get_logger
from matmodlab2.core.environ import environ
from matmodlab2.core.misc import is_listlike
ext_support_dir = os.path.dirname(os.path.realpath(__file__))
aba_support_dir = os.path.join(ext_support_dir, '../umat')
# "Lite" version of blas/lapack
lapack_lite = os.path.join(ext_support_dir, 'blas_lapack-lite.f')
lapack_lite_obj = os.path.splitext(lapack_lite)[0] + '.o'
assert os.path.isfile(lapack_lite)
# Fortran I/O
mml_io = os.path.join(ext_support_dir, 'mml_io.f90')
assert os.path.isfile(mml_io)
# Abaqus related files
aba_sdvini = os.path.join(aba_support_dir, 'aba_sdvini.f90')
assert os.path.isfile(aba_sdvini)
aba_utils = os.path.join(aba_support_dir, 'aba_utils.f90')
assert os.path.isfile(aba_utils)
umat_pyf = os.path.join(aba_support_dir, 'umat.pyf')
assert os.path.isfile(umat_pyf)
uhyper_pyf = os.path.join(aba_support_dir, 'uhyper.pyf')
assert os.path.isfile(uhyper_pyf)
tensalg_f90 = os.path.join(aba_support_dir, 'tensalg.f90')
assert os.path.isfile(tensalg_f90)
uhyper_wrap_f90 = os.path.join(aba_support_dir, 'uhyper_wrap.f90')
assert os.path.isfile(uhyper_wrap_f90)
class ExtensionNotBuilt(Exception):
pass
def which(name):
"""Find the executable name on PATH"""
for path in os.getenv('PATH', '').split(os.pathsep):
if not os.path.isdir(path):
continue
if os.path.isfile(os.path.join(path, name)):
return os.path.join(path, name)
return None
def clean_f2py_tracks(dirname):
if not os.path.isdir(dirname):
return
for pat in ('*.so.dSYM', '*-f2pywrappers2.*', '*module.c'):
for item in glob.glob(os.path.join(dirname, pat)):
if os.path.isdir(item):
shutil.rmtree(item)
else:
os.remove(item)
def build_extension_module(name, sources, include_dirs=None, verbose=False,
user_ics=False, fc=None, cwd=None):
"""Build the fortran extension module (material model)
Parameters
----------
name : str
The name of the extension module to build
sources : list of str
List of source files
include_dirs : list of str
List of extra include directories
verbose : bool
Write output to stdout if True, otherwise suppress stdout
user_ics : bool
List of source files includes source defining subroutine SDVINI.
Applicable only for Abaqus umat and uhyper.
fc : str
Fortran compiler
Notes
-----
To build abaqus umat, the name must be 'umat'
To build abaqus uhyper, the name must be 'uhyper'
"""
the_loglevel = environ.loglevel
environ.loglevel = logging.DEBUG
logger = get_logger('build-ext')
fc = fc or which('gfortran')
if fc is None:
raise OSError('Fortran compiler not found')
# Check source files
for source_file in sources:
if not os.path.isfile(source_file):
raise OSError('{0!r}: file not found'.format(source_file))
if name != '_matfuncs_sq3':
sources.append(mml_io)
# We'll add the object file back in
if lapack_lite in sources:
sources.remove(lapack_lite)
# Everyone get lapack!
if lapack_lite_obj not in sources:
sources.append(lapack_lite_obj)
if not os.path.isfile(lapack_lite_obj):
_build_blas_lapack(logger, fc)
include_dirs = include_dirs or []
umat = name.lower() == 'umat'
uhyper = name.lower() == 'uhyper'
if umat or uhyper:
# Build the umat module - add some Abaqus utility files
clean_f2py_tracks(aba_support_dir)
name = '_umat' if umat else '_uhyper'
sources.append(aba_utils)
if umat:
sources.append(umat_pyf)
elif uhyper:
sources.extend([uhyper_pyf, tensalg_f90, uhyper_wrap_f90])
if not user_ics:
sources.append(aba_sdvini)
include_dirs = include_dirs + [aba_support_dir]
if any(' ' in x for x in sources):
logger.warning('File paths with spaces are known to fail to build')
command = ['f2py', '-c']
# Build the fortran flags argument
fflags = ['-Wno-unused-dummy-argument', '-fPIC', '-shared']
if os.getenv('FCFLAGS'):
fflags.extend(os.environ['FCFLAGS'].split())
command.extend(['--f77flags={0!r}'.format(' '.join(fflags)),
'--f90flags={0!r}'.format(' '.join(fflags))])
command.extend(['--include-paths', ':'.join(include_dirs)])
command.extend(['-m', name])
command.extend(sources)
logger.info('building extension module {0!r}... '.format(name),
extra={'continued':1})
logfile = None
cwd = cwd or os.getcwd()
if verbose:
# Call directly - LOTS of output!
p = Popen(command, cwd=cwd)
p.wait()
elif environ.notebook:
from IPython.utils import io
with io.capture_output() as captured:
p = Popen(command, cwd=cwd)
p.wait()
else:
logfile = os.path.join(cwd, 'build.log')
with stdout_redirected(to=logfile), merged_stderr_stdout():
p = Popen(command, cwd=cwd)
p.wait()
logger.info('done')
if logfile is not None and logfile != sys.stdout:
os.remove(logfile)
# Return the loglevel back to what it was
environ.loglevel = the_loglevel
clean_f2py_tracks(cwd)
if p.returncode != 0:
logger.error('Failed to build')
raise ExtensionNotBuilt(name)
return 0
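# --- Illustrative usage sketch (not part of the original module) ---
# Building a hypothetical Abaqus umat that ships its own SDVINI routine; the
# Fortran file names are assumptions, not files provided by matmodlab2.
def _example_build_umat():
    sources = ['my_umat.f90', 'my_sdvini.f90']
    return build_extension_module('umat', sources, user_ics=True, verbose=True)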
def _build_blas_lapack(logger, fc):
logger.info('building blas_lapack-lite... ', extra={'continued':1})
cmd = [fc, '-fPIC', '-shared', '-O3', lapack_lite, '-o' + lapack_lite_obj]
proc = Popen(cmd, stdout=open(os.devnull, 'a'), stderr=STDOUT,
cwd=ext_support_dir)
proc.wait()
if proc.returncode == 0:
logger.info('done')
else:
logger.info('failed')
return proc.returncode
def fileno(file_or_fd):
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
return fd
@contextmanager
def stdout_redirected(to=os.devnull, stdout=None):
"""From: http://stackoverflow.com/questions/4675728/
redirect-stdout-to-a-file-in-python/22434262#22434262
"""
if stdout is None:
stdout = sys.stdout
stdout_fd = fileno(stdout)
# copy stdout_fd before it is overwritten
#NOTE: `copied` is inheritable on Windows when duplicating a standard stream
with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
stdout.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(fileno(to), stdout_fd) # $ exec >&to
except ValueError: # filename
with open(to, 'wb') as to_file:
os.dup2(to_file.fileno(), stdout_fd) # $ exec > to
try:
yield stdout # allow code to be run with the redirected stdout
finally:
# restore stdout to its previous value
#NOTE: dup2 makes stdout_fd inheritable unconditionally
stdout.flush()
os.dup2(copied.fileno(), stdout_fd) # $ exec >&copied
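# --- Illustrative usage sketch (not part of the original module) ---
# Sends everything a child process prints to a log file; the log-file name and
# the gfortran invocation are assumptions.  merged_stderr_stdout (below) folds
# stderr into the same destination, mirroring how build_extension_module logs.
def _example_silence_subprocess():
    with stdout_redirected(to='build_example.log'), merged_stderr_stdout():
        Popen(['gfortran', '--version']).wait()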
def merged_stderr_stdout(): # $ exec 2>&1
return stdout_redirected(to=sys.stdout, stdout=sys.stderr)
def build_extension_module_as_subprocess(name, sources,
include_dirs=None, verbose=False,
user_ics=False, fc=None,
cwd=None):
"""Build the extension module, but call as a subprocess.
Parameters
----------
Same as build_extension_module
Notes
-----
    This function exists since distutils can only be initialized once and we want to build several different extensions.
"""
    build_extension_module(name, sources, include_dirs=include_dirs,
                           verbose=verbose, user_ics=user_ics, fc=fc, cwd=cwd)
return 0
def build_mml_matrix_functions():
"""Build the mml linear algebra library"""
name = '_matfuncs_sq3'
mfuncs_pyf = os.path.join(ext_support_dir, 'matrix_funcs.pyf')
mfuncs_f90 = os.path.join(ext_support_dir, 'matrix_funcs.f90')
dgpadm_f = os.path.join(ext_support_dir, 'dgpadm.f')
sources = [mfuncs_pyf, mfuncs_f90, lapack_lite, dgpadm_f]
package_path = os.path.join(ext_support_dir, '../core')
command = ['f2py', '-c']
command.extend(sources)
p = Popen(command, cwd=package_path)
p.wait()
if p.returncode != 0:
raise ExtensionNotBuilt(name)
return 0
def main():
p = ArgumentParser()
p.add_argument('name')
p.add_argument('sources', nargs='*')
p.add_argument('--include-dirs', action='append', default=None)
p.add_argument('--verbose', action='store_true', default=False)
p.add_argument('--package-path', default=None)
p.add_argument('--user-ics', action='store_true', default=False)
p.add_argument('--fc', default=False)
args = p.parse_args()
if args.name == 'matfuncs':
return build_mml_matrix_functions()
if not args.sources:
raise ValueError('Missing sources argument')
build_extension_module(args.name, args.sources,
include_dirs=args.include_dirs,
verbose=args.verbose,
user_ics=args.user_ics,
fc=args.fc, cwd=args.package_path)
if __name__ == '__main__':
main()
| bsd-3-clause | -2,366,992,133,191,876,600 | 33.02439 | 123 | 0.619662 | false |
prescott66/Cnchi | src/encfs.py | 1 | 3865 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# encfs.py
#
# Copyright 2013 Antergos
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Configures Antergos to encrypt user's home with encFS """
#import logging
import glob
import os
import shutil
import subprocess
def setup(username, dest_dir):
""" Encrypt user's home folder """
# encfs pam_mount packages are needed
# pam_encfs from AUR
# https://wiki.debian.org/TransparentEncryptionForHomeFolder
# Edit configuration files
name = os.path.join(dest_dir, "etc/security/pam_encfs.conf")
shutil.copy(name, name + ".cnchi")
with open(name, "r") as pam_encfs:
lines = pam_encfs.readlines()
i = len(lines) - 1
lines[i] = "# " + lines[i]
with open(name, "w") as pam_encfs:
        pam_encfs.writelines(lines)
pam_encfs.write("# Added by Cnchi - Antergos Installer\n")
pam_encfs.write("-\t/home/.encfs\t-\t-v\t-\n")
name = os.path.join(dest_dir, "etc/security/pam_env.conf")
shutil.copy(name, name + ".cnchi")
with open(name, "a") as pam_env:
pam_env.write("# Added by Cnchi - Antergos Installer\n")
pam_env.write("# Set the ICEAUTHORITY file location to allow GNOME to start on encfs $HOME\n")
pam_env.write("ICEAUTHORITY DEFAULT=/tmp/.ICEauthority_@{PAM_USER}\n")
name = os.path.join(dest_dir, "etc/fuse.conf")
shutil.copy(name, name + ".cnchi")
with open(name, "a") as fuse_conf:
fuse_conf.write("# Added by Cnchi - Antergos Installer\n")
fuse_conf.write("user_allow_other\n")
name = os.path.join(dest_dir, "etc/pam.d/system-login")
shutil.copy(name, name + ".cnchi")
with open(name, "a") as system_login:
system_login.write("# Added by Cnchi - Antergos Installer\n")
system_login.write("session required\tpam_encfs.so\n")
system_login.write("session optional\tpam_mount.so\n")
name = os.path.join(dest_dir, "etc/pam.d/system-auth")
shutil.copy(name, name + ".cnchi")
with open(name, "a") as system_auth:
system_auth.write("# Added by Cnchi - Antergos Installer\n")
system_auth.write("auth sufficient\tpam_encfs.so\n")
system_auth.write("auth optional\tpam_mount.so\n")
# Setup finished
# Move user home dir out of the way
    mounted_dir = os.path.join(dest_dir, "home/", username)
    backup_dir = os.path.join(dest_dir, "var/tmp/", username)
    subprocess.check_call(['mv', mounted_dir, backup_dir])
    # Create necessary dirs, encrypted and mounted (unencrypted)
    encrypted_dir = os.path.join(dest_dir, "home/.encfs/", username)
    subprocess.check_call(['mkdir', '-p', encrypted_dir, mounted_dir])
    # Set owner
    subprocess.check_call(['chown', '%s:users' % username, encrypted_dir, mounted_dir])
    # Create encrypted directory
    subprocess.check_call(['encfs', '-v', encrypted_dir, mounted_dir])
    # Restore user home files (check_call does not go through a shell, so the
    # globs are expanded here with the glob module instead of being passed to mv)
    for src in (glob.glob(os.path.join(backup_dir, "*")) +
                glob.glob(os.path.join(backup_dir, ".[A-Za-z0-9]*"))):
        subprocess.check_call(['mv', src, mounted_dir])
# Delete home backup
subprocess.check_call(['rmdir', backup_dir])
| gpl-3.0 | -6,783,151,214,010,890,000 | 36.892157 | 102 | 0.663648 | false |
andredias/nikola | nikola/plugins/command/new_post.py | 1 | 19809 | # -*- coding: utf-8 -*-
# Copyright © 2012-2017 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Create a new post."""
from __future__ import unicode_literals, print_function
import io
import datetime
import operator
import os
import shutil
import subprocess
import sys
from blinker import signal
import dateutil.tz
from nikola.plugin_categories import Command
from nikola import utils
COMPILERS_DOC_LINK = 'https://getnikola.com/handbook.html#configuring-other-input-formats'
POSTLOGGER = utils.get_logger('new_post', utils.STDERR_HANDLER)
PAGELOGGER = utils.get_logger('new_page', utils.STDERR_HANDLER)
LOGGER = POSTLOGGER
def get_default_compiler(is_post, compilers, post_pages):
"""Given compilers and post_pages, return a reasonable default compiler for this kind of post/page."""
# First throw away all the post_pages with the wrong is_post
filtered = [entry for entry in post_pages if entry[3] == is_post]
# Get extensions in filtered post_pages until one matches a compiler
for entry in filtered:
extension = os.path.splitext(entry[0])[-1]
for compiler, extensions in compilers.items():
if extension in extensions:
return compiler
# No idea, back to default behaviour
return 'rest'
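# --- Illustrative example (not part of the original plugin) ---
# With Markdown mapped to '.md' and posts stored under 'posts/*.md', the
# default compiler for a new post resolves to 'markdown'; both structures
# below are assumptions rather than Nikola's shipped defaults.
def _example_default_compiler():
    compilers = {'markdown': ('.md',), 'rest': ('.rst', '.txt')}
    post_pages = [('posts/*.md', 'posts/', 'post.tmpl', True),
                  ('pages/*.rst', 'pages/', 'page.tmpl', False)]
    return get_default_compiler(True, compilers, post_pages)  # -> 'markdown'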
def get_date(schedule=False, rule=None, last_date=None, tz=None, iso8601=False):
"""Return a date stamp, given a recurrence rule.
schedule - bool:
whether to use the recurrence rule or not
rule - str:
an iCal RRULE string that specifies the rule for scheduling posts
last_date - datetime:
timestamp of the last post
tz - tzinfo:
the timezone used for getting the current time.
iso8601 - bool:
whether to force ISO 8601 dates (instead of locale-specific ones)
"""
if tz is None:
tz = dateutil.tz.tzlocal()
date = now = datetime.datetime.now(tz)
if schedule:
try:
from dateutil import rrule
except ImportError:
LOGGER.error('To use the --schedule switch of new_post, '
'you have to install the "dateutil" package.')
rrule = None # NOQA
if schedule and rrule and rule:
try:
rule_ = rrule.rrulestr(rule, dtstart=last_date or date)
except Exception:
LOGGER.error('Unable to parse rule string, using current time.')
else:
date = rule_.after(max(now, last_date or now), last_date is None)
offset = tz.utcoffset(now)
offset_sec = (offset.days * 24 * 3600 + offset.seconds)
offset_hrs = offset_sec // 3600
offset_min = offset_sec % 3600
if iso8601:
tz_str = '{0:+03d}:{1:02d}'.format(offset_hrs, offset_min // 60)
else:
if offset:
tz_str = ' UTC{0:+03d}:{1:02d}'.format(offset_hrs, offset_min // 60)
else:
tz_str = ' UTC'
return (date.strftime('%Y-%m-%d %H:%M:%S') + tz_str, date)
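# --- Illustrative example (not part of the original plugin) ---
# Asking for the next Monday after the latest post; the RRULE string is an
# assumption chosen for the example and requires the dateutil package.
def _example_scheduled_date(last_date=None):
    stamp, dateobj = get_date(schedule=True,
                              rule='RRULE:FREQ=WEEKLY;BYDAY=MO',
                              last_date=last_date)
    return stamp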
class CommandNewPost(Command):
"""Create a new post."""
name = "new_post"
doc_usage = "[options] [path]"
doc_purpose = "create a new blog post or site page"
cmd_options = [
{
'name': 'is_page',
'short': 'p',
'long': 'page',
'type': bool,
'default': False,
'help': 'Create a page instead of a blog post. (see also: `nikola new_page`)'
},
{
'name': 'title',
'short': 't',
'long': 'title',
'type': str,
'default': '',
'help': 'Title for the post.'
},
{
'name': 'author',
'short': 'a',
'long': 'author',
'type': str,
'default': '',
'help': 'Author of the post.'
},
{
'name': 'tags',
'long': 'tags',
'type': str,
'default': '',
'help': 'Comma-separated tags for the post.'
},
{
'name': 'onefile',
'short': '1',
'type': bool,
'default': False,
'help': 'Create the post with embedded metadata (single file format)'
},
{
'name': 'twofile',
'short': '2',
'type': bool,
'default': False,
'help': 'Create the post with separate metadata (two file format)'
},
{
'name': 'edit',
'short': 'e',
'type': bool,
'default': False,
'help': 'Open the post (and meta file, if any) in $EDITOR after creation.'
},
{
'name': 'content_format',
'short': 'f',
'long': 'format',
'type': str,
'default': '',
'help': 'Markup format for the post (use --available-formats for list)',
},
{
'name': 'available-formats',
'short': 'F',
'long': 'available-formats',
'type': bool,
'default': False,
'help': 'List all available input formats'
},
{
'name': 'schedule',
'short': 's',
'type': bool,
'default': False,
'help': 'Schedule the post based on recurrence rule'
},
{
'name': 'import',
'short': 'i',
'long': 'import',
'type': str,
'default': '',
'help': 'Import an existing file instead of creating a placeholder'
},
{
'name': 'date-path',
'short': 'd',
'long': 'date-path',
'type': bool,
'default': False,
'help': 'Create post with date path (eg. year/month/day, see NEW_POST_DATE_PATH_FORMAT in config)'
},
]
def _execute(self, options, args):
"""Create a new post or page."""
global LOGGER
compiler_names = [p.name for p in
self.site.plugin_manager.getPluginsOfCategory(
"PageCompiler")]
if len(args) > 1:
print(self.help())
return False
elif args:
path = args[0]
else:
path = None
# Even though stuff was split into `new_page`, it’s easier to do it
# here not to duplicate the code.
is_page = options.get('is_page', False)
is_post = not is_page
content_type = 'page' if is_page else 'post'
title = options['title'] or None
author = options['author'] or ''
tags = options['tags']
onefile = options['onefile']
twofile = options['twofile']
import_file = options['import']
wants_available = options['available-formats']
date_path_opt = options['date-path']
date_path_auto = self.site.config['NEW_POST_DATE_PATH']
date_path_format = self.site.config['NEW_POST_DATE_PATH_FORMAT'].strip('/')
if wants_available:
self.print_compilers()
return
if is_page:
LOGGER = PAGELOGGER
else:
LOGGER = POSTLOGGER
if twofile:
onefile = False
if not onefile and not twofile:
onefile = self.site.config.get('ONE_FILE_POSTS', True)
content_format = options['content_format']
content_subformat = None
if "@" in content_format:
content_format, content_subformat = content_format.split("@")
if not content_format: # Issue #400
content_format = get_default_compiler(
is_post,
self.site.config['COMPILERS'],
self.site.config['post_pages'])
if content_format not in compiler_names:
LOGGER.error("Unknown {0} format {1}, maybe you need to install a plugin or enable an existing one?".format(content_type, content_format))
self.print_compilers()
return
compiler_plugin = self.site.plugin_manager.getPluginByName(
content_format, "PageCompiler").plugin_object
# Guess where we should put this
entry = self.filter_post_pages(content_format, is_post)
if entry is False:
return 1
if import_file:
print("Importing Existing {xx}".format(xx=content_type.title()))
print("-----------------------\n")
else:
print("Creating New {xx}".format(xx=content_type.title()))
print("-----------------\n")
if title is not None:
print("Title:", title)
else:
while not title:
title = utils.ask('Title')
if isinstance(title, utils.bytes_str):
try:
title = title.decode(sys.stdin.encoding)
except (AttributeError, TypeError): # for tests
title = title.decode('utf-8')
title = title.strip()
if not path:
slug = utils.slugify(title, lang=self.site.default_lang)
else:
if isinstance(path, utils.bytes_str):
try:
path = path.decode(sys.stdin.encoding)
except (AttributeError, TypeError): # for tests
path = path.decode('utf-8')
slug = utils.slugify(os.path.splitext(os.path.basename(path))[0], lang=self.site.default_lang)
if isinstance(author, utils.bytes_str):
try:
author = author.decode(sys.stdin.encoding)
except (AttributeError, TypeError): # for tests
author = author.decode('utf-8')
# Calculate the date to use for the content
schedule = options['schedule'] or self.site.config['SCHEDULE_ALL']
rule = self.site.config['SCHEDULE_RULE']
self.site.scan_posts()
timeline = self.site.timeline
last_date = None if not timeline else timeline[0].date
date, dateobj = get_date(schedule, rule, last_date, self.site.tzinfo, self.site.config['FORCE_ISO8601'])
data = {
'title': title,
'slug': slug,
'date': date,
'tags': tags,
'link': '',
'description': '',
'type': 'text',
}
if not path:
pattern = os.path.basename(entry[0])
suffix = pattern[1:]
output_path = os.path.dirname(entry[0])
if date_path_auto or date_path_opt:
output_path += os.sep + dateobj.strftime(date_path_format)
txt_path = os.path.join(output_path, slug + suffix)
meta_path = os.path.join(output_path, slug + ".meta")
else:
if date_path_opt:
LOGGER.warn("A path has been specified, ignoring -d")
txt_path = os.path.join(self.site.original_cwd, path)
meta_path = os.path.splitext(txt_path)[0] + ".meta"
if (not onefile and os.path.isfile(meta_path)) or \
os.path.isfile(txt_path):
# Emit an event when a post exists
event = dict(path=txt_path)
if not onefile: # write metadata file
event['meta_path'] = meta_path
signal('existing_' + content_type).send(self, **event)
LOGGER.error("The title already exists!")
LOGGER.info("Existing {0}'s text is at: {1}".format(content_type, txt_path))
if not onefile:
LOGGER.info("Existing {0}'s metadata is at: {1}".format(content_type, meta_path))
return 8
d_name = os.path.dirname(txt_path)
utils.makedirs(d_name)
metadata = {}
if author:
metadata['author'] = author
metadata.update(self.site.config['ADDITIONAL_METADATA'])
data.update(metadata)
# ipynb plugin needs the ipython kernel info. We get the kernel name
# from the content_subformat and pass it to the compiler in the metadata
if content_format == "ipynb" and content_subformat is not None:
metadata["ipython_kernel"] = content_subformat
# Override onefile if not really supported.
if not compiler_plugin.supports_onefile and onefile:
onefile = False
LOGGER.warn('This compiler does not support one-file posts.')
if onefile and import_file:
with io.open(import_file, 'r', encoding='utf-8') as fh:
content = fh.read()
elif not import_file:
if is_page:
content = self.site.MESSAGES[self.site.default_lang]["Write your page here."]
else:
content = self.site.MESSAGES[self.site.default_lang]["Write your post here."]
if (not onefile) and import_file:
# Two-file posts are copied on import (Issue #2380)
shutil.copy(import_file, txt_path)
else:
compiler_plugin.create_post(
txt_path, content=content, onefile=onefile, title=title,
slug=slug, date=date, tags=tags, is_page=is_page, **metadata)
event = dict(path=txt_path)
if not onefile: # write metadata file
with io.open(meta_path, "w+", encoding="utf8") as fd:
fd.write(utils.write_metadata(data))
LOGGER.info("Your {0}'s metadata is at: {1}".format(content_type, meta_path))
event['meta_path'] = meta_path
LOGGER.info("Your {0}'s text is at: {1}".format(content_type, txt_path))
signal('new_' + content_type).send(self, **event)
if options['edit']:
editor = os.getenv('EDITOR', '').split()
to_run = editor + [txt_path]
if not onefile:
to_run.append(meta_path)
if editor:
subprocess.call(to_run)
else:
LOGGER.error('$EDITOR not set, cannot edit the post. Please do it manually.')
def filter_post_pages(self, compiler, is_post):
"""Return the correct entry from post_pages.
Information based on:
* selected compilers
* available compilers
* post/page status
"""
compilers = self.site.config['COMPILERS']
post_pages = self.site.config['post_pages']
compiler_objs = self.site.compilers
# First throw away all the post_pages with the wrong is_post
filtered = [entry for entry in post_pages if entry[3] == is_post]
# These are the extensions supported by the required format
extensions = compilers.get(compiler)
if extensions is None:
if compiler in compiler_objs:
LOGGER.error("There is a {0} compiler available, but it's not set in your COMPILERS option.".format(compiler))
LOGGER.info("Read more: {0}".format(COMPILERS_DOC_LINK))
else:
LOGGER.error('Unknown format {0}'.format(compiler))
self.print_compilers()
return False
# Throw away the post_pages with the wrong extensions
filtered = [entry for entry in filtered if any([ext in entry[0] for ext in
extensions])]
if not filtered:
type_name = "post" if is_post else "page"
LOGGER.error("Can't find a way, using your configuration, to create "
"a {0} in format {1}. You may want to tweak "
"COMPILERS or {2}S in conf.py".format(
type_name, compiler, type_name.upper()))
LOGGER.info("Read more: {0}".format(COMPILERS_DOC_LINK))
return False
return filtered[0]
def print_compilers(self):
"""List all available compilers in a human-friendly format."""
# We use compilers_raw, because the normal dict can contain
# garbage coming from the translation candidate implementation.
# Entries are in format: (name, extensions, used_in_post_pages)
compilers_raw = self.site.config['_COMPILERS_RAW']
used_compilers = []
unused_compilers = []
disabled_compilers = []
for name, plugin in self.site.compilers.items():
if name in compilers_raw:
used_compilers.append([
name,
plugin.friendly_name or name,
compilers_raw[name],
True
])
else:
disabled_compilers.append([
name,
plugin.friendly_name or name,
(),
False
])
for name, (_, _, pi) in self.site.disabled_compilers.items():
if pi.details.has_option('Nikola', 'Friendlyname'):
f_name = pi.details.get('Nikola', 'Friendlyname')
else:
f_name = name
if name in compilers_raw:
unused_compilers.append([
name,
f_name,
compilers_raw[name],
False
])
else:
disabled_compilers.append([
name,
f_name,
(),
False
])
used_compilers.sort(key=operator.itemgetter(0))
unused_compilers.sort(key=operator.itemgetter(0))
disabled_compilers.sort(key=operator.itemgetter(0))
# We also group the compilers by status for readability.
parsed_list = used_compilers + unused_compilers + disabled_compilers
print("Available input formats:\n")
name_width = max([len(i[0]) for i in parsed_list] + [4]) # 4 == len('NAME')
fname_width = max([len(i[1]) for i in parsed_list] + [11]) # 11 == len('DESCRIPTION')
print((' {0:<' + str(name_width) + '} {1:<' + str(fname_width) + '} EXTENSIONS\n').format('NAME', 'DESCRIPTION'))
for name, fname, extensions, used in parsed_list:
flag = ' ' if used else '!'
flag = flag if extensions else '~'
extensions = ', '.join(extensions) if extensions else '(disabled: not in COMPILERS)'
print(('{flag}{name:<' + str(name_width) + '} {fname:<' + str(fname_width) + '} {extensions}').format(flag=flag, name=name, fname=fname, extensions=extensions))
print("""
More compilers are available in the Plugins Index.
Compilers marked with ! and ~ require additional configuration:
! not in the PAGES/POSTS tuples (unused)
~ not in the COMPILERS dict (disabled)
Read more: {0}""".format(COMPILERS_DOC_LINK))
| mit | -6,730,951,704,187,957,000 | 35.542435 | 174 | 0.548773 | false |
anmolshkl/oppia-ml | decision_tree.py | 1 | 1354 | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import metrics
from sklearn import tree
import numpy as np
import os
import random
import timeit
import yaml
import load_data
import utilities
def main():
# Load data
X_train, y_train, X_test, y_test = load_data.load_data()
# Transform data into a vector of TF-IDF values
count_vect = CountVectorizer(ngram_range=(1, 2))
X_train_counts = count_vect.fit_transform(X_train)
tfidf_transformer = TfidfTransformer(use_idf=True)
X_train_dtm = tfidf_transformer.fit_transform(X_train_counts)
# Transform test data
X_test_counts = count_vect.transform(X_test)
X_test_dtm = tfidf_transformer.fit_transform(X_test_counts)
# using default params, criterion=gini, max_depth=None ...
clf = tree.DecisionTreeClassifier()
clf.fit(X_train_dtm, y_train)
y_pred_class = clf.predict(X_test_dtm)
# utilities.print_misclassified_samples(X_test, y_pred_class, y_test)
utilities.print_stats(y_pred_class, y_test)
if __name__ == '__main__':
# probably not the best way to measure time, but, we only want a ballpark figure
execution_time = timeit.timeit("main()", setup="from __main__ import main", number=1)
print "Execution time={0} sec".format(execution_time) | apache-2.0 | 5,862,555,658,209,508,000 | 33.74359 | 89 | 0.718612 | false |
shadowmint/nwidget | lib/cocos2d-0.5.5/tools/uniform_snippet.py | 1 | 9402 | #!/usr/bin/python
# $Id:$
"""checks that certain code snipets are the same across the samples and test
directories.
This is a release helper.
Usage:
uniform_snippet.py [<task_selection>]
Where <task_selection> one of:
--report
list files using a different variation of reference snipet
--report-verbose
as report, but also prints the matching block
--report-bigs
list only files with a matching block bigger than the reference block
--report-using
list files using the exact reference snipet
--fix
any non compliant file is modified by replacing the matching block
with the reference block
console output is the list of modified files
Default option is --report
When using --report or --report-verbose the script return value will be
0 (success) if all files are compliant
nonzero otherwise
Release workflow:
1. run with report-bigs
for each file reported, analize if it should be the new reference snipet
or it is a special case.
If a special case, rename the class / funcion, else change the reference
snipet
2. run with report (and save the results)
If no file is reported, the job is done
If they are files reported
run with report-verbose
analize the results, renaming the non compliant classes / functions
when it is a special case
3. run with fix
to replace the remaining non compliant snipets with the referece snipet
4. test all the scripts changed in the process.
"""
import os
import sys
##def chain(*iterables):
## # chain('ABC', 'DEF') --> A B C D E F
## for it in iterables:
## for element in it:
## yield element
class SnipetCompliance(object):
@classmethod
def set_target(cls, reference_text, start_mark=None):
reference_text = reference_text.rstrip()
cls.reference_lines = reference_text.split('\n')[1:]
infered_start_mark = cls.reference_lines[0].strip().replace(' ', '')
if start_mark is None:
cls.begin_string = infered_start_mark
else:
assert infered_start_mark.startswith(start_mark)
cls.begin_string = start_mark
def __init__(self, fname, text):
self.fname = fname
self.text = text
self.prepare()
def prepare(self):
self.lines = self.text.split('\n')
self.iter_enumerated_lines = enumerate(self.lines)
self.compliant = None
self.is_present = None
def is_compliant(self):
if self.compliant is not None:
return self.compliant
self.start_line = get_start_line(self.iter_enumerated_lines, self.begin_string)
self.is_present = self.start_line is not None
self.endplus_line = None
if not self.is_present:
self.compliant = True
else:
self.endplus_line = get_endplus_line(self.iter_enumerated_lines)
# a bit strong (whitespace matters), but lets go with simple code
self.compliant = (self.reference_lines ==
self.lines[self.start_line : self.endplus_line])
return self.compliant
def matched(self):
if self.is_present is None:
self.is_compliant()
## print 'self.is_present:', self.is_present
## print 'self.compliant:', self.compliant
if self.is_present:
## print 'start_line:', self.start_line
## print 'endplus_line:', self.endplus_line
s = '\n'.join(self.lines[self.start_line: self.endplus_line])
else:
s = ''
return s
def bigger(self):
return (self.endplus_line - self.start_line) > len(self.reference_lines)
def enforce_compliance(self):
if self.is_compliant():
return
# replace matched code by the reference code
self.lines[self.start_line : self.endplus_line] = self.reference_lines
self.text = '\n'.join(self.lines)
self.prepare()
self.is_compliant()
assert self.compliant
#self.compliant = True
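# --- Illustrative usage sketch (not part of the original tool) ---
# Checks a single file against the reference snippet; the file name is an
# assumption and SnipetCompliance.set_target() must have been called first,
# as done in the __main__ block below.
def _example_check_one_file(fname='../test/test_sprite.py'):
    with open(fname, 'rb') as f:
        worker = SnipetCompliance(fname, f.read())
    return worker.is_compliant()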
reference = """
class BackgroundLayer(cocos.layer.Layer):
def __init__(self):
super(BackgroundLayer, self).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
glColor4ub(255, 255, 255, 255)
glPushMatrix()
self.transform()
self.img.blit(0,0)
glPopMatrix()
"""
def get_start_line(iter_enumerated_lines, target):
"""-> line number where the target string matches the start of line
    Consumes the iterator until a line with a match is found or until the
iterator is exhausted.
Returns the line number where the match was found or None if no match.
After the return, a call to the iterator 'next' method would yield
the (lineno, line) for the line following the matching line.
It discards whitespace in the lines provided by the iterator before
comparison.
"""
try:
while 1:
lineno, line = iter_enumerated_lines.next()
line = line.strip()
line = line.replace(' ', '')
if line.startswith(target):
start_line = lineno
break
except StopIteration:
# target not present
start_line = None
return start_line
def get_endplus_line(iter_enumerated_lines):
"""
Advances the iterator until a nonblank line with zero indentation is found.
Returns the line number of the last non whitespace line with indentation
greater than zero.
"""
# seek end of object code as the next line with zero indentation
    # will break with comments at 0 indent amidst the object code
# class / func definition should be in lines[start_line : endplus_line]
# trailing whitespace lines are not included
last_no_blank = None
while 1:
try:
lineno, line = iter_enumerated_lines.next()
except StopIteration:
# EOF
break
if len(line)>0 and not line[0].isspace():
# a line with indentation zero, the object code should
# have ended at most one line above
if last_no_blank is None:
last_no_blank = lineno-1
break
if len(line)>0 and not line.isspace():
last_no_blank = lineno
return last_no_blank + 1
##def save_modified(file_):
## for line in itertools.chain(
## lines[start_line: start_line+len(reference_lines)],
## reference_lines,
## ['\n',],
## lines[endplus_line: -1]):
## file_.write(line)
if __name__ == '__main__':
task_to_flags = {
#key = (report_compliant, report_only_big, report_show_matching_block, fix)
'--report': (False, False, False, False),
'--report-verbose': (False, False, True, False),
'--report-bigs': (False, True, True, False),
'--report-using': (True, False, False, False),
'--fix': (False, False, False, True)
}
if len(sys.argv) == 1:
task = '--report'
elif len(sys.argv)==2 and sys.argv[1] in task_to_flags:
task = sys.argv[1]
else:
print >> sys.stderr, __doc__
sys.exit(0)
(report_compliant, report_only_big,
report_show_matching_block, fix) = task_to_flags[task]
# Asumes script runs from trunk/tools
# Only scripts strictly in the test directory are considered.
dir_path = '../test'
SnipetCompliance.set_target(reference, 'classBackgroundLayer(')
all_compliant = True
for short_name in os.listdir(dir_path):
if short_name.endswith('.py'):
# use sep = '/' even in windows to better cooperation with autotest
fname = '/'.join([dir_path, short_name])
f = open(fname, 'rb')
text = f.read()
f.close()
worker = SnipetCompliance(fname, text)
file_compliant = worker.is_compliant()
all_compliant = all_compliant and file_compliant
if report_compliant == file_compliant:
if file_compliant and worker.is_present:
# strip initial '../' for better colaboration with autotest
print fname[3:]
else:
if (not report_only_big or
(report_only_big and worker.bigger())):
# strip initial '../' for better colaboration with autotest
print fname[3:]
if report_show_matching_block:
print '\n>>>'
print 'matched:'
print worker.matched()
print '<<<\n'
if fix and not file_compliant:
worker.enforce_compliance()
f = open(fname, 'wb')
f.write(worker.text)
f.close()
cmd_result = not all_compliant and task in ['--report', '--report-verbose']
sys.exit(cmd_result)
| apache-2.0 | 3,861,327,580,383,940,000 | 33.345865 | 87 | 0.573389 | false |
hack-shack/rorbuild | pyBuilder/MoFileReaderBuilder.py | 1 | 5567 | # MoFileReaderBuilder.py
# Last modified by : terpsichorean, 2014-03-31
# RoR target version : 0.4.0.7
# OS X target version : 10.9.2
# Win32 target version : n/a
# Win64 target version : n/a
# Description : Builds MoFileReader.
# RoR :
# documentation : none on RoR site.
# documentation req. : http://www.rigsofrods.com/wiki/pages/Compiling_3rd_party_libraries
# OS X :
# documentation : http://wiki.wxwidgets.org/Compiling_wxWidgets_using_the_command-line_(Terminal)
# dependencies : Xcode, Xcode Command Line Tools
from pyBuilder import *
class MoFileReaderBuilder(BuildCMakeTarget):
def __init__(self):
self.initPaths()
def extract(self):
rorbuild_root = globals()['ROOT']
if 'osx_10.9' in globals()['PLATFORMS']:
print('Root directory: ' + rorbuild_root + '/build/mofilereader')
# Git clone from code.google.com
if not os.path.isdir(rorbuild_root + '/build'):
print(' Could not find build directory. Creating...')
os.mkdir(rorbuild_root + '/build')
if os.path.isdir(rorbuild_root + '/build/mofilereader'):
print(" Source directory found. Looks like it's been cloned already.")
return 0
elif not os.path.isdir(rorbuild_root + '/build/mofilereader'):
print(" Source directory not found. '")
print(" Running 'git clone'...")
os.chdir(rorbuild_root + '/build')
os.system('git clone https://code.google.com/p/mofilereader')
return 0
elif 'x86' in globals()['PLATFORMS']:
# Unzip the v26-dependencies-win source code
result_code = 1
result_code |= self.unzip('files/moFileReader.0.1.2.zip')
result_code |= self.unzip('files/moFileReader-headerfix.0.1.2.zip', self.path+'/include/')
return result_code
else:
print "Build platform not supported. See -def extract- in MoFileReaderBuilder.py."
return 1
def configure(self):
rorbuild_root = globals()['ROOT']
if 'osx_10.9' in globals()['PLATFORMS']:
# check for GitHub clone; run CMake
if os.path.isdir(rorbuild_root + '/build/mofilereader'):
print('Found source directory: ' + rorbuild_root + '/build/mofilereader')
print('Starting CMake...')
os.chdir(rorbuild_root + '/build/mofilereader/build')
os.system('cmake .')
return 0
elif not os.path.isdir(rorbuild_root + '/build/mofilereader'):
print 'No source directory found. Cloning...'
os.chdir(rorbuild_root + '/build')
self.extract()
return 0
elif 'x86' in globals()['PLATFORMS']:
# Windows
self.mkd('build/build_'+self.arch)
return self.execute(r"""
cd %(path)s\build\build_%(arch)s
@call:checkReturnValue
cmake -G %(generator)s ..
@call:checkReturnValue
""")
def build(self):
rorbuild_root = globals()['ROOT']
if 'osx_10.9' in globals()['PLATFORMS']:
# TODO: Exception handler for failed make
self.banner('Target: ' + self.target + ' / '+ 'Configuration: ' + self.configuration + ' / ' + 'Architecture: ' + self.arch + ' / ' + 'Platform: ' + self.platform)
print(' Running make...')
os.chdir(rorbuild_root + '/build/mofilereader/build')
os.system('make')
return 0
elif 'x86' in globals()['PLATFORMS']:
self.platform = 'Win32'
self.arch = 'x86'
self.banner('Target: ' + self.target + ' / '+ 'Configuration: ' + self.configuration + ' / ' + 'Architecture: ' + self.arch + ' / ' + 'Platform: ' + self.platform)
return self.execute(r"""
cd %(path)s\build\build_%(arch)s
@call:checkReturnValue
msbuild %(target)s.sln /t:rebuild /p:Configuration=%(configuration)s /p:Platform=%(platform)s /verbosity:%(vsverbosity)s /nologo /maxcpucount:%(maxcpu)d
@call:checkReturnValue
""")
def install(self):
rorbuild_root = globals()['ROOT']
if 'osx_10.9' in globals()['PLATFORMS']:
target = 'osx_10.9'
build_configuration = 'RelWithDebInfo' # TODO: add list support for multiple build configs
print ' Installing MoFileReader...'
print ' --------------------------'
header_source_dir = rorbuild_root + '/build/mofilereader/include/'
header_target_dir = rorbuild_root + '/include/' + target + '/MoFileReader/'
library_source_dir = rorbuild_root + '/build/mofilereader/lib/'
library_target_dir = rorbuild_root + '/lib/' + target + '/MoFileReader/' + build_configuration
BuildTarget.create_target_directory(header_target_dir)
BuildTarget.create_target_directory(library_target_dir)
BuildTarget.install_built_files('*.h', header_source_dir, header_target_dir)
BuildTarget.install_built_files('*.a', library_source_dir, library_target_dir)
return 0
elif 'x86' in globals()['PLATFORMS']:
self.installIncludes('include/')
self.installLibs('build/lib/%(conf)s/%(target)s*.lib')
self.installBinaries('build/lib/%(conf)s/%(target)s*.pdb', False) #optional
return 0
| gpl-3.0 | 2,047,684,479,248,826,000 | 44.008264 | 175 | 0.569786 | false |
NERC-CEH/jules-jasmin | majic/joj/utils/general_controller_helper.py | 1 | 5711 | """
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import logging
from decorator import decorator
from pylons import config, url
from joj.lib.base import render, c, redirect
from joj.lib.wmc_util import create_request_and_open_url
log = logging.getLogger(__name__)
@decorator
def must_be_admin(func, self, *args, **kwargs):
"""
    Decorator to add to an action in a controller: if the user is not an admin,
    the not found page is rendered instead
    :param func: function that is decorated
    :param args: arguments for the function
    :param kwargs: keyword arguments for the function
:return: rendered html
"""
access_is_ok = False
try:
access_is_ok = self.current_user is not None and self.current_user.is_admin()
except Exception:
log.exception("Exception when accessing a admin only page")
# call to render page must be outside exception block otherwise redirects do not work
if access_is_ok:
return func(self, *args, **kwargs)
return render('not_found.html')
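# Hedged usage sketch (the controller class below is hypothetical, not part of
# this module): the decorator is intended for controller actions that expose
# admin-only pages.
#
#     class AdminReportController(BaseController):
#         @must_be_admin
#         def index(self):
#             return render('admin_report.html')
#
# Non-admin users (and any error raised while checking) fall through to
# not_found.html.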
def put_errors_in_table_on_line(errors, error_key, field_name):
"""
    Collapse a set of errors which all belong to one category onto one input per line
    :param errors: the errors dictionary
    :param error_key: main key for the error, e.g. region which would be a list of region error dictionaries
    :param field_name: name of the field to add the errors to
:return: for each line a list of errors on that line (returns a list of lists)
"""
extra_errors = []
error_list = errors.get(error_key)
if error_list is not None:
for error, index in zip(error_list, range(len(error_list))):
extra_errors.append([])
            if error is not None and len(error) != 0:
errors["{}-{}.{}".format(error_key, index, field_name)] = "Please correct"
for key, error_messages in error.iteritems():
if type(error_messages) is list:
for error_message in error_messages:
extra_errors[index].append(error_message)
else:
extra_errors[index].append(error_messages)
del errors[error_key]
return extra_errors
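# Illustrative sketch of the transformation above (the data shapes are
# assumptions chosen only for illustration):
#
#     errors = {'region': [None, {'name': ['Too long']}]}
#     extra = put_errors_in_table_on_line(errors, 'region', 'name')
#     # errors is now {'region-1.name': 'Please correct'}
#     # extra is [[], ['Too long']] -- one list of messages per line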
def remove_deleted_keys(values, count_key, var_prefix, fieldnames):
"""
    Remove from values any indexes from deleted keys. I.e. if there are 3 entries but index 1 isn't present, reset to
    2 entries and renumber the values at index 2 to index 1.
    :param values: dictionary of values
    :param count_key: the key in that dictionary holding the count of variables
:param var_prefix: the prefix for the variables
:param fieldnames: the fields associated with the prefix
"""
try:
current_count = int(values[count_key])
except (KeyError, ValueError):
current_count = 0
new_index = 0
for i in range(current_count):
name = "{}-{}.{}"
if name.format(var_prefix, i, fieldnames[0]) in values:
if new_index != i:
for field in fieldnames:
value_name = name.format(var_prefix, i, field)
value_new_name = name.format(var_prefix, new_index, field)
value = values[value_name]
del values[value_name]
values[value_new_name] = value
new_index += 1
values[count_key] = new_index
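# Illustrative sketch (the 'param' prefix and 'name' field are assumptions):
#
#     values = {'count': 3,
#               'param-0.name': 'a',   # index 1 was deleted in the UI
#               'param-2.name': 'c'}
#     remove_deleted_keys(values, 'count', 'param', ['name'])
#     # 'param-2.name' is renamed to 'param-1.name' and values['count'] == 2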
@decorator
def show_error_if_thredds_down(func, self, *args, **kwargs):
"""
Renders an error page if the THREDDS server cannot be contacted
:param func: Function that is decorated
:param args: Arguments for the function
:param kwargs: Named key word arguments
:return: Rendered HTML
"""
if is_thredds_up(config):
return func(self, *args, **kwargs)
else:
c.admin_email = config["email.admin_address"]
page = render("map_down.html")
return page
def is_thredds_up(config):
"""
Is the THREDDS Server running?
:param config: Pylons configuration
:return:
"""
if config.get('run_in_test_mode') == 'true':
return True
else:
try:
create_request_and_open_url(
config['thredds.server_url'],
timeout=int(config['thredds.server_timeout'])).read()
return True
        except Exception:
return False
def redirect_back_to_came_from_for_model_run(id, request_params):
"""
    Throw a redirect back to the page that the user came from
:param id: id of model run
:param request_params: the request parameters
:return: nothing
"""
came_from = request_params.get('came_from', "")
if came_from == 'summary':
redirect(url(controller='model_run', action='summary', id=id))
elif came_from == 'index':
redirect(url(controller='model_run', action='index'))
else:
redirect(url(controller='model_run', action='index')) | gpl-2.0 | -2,567,025,911,908,129,000 | 35.851613 | 117 | 0.625985 | false |
oroszgy/spaCy.hu | spacy/cli/train.py | 1 | 4151 | # coding: utf8
from __future__ import unicode_literals, division, print_function
import json
from pathlib import Path
from ..scorer import Scorer
from ..tagger import Tagger
from ..syntax.parser import Parser
from ..gold import GoldParse, merge_sents
from ..gold import read_json_file as read_gold_json
from .. import util
def train(language, output_dir, train_data, dev_data, n_iter, tagger, parser, ner,
parser_L1):
output_path = Path(output_dir)
train_path = Path(train_data)
dev_path = Path(dev_data)
check_dirs(output_path, train_path, dev_path)
lang = util.get_lang_class(language)
parser_cfg = {
'pseudoprojective': True,
'L1': parser_L1,
'n_iter': n_iter,
'lang': language,
'features': lang.Defaults.parser_features}
entity_cfg = {
'n_iter': n_iter,
'lang': language,
'features': lang.Defaults.entity_features}
tagger_cfg = {
'n_iter': n_iter,
'lang': language,
'features': lang.Defaults.tagger_features}
gold_train = list(read_gold_json(train_path))
gold_dev = list(read_gold_json(dev_path)) if dev_path else None
train_model(lang, gold_train, gold_dev, output_path, tagger_cfg, parser_cfg,
entity_cfg, n_iter)
if gold_dev:
scorer = evaluate(lang, gold_dev, output_path)
print_results(scorer)
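# Hedged usage sketch (the paths and option values are assumptions, mirroring
# how the CLI is expected to call this entry point):
#
#     train('en', 'models/en', 'train.json', 'dev.json',
#           n_iter=15, tagger=True, parser=True, ner=True, parser_L1=0.0)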
def train_config(config):
config_path = Path(config)
if not config_path.is_file():
util.sys_exit(config_path.as_posix(), title="Config file not found")
    with config_path.open('r', encoding='utf8') as file_:
        config = json.load(file_)
for setting in []:
if setting not in config.keys():
util.sys_exit("{s} not found in config file.".format(s=setting),
title="Missing setting")
def train_model(Language, train_data, dev_data, output_path, tagger_cfg, parser_cfg,
entity_cfg, n_iter):
print("Itn.\tN weight\tN feats\tUAS\tNER F.\tTag %\tToken %")
with Language.train(output_path, train_data, tagger_cfg, parser_cfg, entity_cfg) as trainer:
loss = 0
for itn, epoch in enumerate(trainer.epochs(n_iter, augment_data=None)):
for doc, gold in epoch:
trainer.update(doc, gold)
dev_scores = trainer.evaluate(dev_data) if dev_data else []
print_progress(itn, trainer.nlp.parser.model.nr_weight,
trainer.nlp.parser.model.nr_active_feat,
**dev_scores.scores)
def evaluate(Language, gold_tuples, output_path):
print("Load parser", output_path)
nlp = Language(path=output_path)
scorer = Scorer()
for raw_text, sents in gold_tuples:
sents = merge_sents(sents)
for annot_tuples, brackets in sents:
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.parser(tokens)
nlp.entity(tokens)
else:
tokens = nlp(raw_text)
gold = GoldParse.from_annot_tuples(tokens, annot_tuples)
scorer.score(tokens, gold)
return scorer
def check_dirs(output_path, train_path, dev_path):
if not output_path.exists():
util.sys_exit(output_path.as_posix(), title="Output directory not found")
if not train_path.exists():
util.sys_exit(train_path.as_posix(), title="Training data not found")
if dev_path and not dev_path.exists():
util.sys_exit(dev_path.as_posix(), title="Development data not found")
def print_progress(itn, nr_weight, nr_active_feat, **scores):
tpl = '{:d}\t{:d}\t{:d}\t{uas:.3f}\t{ents_f:.3f}\t{tags_acc:.3f}\t{token_acc:.3f}'
print(tpl.format(itn, nr_weight, nr_active_feat, **scores))
def print_results(scorer):
results = {
'TOK': '%.2f' % scorer.token_acc,
'POS': '%.2f' % scorer.tags_acc,
'UAS': '%.2f' % scorer.uas,
'LAS': '%.2f' % scorer.las,
'NER P': '%.2f' % scorer.ents_p,
'NER R': '%.2f' % scorer.ents_r,
'NER F': '%.2f' % scorer.ents_f}
util.print_table(results, title="Results")
| mit | -7,030,617,258,275,148,000 | 35.095652 | 96 | 0.60371 | false |
DerekRies/starmadepy | starmadepy/starmade.py | 1 | 16611 | import json
import binascii
import copy
import pkgutil
from bisect import bisect_left
from utils import tuple_add, tuple_sub, plural, bits, split_every_nchars
from binary import BinaryStream, BitPacker
"""
Starmade.py is a collection of various helpers for manipulating Starmade data
"""
# items-complete.json has the expanded meta data needed for items
# including things like: shape, armor tier, and color
# item_data_path = 'starmadepy/data/items-complete.json'
# fh = open(item_data_path, 'r')
fh = pkgutil.get_data('starmadepy', 'data/items-complete.json')
item_data = json.loads(fh)
items = item_data['items']
# fh.close()
id_map = {}
name_map = {}
SHAPES = item_data['shapes']
ARMOR = item_data['armor_tiers']
for i, item in enumerate(item_data['items']):
id_map[item['id']] = i
name_map[item['name']] = i
def shape(s):
return SHAPES[s.lower()]
def tier(t):
return ARMOR[t.lower()]
class Block:
"""Block class serves two roles
1. to provide an interface for the starmade block database
2. to represent an actual block in a BlockGroup (template or bp)
"""
def __init__(
self,
item_id,
posx=0,
posy=0,
posz=0,
orientation=0,
active=False):
# Creates a block from a supported item id
if type(item_id) is not int:
raise Exception("""Item ID is not an int. If you\'re using a block
name then use the Block.from_itemname method.""")
data_index = id_map[item_id]
data = items[data_index]
self.name = data['name']
self.id = data['id']
self.color = data['color']
self.tier = data['tier']
self.shape = data['shape']
self.posx = posx
self.posy = posy
self.posz = posz
self.orientation = orientation
# For my purposes I'll be representing both ON and OPEN as a True state
# OFF and Closed will be represented as a False state
self.active = active
self.door = data.get('door', False)
self.hitpoints = 1
def props(self):
return ['color', 'tier', 'shape']
@classmethod
def from_stream(cls, stream):
"""
Preferred method of parsing block data from a file, can be shared
between templates and blueprints.
Bit-packed in 3 bytes to represent block data
- start with last 11 bits for the block id
- if block has an active state, first 3 bits are orientation
and 4th bit is active state
- if block is a corner then first 5 bits are orientation, else 4
- remainder of bits are block hitpoints value
"""
total_bits = stream.readNBytesAsBits(3)
block_id = int(total_bits[-11:], 2)
block = cls(block_id)
block.hitpoints = int(total_bits[5:-11], 2)
if block.shape == shape('corner'):
# corners require more bits to represent adequately
block.orientation = int(total_bits[:5], 2)
else:
orient_bits = total_bits[:4]
active_bit = total_bits[4]
block.active = not bool(int(active_bit))
block.orientation = int(orient_bits, 2)
# I'm representing doors the other way around as it makes sense
# to me.
# OPEN = ON = 0, CLOSED = OFF = 1
if block.door:
block.active = not block.active
return block
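    # Worked example of the 3-byte layout described in from_stream (the bit
    # values below are made up purely to illustrate the slicing):
    #
    #     total_bits = '0100' '1' '00000001' '00000000101'
    #                  orient  off hitpoints=1  block id=5
    #
    # For a non-corner block this parses to orientation=4, active=False
    # (the state bit is 1, i.e. "off"), hitpoints=1 and id=5; corner shapes
    # instead read the first 5 bits as orientation.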
def serialize_to_stream(self, stream):
bitpacker = BitPacker()
if self.shape == shape('corner'):
bitpacker.pack(self.orientation, 5)
else:
bitpacker.pack(self.orientation, 4)
if self.door:
bitpacker.pack(self.active, 1)
else:
bitpacker.pack(not self.active, 1)
bitpacker.pack(self.hitpoints, 8)
bitpacker.pack(self.id, 11)
bitpacker.write(stream)
@classmethod
def from_itemname(cls, name):
# Creates a block from a item name (string)
new_block = cls(cls.map_name_to_id(name))
return new_block
@classmethod
def map_name_to_id(cls, name):
return items[name_map[name]]['id']
@classmethod
def map_id_to_name(cls, id):
return items[id_map[id]]['name']
@classmethod
def search(cls, **kwargs):
"""
Searches the prototypes of all blocks for parameters matching those
supplied. Does not search any active blocks, this is just the class
here.
Querying actual instances of blocks will be handled in BlockGroup
classes like Templates and Blueprints.
"""
def match(item):
return all([(item[k] == v) for k, v in kwargs.iteritems()])
return filter(match, items)
@classmethod
def search_first(cls, **kwargs):
return cls.search(**kwargs)[0]
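    # Hedged usage sketch based on the query style used elsewhere in this file
    # (the colour/shape values are examples, not an exhaustive list):
    #
    #     orange_wedges = Block.search(color='orange', shape=shape('wedge'))
    #     first_match = Block.search_first(color='orange', shape=shape('wedge'))
    #
    # Both only filter the static item prototypes loaded from items-complete.json.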
# Active State Methods
# Opening and Closing are really just aliases that might throw an
# exception later if used by Blocks that can't be made invisible
# by opening them
def on(self):
self.active = True
def open(self):
self.on()
def off(self):
self.active = False
def close(self):
self.off()
def toggle(self):
self.active = not self.active
def copy(self, n_copies=1):
return copy.deepcopy(self)
def change_block_data(self, new_block):
for k, v in new_block.iteritems():
setattr(self, k, v)
def change_color(self, new_color):
"""
Expects a string of equal to:
'grey', 'white', 'black', 'purple', 'blue', 'red', 'green', 'orange',
'yellow', 'yellow hazard', or 'green hazard'.
"""
# find appropriate block of the new color provided
if new_color != self.color:
new_block = Block.search_first(
color=new_color, tier=self.tier, shape=self.shape)
self.change_block_data(new_block)
def change_tier_word(self, new_tier):
# Takes a tier, as a string, and changes the block to match that new
# tier
self.change_tier(tier(new_tier))
def change_tier(self, new_tier):
# Takes a tier, as an int, and changes the block to match that new tier
if new_tier != self.tier:
new_block = Block.search_first(
tier=new_tier, color=self.color, shape=self.shape)
self.change_block_data(new_block)
def change_shape(self, new_shape):
if new_shape != self.shape:
new_block = Block.search_first(
shape=new_shape, tier=self.tier, color=self.color)
self.change_block_data(new_block)
def change(self, **kwargs):
# TODO: Needs more tests to make sure this is working properly
for prop in self.props():
if prop not in kwargs:
kwargs[prop] = getattr(self, prop)
self.change_block_data(Block.search_first(**kwargs))
def move_to(self, nx=0, ny=0, nz=0):
# Should be used to change a blocks position to a new one
self.posx = nx
self.posy = ny
self.posz = nz
def move(self, *args):
# Placeholder for the moment
# Should be used to move a block in relation to its current position
self.move_to(*args)
def set_position(self, x=0, y=0, z=0):
self.posx = x
self.posy = y
self.posz = z
def get_position(self):
return (self.posx, self.posy, self.posz)
def info(self):
print "Item Name: %s" % self.name
print "Item ID: %s" % self.id
print "Position: %s, %s, %s" % (self.posx, self.posy, self.posz)
print "Item Color: %s" % self.color
print "Item Shape: %s" % self.shape
print "Armor Tier: %s" % self.tier
print "Door: %s" % self.door
class BlockGroup:
"""Used to share functionality and a common interface between Templates
and Blueprints
"""
def __init__(self):
# Creates an empty block group
self.name = None
self.header = None
self.blocks = []
# Connections will be tuples with the master first and the slave second
self.connections = []
# Header Info
self.version = 1
self.bound_lower = None
self.bound_upper = None
def empty(self):
self.blocks = []
self.connections = []
def get_connection_groups(self):
connection_groups = []
last_master = None
cur_pos = -1
for pair in sorted(
self.connections, key=lambda p: p[1].get_position()):
master = pair[1]
slave = pair[0]
if master != last_master:
cur_pos += 1
group = [master, slave]
connection_groups.append(group)
last_master = master
else:
connection_groups[cur_pos].append(slave)
return connection_groups
def num_blocks(self):
return len(self.blocks)
def num_connections(self):
return len(self.connections)
def box_dimensions(self):
# Get min values for each axis
if self.num_blocks() == 0:
return (0, 0, 0)
minx = min(block.posx for block in self.blocks)
miny = min(block.posy for block in self.blocks)
minz = min(block.posz for block in self.blocks)
mins = (minx, miny, minz)
# Get max values for each axis
maxx = max(block.posx for block in self.blocks)
maxy = max(block.posy for block in self.blocks)
maxz = max(block.posz for block in self.blocks)
maxs = (maxx, maxy, maxz)
dif = tuple_sub(maxs, mins)
return tuple_add(dif, (1, 1, 1))
def count_by_block(self):
b_count = {}
if self.num_blocks() != 0:
for block in self.blocks:
count = b_count.get(block.name, 0) + 1
b_count[block.name] = count
return b_count
def add(self, block):
self.blocks.append(block)
def replace(self, source_query, changes):
"""
Match all blocks belonging to this template that meet
the source query, and apply the following changes to them.
ex: Get all the orange blocks, and make them blue
t = Template()...
t.replace({color: 'orange'}, {color: 'blue'})
ex: Get all the orange wedges and turn them into blocks
t.replace({color: 'orange', shape: 'wedge'}, {shape: 'block'})
"""
blocks = self.get_all_blocks(**source_query)
for block in blocks:
block.change(**changes)
def get_all_blocks(self, **kwargs):
"""
Returns all blocks that match the query provided
TODO: Allow for more complex filters, like ranges, or multiple options
for specific block properties
"""
queried_blocks = []
for block in self.blocks:
filters = [bool(getattr(block, key) == val)
for key, val in kwargs.iteritems()]
if all(filters):
queried_blocks.append(block)
return queried_blocks
def get_block_at(self, x, y, z):
pos_args = {'posx': x, 'posy': y, 'posz': z}
blocks = self.get_all_blocks(**pos_args)
if len(blocks):
return blocks[0]
return None
def connect_blocks(self, master, slave):
"""
Creates a connection pair in the template between two blocks, a master
and a slave. These are actual Block instances.
"""
self.connections.append((master, slave))
def connect_blocks_at(self, master_pos, slave_pos):
"""
Creates a connection pair in the template between two blocks that are
specified by their coordinates. master_pos and slave_pos should be a
tuple like (x,y,z)
"""
master = self.get_block_at(*master_pos)
slave = self.get_block_at(*slave_pos)
self.connect_blocks(master, slave)
self._print_connection(master_pos, slave_pos)
def _print_connection(self, pos_a, pos_b):
print str(pos_a) + ' --> ' + str(pos_b)
def _print_connections(self):
"""Debugging method to make seeing the connections between blocks
visual and easy
"""
for pair in self.connections:
if pair[0] is None:
bpos = str(pair[1].get_position())
print "None --> %s (%s)" % (pair[1].name, bpos)
elif pair[1] is None:
apos = str(pair[0].get_position())
print "%s (%s) --> None" % (pair[0].name, apos)
else:
apos = str(pair[0].get_position())
bpos = str(pair[1].get_position())
print "%s (%s) --> %s (%s)" % (
pair[0].name, apos, pair[1].name, bpos)
def _print_block_states(self):
"""Debugging method to make seeing which blocks are currently active
or open, easier"""
for block in self.blocks:
print '{0}: {1}'.format(block.name, block.active)
def _print_block_orientations(self):
for block in self.blocks:
print '{0} ({1}): {2}'.format(
block.name, str(block.get_position()), block.orientation)
class Template(BlockGroup):
"""Template deserialized from a .smtpl file or generated through code
composed of blocks and connections.
"""
def save(self, filepath):
with open(filepath, 'wb') as ofile:
stream = BinaryStream(ofile)
stream.writeUChar(self.version)
if self.bound_lower is None or self.bound_upper is None:
# create the bounds
self.bound_lower = (0, 0, 0)
self.bound_upper = self.box_dimensions()
stream.writeVec3Int32(self.bound_lower)
stream.writeVec3Int32(self.bound_upper)
stream.writeInt32(self.num_blocks())
for block in self.blocks:
stream.writeVec3Int32(block.get_position())
block.serialize_to_stream(stream)
# stream.writeInt32(0)
# Writing the Connections portion of the file
# Connections not supported yet so just writing 4 blank bytes
connection_groups = self.get_connection_groups()
stream.writeInt32(len(connection_groups))
for group in connection_groups:
master = group[0]
slaves = group[1:]
stream.writeInt16(0)
# Need to save coordinates backwards
stream.writeVec3Int16(master.get_position()[::-1])
stream.writeInt32(len(slaves))
for slave in slaves:
stream.writeInt16(0)
stream.writeVec3Int16(slave.get_position()[::-1])
@classmethod
def fromSMTPL(cls, smtpl_filepath, debug=False):
# Creates a template from a .smtpl file
t = cls()
t.name = smtpl_filepath
with open(smtpl_filepath, 'rb') as ifile:
stream = BinaryStream(ifile)
# t.header = stream.readBytes(25)
t.version = stream.readUChar()
t.bound_lower = stream.readVec3Int32()
t.bound_upper = stream.readVec3Int32()
n_blocks = stream.readInt32()
# Template Blocks
for i in xrange(n_blocks):
x, y, z = stream.readVec3Int32()
block = Block.from_stream(stream)
block.move_to(x, y, z)
t.add(block)
n_connection_groups = stream.readInt32()
# Template Connections
for j in xrange(n_connection_groups):
unknown_filler = stream.readInt16()
# Coordinates are saved as z,y,x so we need to reverse them
master_pos = stream.readVec3Int16()[::-1]
n_connections = stream.readInt32()
for x in xrange(n_connections):
unknown_filler3 = stream.readInt16()
# Saved backwards again
slave_pos = stream.readVec3Int16()[::-1]
t.connect_blocks_at(slave_pos, master_pos)
return t
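    # Hedged usage sketch (the file name is hypothetical):
    #
    #     t = Template.fromSMTPL('my_design.smtpl')
    #     print t.num_blocks(), t.num_connections()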
@classmethod
def fromJSON(cls, json_filepath):
# Creates a template from a correctly formatted json file
return None
if __name__ == '__main__':
# fdata = pkgutil.get_data('', 'data/items-complete.json')
pass
| mit | 7,166,825,139,453,585,000 | 33.038934 | 79 | 0.569562 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/app/tests/test_tales.py | 1 | 19121 | # Copyright 2009-2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""tales.py doctests."""
from datetime import (
datetime,
timedelta,
)
from lxml import html
from pytz import utc
from zope.component import (
getAdapter,
getUtility,
)
from zope.traversing.interfaces import (
IPathAdapter,
TraversalError,
)
from lp.app.browser.tales import (
DateTimeFormatterAPI,
format_link,
ObjectImageDisplayAPI,
PersonFormatterAPI,
)
from lp.registry.interfaces.irc import IIrcIDSet
from lp.registry.interfaces.person import PersonVisibility
from lp.services.webapp.authorization import (
check_permission,
clear_cache,
precache_permission_for_objects,
)
from lp.services.webapp.servers import LaunchpadTestRequest
from lp.testing import (
login_person,
test_tales,
TestCase,
TestCaseWithFactory,
)
from lp.testing.layers import (
DatabaseFunctionalLayer,
FunctionalLayer,
LaunchpadFunctionalLayer,
)
def test_requestapi():
"""
>>> from lp.app.browser.tales import IRequestAPI, RequestAPI
>>> from lp.registry.interfaces.person import IPerson
>>> from zope.interface.verify import verifyObject
>>> class FakePrincipal:
... def __conform__(self, protocol):
... if protocol is IPerson:
... return "This is a person"
...
>>> class FakeApplicationRequest:
... principal = FakePrincipal()
... def getURL(self):
... return 'http://launchpad.dev/'
...
Let's make a fake request, where request.principal is a FakePrincipal
object. We can use a class or an instance here. It really doesn't
matter.
>>> request = FakeApplicationRequest()
>>> adapter = RequestAPI(request)
>>> verifyObject(IRequestAPI, adapter)
True
>>> adapter.person
'This is a person'
"""
def test_cookie_scope():
"""
The 'request/lp:cookie_scope' TALES expression returns a string
that represents the scope parameters necessary for a cookie to be
available for the entire Launchpad site. It takes into account
the request URL and the cookie_domains setting in launchpad.conf.
>>> from lp.app.browser.tales import RequestAPI
>>> def cookie_scope(url):
... class FakeRequest:
... def getURL(self):
... return url
... return RequestAPI(FakeRequest()).cookie_scope
The cookie scope will use the secure attribute if the request was
secure:
>>> print cookie_scope('http://launchpad.net/')
; Path=/; Domain=.launchpad.net
>>> print cookie_scope('https://launchpad.net/')
; Path=/; Secure; Domain=.launchpad.net
The domain parameter is omitted for domains that appear to be
separate from a Launchpad instance:
>>> print cookie_scope('https://example.com/')
; Path=/; Secure
"""
def test_dbschemaapi():
"""
>>> from lp.app.browser.tales import DBSchemaAPI
>>> from lp.code.enums import BranchType
The syntax to get the title is: number/lp:DBSchemaClass
>>> (str(DBSchemaAPI(1).traverse('BranchType', []))
... == BranchType.HOSTED.title)
True
Using an inappropriate number should give a KeyError.
>>> DBSchemaAPI(99).traverse('BranchType', [])
Traceback (most recent call last):
...
KeyError: 99
Using a dbschema name that doesn't exist should give a LocationError
>>> DBSchemaAPI(99).traverse('NotADBSchema', [])
Traceback (most recent call last):
...
LocationError: 'NotADBSchema'
"""
class TestPersonFormatterAPI(TestCaseWithFactory):
"""Tests for PersonFormatterAPI"""
layer = DatabaseFunctionalLayer
def test_link_display_name_id(self):
"""The link to the user profile page using displayname and id."""
person = self.factory.makePerson()
formatter = getAdapter(person, IPathAdapter, 'fmt')
result = formatter.link_display_name_id(None)
expected = '<a href="%s" class="sprite person">%s (%s)</a>' % (
formatter.url(), person.displayname, person.name)
self.assertEqual(expected, result)
class TestTeamFormatterAPI(TestCaseWithFactory):
""" Test permissions required to access TeamFormatterAPI methods.
A user must have launchpad.LimitedView permission to use
TeamFormatterAPI with private teams.
"""
layer = LaunchpadFunctionalLayer
def setUp(self):
super(TestTeamFormatterAPI, self).setUp()
icon = self.factory.makeLibraryFileAlias(
filename='smurf.png', content_type='image/png')
self.team = self.factory.makeTeam(
name='team', displayname='a team', icon=icon,
visibility=PersonVisibility.PRIVATE)
def _make_formatter(self, cache_permission=False):
# Helper to create the formatter and optionally cache the permission.
formatter = getAdapter(self.team, IPathAdapter, 'fmt')
clear_cache()
request = LaunchpadTestRequest()
any_person = self.factory.makePerson()
if cache_permission:
login_person(any_person, request)
precache_permission_for_objects(
request, 'launchpad.LimitedView', [self.team])
return formatter, request, any_person
def _tales_value(self, attr, request, path='fmt'):
# Evaluate the given formatted attribute value on team.
result = test_tales(
"team/%s:%s" % (path, attr), team=self.team, request=request)
return result
def _test_can_view_attribute_no_login(self, attr, hidden=None):
# Test attribute access with no login.
formatter, request, ignore = self._make_formatter()
value = self._tales_value(attr, request)
if value is not None:
if hidden is None:
hidden = formatter.hidden
self.assertEqual(hidden, value)
def _test_can_view_attribute_no_permission(self, attr, hidden=None):
# Test attribute access when user has no permission.
formatter, request, any_person = self._make_formatter()
login_person(any_person, request)
value = self._tales_value(attr, request)
if value is not None:
if hidden is None:
hidden = formatter.hidden
self.assertEqual(hidden, value)
def _test_can_view_attribute_with_permission(self, attr):
# Test attr access when user has launchpad.LimitedView permission.
formatter, request, any_person = self._make_formatter(
cache_permission=True)
self.assertNotEqual(
formatter.hidden, self._tales_value(attr, request))
def _test_can_view_attribute(self, attr, hidden=None):
# Test the visibility of the given attribute
self._test_can_view_attribute_no_login(attr, hidden)
self._test_can_view_attribute_no_permission(attr, hidden)
self._test_can_view_attribute_with_permission(attr)
def test_can_view_displayname(self):
self._test_can_view_attribute('displayname')
def test_can_view_unique_displayname(self):
self._test_can_view_attribute('unique_displayname')
def test_can_view_link(self):
self._test_can_view_attribute(
'link', u'<span class="sprite team"><hidden></span>')
def test_can_view_api_url(self):
self._test_can_view_attribute('api_url')
def test_can_view_url(self):
self._test_can_view_attribute('url')
def test_can_view_icon(self):
self._test_can_view_attribute(
'icon', '<span class="sprite team private"></span>')
class TestObjectFormatterAPI(TestCaseWithFactory):
"""Tests for ObjectFormatterAPI"""
layer = DatabaseFunctionalLayer
def test_object_link_ignores_default(self):
# The rendering of an object's link ignores any specified default
# value which would be used in the case where the object were None.
person = self.factory.makePerson()
person_link = test_tales(
'person/fmt:link::default value', person=person)
self.assertEqual(PersonFormatterAPI(person).link(None), person_link)
person_link = test_tales(
'person/fmt:link:bugs:default value', person=person)
self.assertEqual(PersonFormatterAPI(person).link(
None, rootsite='bugs'), person_link)
class TestFormattersAPI(TestCaseWithFactory):
"""Tests for FormattersAPI."""
layer = DatabaseFunctionalLayer
test_data = (
'http://localhost:8086/bar/baz/foo.html\n'
'ftp://localhost:8086/bar/baz/foo.bar.html\n'
'sftp://localhost:8086/bar/baz/foo.bar.html.\n'
'http://localhost:8086/bar/baz/foo.bar.html;\n'
'news://localhost:8086/bar/baz/foo.bar.html:\n'
'http://localhost:8086/bar/baz/foo.bar.html?\n'
'http://localhost:8086/bar/baz/foo.bar.html,\n'
'<http://localhost:8086/bar/baz/foo.bar.html>\n'
'<http://localhost:8086/bar/baz/foo.bar.html>,\n'
'<http://localhost:8086/bar/baz/foo.bar.html>.\n'
'<http://localhost:8086/bar/baz/foo.bar.html>;\n'
'<http://localhost:8086/bar/baz/foo.bar.html>:\n'
'<http://localhost:8086/bar/baz/foo.bar.html>?\n'
'(http://localhost:8086/bar/baz/foo.bar.html)\n'
'(http://localhost:8086/bar/baz/foo.bar.html),\n'
'(http://localhost:8086/bar/baz/foo.bar.html).\n'
'(http://localhost:8086/bar/baz/foo.bar.html);\n'
'(http://localhost:8086/bar/baz/foo.bar.html):\n'
'http://localhost/bar/baz/foo.bar.html?a=b&b=a\n'
'http://localhost/bar/baz/foo.bar.html?a=b&b=a.\n'
'http://localhost/bar/baz/foo.bar.html?a=b&b=a,\n'
'http://localhost/bar/baz/foo.bar.html?a=b&b=a;\n'
'http://localhost/bar/baz/foo.bar.html?a=b&b=a:\n'
'http://localhost/bar/baz/foo.bar.html?a=b&b='
'a:b;c@d_e%f~g#h,j!k-l+m$n*o\'p\n'
'http://www.searchtools.com/test/urls/(parens).html\n'
'http://www.searchtools.com/test/urls/-dash.html\n'
'http://www.searchtools.com/test/urls/_underscore.html\n'
'http://www.searchtools.com/test/urls/period.x.html\n'
'http://www.searchtools.com/test/urls/!exclamation.html\n'
'http://www.searchtools.com/test/urls/~tilde.html\n'
'http://www.searchtools.com/test/urls/*asterisk.html\n'
'irc://irc.freenode.net/launchpad\n'
'irc://irc.freenode.net/%23launchpad,isserver\n'
'mailto:[email protected]\n'
'jabber:[email protected]\n'
'http://localhost/foo?xxx&\n'
'http://localhost?testing=[square-brackets-in-query]\n')
def test_linkification_with_target(self):
# The text-to-html-with-target formatter sets the target
# attribute of the links it produces to _new.
linkified_text = test_tales(
'foo/fmt:text-to-html-with-target', foo=self.test_data)
tree = html.fromstring(linkified_text)
for link in tree.xpath('//a'):
self.assertEqual('_new', link.get('target'))
class TestNoneFormatterAPI(TestCaseWithFactory):
"""Tests for NoneFormatterAPI"""
layer = FunctionalLayer
def test_format_link_none(self):
# Test that format_link() handles None correctly.
self.assertEqual(format_link(None), 'None')
self.assertEqual(format_link(None, empty_value=''), '')
def test_valid_traversal(self):
# Traversal of allowed names works as expected.
allowed_names = set([
'approximatedate',
'approximateduration',
'break-long-words',
'date',
'datetime',
'displaydate',
'isodate',
'email-to-html',
'exactduration',
'lower',
'nice_pre',
'nl_to_br',
'pagetitle',
'rfc822utcdatetime',
'text-to-html',
'time',
'url',
'link',
])
for name in allowed_names:
self.assertEqual('', test_tales('foo/fmt:%s' % name, foo=None))
def test_value_override(self):
# Override of rendered value works as expected.
self.assertEqual(
'default value',
test_tales('foo/fmt:link::default value', foo=None))
self.assertEqual(
'default value',
test_tales('foo/fmt:link:rootsite:default value', foo=None))
def test_invalid_traversal(self):
# Traversal of invalid names raises an exception.
adapter = getAdapter(None, IPathAdapter, 'fmt')
traverse = getattr(adapter, 'traverse', None)
self.failUnlessRaises(TraversalError, traverse, "foo", [])
def test_shorten_traversal(self):
# Traversal of 'shorten' works as expected.
adapter = getAdapter(None, IPathAdapter, 'fmt')
traverse = getattr(adapter, 'traverse', None)
# We expect that the last item in extra will be popped off.
extra = ['1', '2']
self.assertEqual('', traverse('shorten', extra))
self.assertEqual(['1'], extra)
class TestIRCNicknameFormatterAPI(TestCaseWithFactory):
"""Tests for IRCNicknameFormatterAPI"""
layer = DatabaseFunctionalLayer
def test_nick_displayname(self):
person = self.factory.makePerson(name='fred')
ircset = getUtility(IIrcIDSet)
ircID = ircset.new(person, "irc.canonical.com", "fred")
self.assertEqual(
'fred on irc.canonical.com',
test_tales('nick/fmt:displayname', nick=ircID))
def test_nick_formatted_displayname(self):
person = self.factory.makePerson(name='fred')
ircset = getUtility(IIrcIDSet)
# Include some bogus markup to check escaping works.
ircID = ircset.new(person, "<b>irc.canonical.com</b>", "fred")
expected_html = test_tales(
'nick/fmt:formatted_displayname', nick=ircID)
self.assertEquals(
u'<strong>fred</strong>\n'
'<span class="lesser"> on </span>\n'
'<strong><b>irc.canonical.com</b></strong>\n',
expected_html)
class ObjectImageDisplayAPITestCase(TestCaseWithFactory):
"""Tests for ObjectImageDisplayAPI"""
layer = LaunchpadFunctionalLayer
def test_custom_icon_url_context_is_None(self):
# When the context is None, the URL is an empty string.
display_api = ObjectImageDisplayAPI(None)
self.assertEqual('', display_api.custom_icon_url())
def test_custom_icon_url_context_has_no_icon(self):
# When the context has not set the custom icon, the URL is None.
product = self.factory.makeProduct()
display_api = ObjectImageDisplayAPI(product)
self.assertEqual(None, display_api.custom_icon_url())
def test_custom_icon_url_context_has_an_icon(self):
# When the context has a custom icon, the URL is for the
# LibraryFileAlias.
icon = self.factory.makeLibraryFileAlias(
filename='smurf.png', content_type='image/png')
product = self.factory.makeProduct(icon=icon)
display_api = ObjectImageDisplayAPI(product)
self.assertEqual(icon.getURL(), display_api.custom_icon_url())
class TestDateTimeFormatterAPI(TestCase):
def test_yearDelta(self):
"""Test that year delta gives reasonable values."""
def assert_delta(expected, old, new):
old = datetime(*old, tzinfo=utc)
new = datetime(*new, tzinfo=utc)
delta = DateTimeFormatterAPI._yearDelta(old, new)
self.assertEqual(expected, delta)
assert_delta(1, (2000, 1, 1), (2001, 1, 1))
assert_delta(0, (2000, 1, 2), (2001, 1, 1))
# Check leap year handling (2004 is an actual leap year)
assert_delta(0, (2003, 10, 10), (2004, 2, 29))
assert_delta(0, (2004, 2, 29), (2005, 2, 28))
def getDurationsince(self, delta):
"""Return the durationsince for a given delta."""
creation = datetime(2000, 1, 1, tzinfo=utc)
formatter = DateTimeFormatterAPI(creation)
formatter._now = lambda: creation + delta
return formatter.durationsince()
def test_durationsince_in_years(self):
"""Values with different years are measured in years."""
self.assertEqual('1 year', self.getDurationsince(timedelta(366)))
self.assertEqual('2 years', self.getDurationsince(timedelta(731)))
def test_durationsince_in_day(self):
"""Values with different days are measured in days."""
self.assertEqual('1 day', self.getDurationsince(timedelta(1)))
self.assertEqual('365 days', self.getDurationsince(timedelta(365)))
def test_durationsince_in_hours(self):
"""Values with different hours are measured in hours."""
self.assertEqual('2 hours', self.getDurationsince(timedelta(0, 7200)))
self.assertEqual('1 hour', self.getDurationsince(timedelta(0, 3600)))
def test_durationsince_in_minutes(self):
"""Values with different minutes are measured in minutes."""
five = self.getDurationsince(timedelta(0, 300))
self.assertEqual('5 minutes', five)
self.assertEqual('1 minute', self.getDurationsince(timedelta(0, 60)))
def test_durationsince_in_seconds(self):
"""Values in seconds are reported as "less than a minute."""
self.assertEqual('less than a minute',
self.getDurationsince(timedelta(0, 59)))
class TestPackageBuildFormatterAPI(TestCaseWithFactory):
"""Tests for PackageBuildFormatterAPI."""
layer = LaunchpadFunctionalLayer
def _make_public_build_for_private_team(self):
spph = self.factory.makeSourcePackagePublishingHistory()
team_owner = self.factory.makePerson()
private_team = self.factory.makeTeam(
owner=team_owner, visibility=PersonVisibility.PRIVATE)
p3a = self.factory.makeArchive(owner=private_team, private=True)
build = self.factory.makeBinaryPackageBuild(
source_package_release=spph.sourcepackagerelease, archive=p3a)
return build, p3a, team_owner
def test_public_build_private_team_no_permission(self):
# A `PackageBuild` for a public `SourcePackageRelease` in an archive
# for a private team is rendered gracefully when the user has no
# permission.
build, _, _ = self._make_public_build_for_private_team()
# Make sure this is a valid test; the build itself must be public.
self.assertTrue(check_permission('launchpad.View', build))
self.assertEqual('private job', format_link(build))
def test_public_build_private_team_with_permission(self):
# Members of a private team can see their builds.
build, p3a, team_owner = self._make_public_build_for_private_team()
login_person(team_owner)
self.assertIn(
"[%s/%s]" % (p3a.owner.name, p3a.name), format_link(build))
| agpl-3.0 | 2,405,106,669,734,420,000 | 36.565815 | 78 | 0.637885 | false |
freedomhui/cinder | cinder/version.py | 1 | 1193 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
CINDER_VERSION = ['2012', '2', '1']
YEAR, COUNT, REVISION = CINDER_VERSION
FINAL = False # This becomes true at Release Candidate time
def canonical_version_string():
return '.'.join(filter(None, CINDER_VERSION))
def version_string():
if FINAL:
return canonical_version_string()
else:
return '%s-dev' % (canonical_version_string(),)
def vcs_version_string():
return 'LOCALBRANCH:LOCALREVISION'
def version_string_with_vcs():
return '%s-%s' % (canonical_version_string(), vcs_version_string())
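# With the constants above (FINAL = False) the helpers evaluate to, for example:
#     canonical_version_string()  -> '2012.2.1'
#     version_string()            -> '2012.2.1-dev'
#     version_string_with_vcs()   -> '2012.2.1-dev-LOCALBRANCH:LOCALREVISION'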
| apache-2.0 | 8,558,772,972,088,632,000 | 30.394737 | 78 | 0.699078 | false |
cercisanat/cercisanat.com | cerci_admin/management/commands/epub.py | 1 | 7038 | #!/usr/bin/env python
#coding:utf-8
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.template import Context
from django.template.loader import get_template
from cerci_issue.models import Issue
import os
import shutil
from bs4 import BeautifulSoup
from django.core.files.storage import default_storage
import sys
from xml.dom.minidom import parse, parseString
import codecs
import string
import mimetypes
def translate_non_alphanumerics(to_translate, translate_to=u''):
not_letters_or_digits = string.punctuation.replace('_', '').replace('-', '')
translate_table = dict((ord(char), translate_to) for char in not_letters_or_digits)
return to_translate.translate(translate_table)
def generate_manifest(path):
pwd = os.getcwd()
os.chdir(path)
try:
container = parse(os.path.join(path, 'META-INF/container.xml'))
except IOError:
raise Exception('container.xml not found in specified path')
ops_path = container.getElementsByTagName("rootfiles")[0]\
.getElementsByTagName("rootfile")[0].attributes['full-path'].value
manifest = parseString('<manifest></manifest>')
mimetypes.add_type('application/vnd.ms-opentype', '.ttf', True)
mimetypes.add_type('application/vnd.ms-opentype', '.otf', True)
    mimetypes.add_type('application/font-woff', '.woff', True)
excluded_files = ['package.opf']
special_files = {'toc.xhtml': {'properties': 'nav'}}
output = ''
for root, dirs, files in os.walk(os.path.join(path.decode('utf-8'), os.path.dirname(ops_path))):
for filename in files:
relpath = os.path.relpath(os.path.join(root, filename), os.path.dirname(ops_path))
html_id = translate_non_alphanumerics(relpath.replace('/', '__').replace(' ', ''))
mimetype = mimetypes.guess_type(relpath)
if mimetype[0]:
if not relpath in excluded_files:
item = manifest.createElement('item')
item.setAttribute('id', html_id)
item.setAttribute('href', relpath)
item.setAttribute('media-type', mimetype[0])
if relpath in special_files:
for attr in special_files[relpath]:
item.setAttribute(attr, special_files[relpath][attr])
output += item.toxml() + '\n'
os.chdir(pwd)
return output
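# Illustrative sketch (the path and file layout are assumptions): for an
# unpacked EPUB at /tmp/book/container whose META-INF/container.xml points at
# OPS/package.opf, a file OPS/toc.xhtml would be emitted as
#     <item id="tocxhtml" href="toc.xhtml"
#           media-type="application/xhtml+xml" properties="nav"/>
# while package.opf itself is skipped via excluded_files.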
def generate_spine(spine):
output = ''
for item in spine:
output += '<itemref idref="%s"/>\n' % item.replace('.', '')
return output
def generate_toc(spine):
output = ''
for item in spine:
output += '<li><a href="%s">T</a></li>\n' % item
return output
class Command(BaseCommand):
args = '<issue_number>'
help = 'Generates an epub file whish is most probably invalid'
def handle(self, *args, **options):
issue_number = args[0]
issue = Issue.objects.get(number=issue_number)
issuecontents = issue.get_contents()
skeleton_path = os.path.join(os.path.dirname(__file__), 'epub-skeleton')
epub_files_root = os.path.join(settings.MEDIA_ROOT, 'epubs')
path = os.path.join(epub_files_root, '%s-%s' % (issue_number, issue.slug))
OPS = os.path.join(path, 'container', 'OPS')
if not os.path.exists(epub_files_root):
os.mkdir(epub_files_root)
if os.path.exists(path):
shutil.rmtree(path)
shutil.copytree(skeleton_path, path)
template = get_template('epub/content.xhtml')
spine = []
for counter, issuecontent in enumerate(issuecontents):
xhtml = template.render(Context({'issue': issue, 'issuecontent': issuecontent.content}))
soup = BeautifulSoup(xhtml)
# save inline images added by ckeditor
for image in soup.findAll('img'):
image_src = image.get('src')
if image_src.startswith('/media/ckeditor/'):
image_src_path = image_src.replace('/media/', '')
image_src_output = image_src.replace('/media/ckeditor/', 'images/').replace(' ', '')
image_dirname = os.path.dirname(image_src_output)
if not os.path.exists(os.path.join(OPS, image_dirname )):
os.makedirs(os.path.join(OPS, image_dirname ))
if default_storage.exists(image_src_path):
with default_storage.open(image_src_path, 'r') as input_file:
with open(os.path.join(OPS, image_src_output), 'w') as output_file:
output_file.write(input_file.read())
image.attrs['src'] = image_src_output
# save content images (figures)
figures = issuecontent.content.figures.all()
if figures.count():
image_src = figures[0].image.url
image_src_path = figures[0].image.path
image_src_output = image_src.replace('/media/', '').replace(' ', '')
image_dirname = os.path.dirname(image_src_output)
if not os.path.exists(os.path.join(OPS, image_dirname )):
os.makedirs(os.path.join(OPS, image_dirname ))
if default_storage.exists(image_src_path):
with default_storage.open(image_src_path, 'r') as input_file:
with open(os.path.join(OPS, image_src_output), 'w') as output_file:
output_file.write(input_file.read())
xhtml = soup.prettify()
filename = 'article-%s.xhtml'%(counter+1)
spine.append(filename)
with open(os.path.join(OPS, 'article-%s.xhtml'%(counter+1)), 'w') as f:
f.write(xhtml.encode('utf8'))
# create toc.xhtml
template = get_template('epub/toc.xhtml')
generated_toc = generate_toc(spine)
tocxhtml = template.render(Context({'issue': issue,
'generated_toc': generated_toc}))
soup = BeautifulSoup(tocxhtml)
tocxhtml = soup.prettify()
with open(os.path.join(OPS, 'toc.xhtml'), 'w') as f:
f.write(tocxhtml.encode('utf8'))
# generate package.opf
generated_manifest = generate_manifest(os.path.join(path, 'container'))
generated_spine = generate_spine(spine)
template = get_template('epub/package.opf')
package_opf = template.render(Context({'issue': issue,
'generated_manifest': generated_manifest,
'generated_spine': generated_spine}))
soup = BeautifulSoup(package_opf)
package_opf = soup.prettify()
with open(os.path.join(OPS, 'package.opf'), 'w') as f:
f.write(package_opf.encode('utf8'))
| gpl-3.0 | -2,813,511,705,482,807,300 | 42.714286 | 104 | 0.578147 | false |
gencer/sentry | tests/sentry/api/endpoints/test_user_notifications.py | 1 | 1384 | from __future__ import absolute_import
from sentry.testutils import APITestCase
from django.core.urlresolvers import reverse
class UserListTest(APITestCase):
def test_lookup_self(self):
user = self.create_user(email='[email protected]')
self.login_as(user=user)
url = reverse(
'sentry-api-0-user-notifications', kwargs={
'user_id': 'me',
}
)
resp = self.client.get(url, format='json')
assert resp.status_code == 200
def test_lookup_other_user(self):
user_a = self.create_user(email='[email protected]')
user_b = self.create_user(email='[email protected]')
self.login_as(user=user_b)
url = reverse(
'sentry-api-0-user-notifications', kwargs={
'user_id': user_a.id
}
)
resp = self.client.get(url, format='json')
assert resp.status_code == 403
def test_superuser(self):
user = self.create_user(email='[email protected]')
superuser = self.create_user(email='[email protected]', is_superuser=True)
self.login_as(user=superuser, superuser=True)
url = reverse(
'sentry-api-0-user-notifications', kwargs={
'user_id': user.id,
}
)
resp = self.client.get(url, format='json')
assert resp.status_code == 200
| bsd-3-clause | -1,322,648,062,000,574,000 | 25.615385 | 78 | 0.570087 | false |
Bismarrck/pymatgen | pymatgen/io/zeopp.py | 1 | 23655 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import os
import re
from monty.io import zopen
from monty.dev import requires
from monty.tempfile import ScratchDir
from pymatgen.core.structure import Structure, Molecule
from pymatgen.core.lattice import Lattice
from pymatgen.io.cssr import Cssr
from pymatgen.io.xyz import XYZ
try:
from zeo.netstorage import AtomNetwork, VoronoiNetwork
from zeo.area_volume import volume, surface_area
    from zeo.cluster import get_nearest_largest_diameter_highaccuracy_vornode, \
        generate_simplified_highaccuracy_voronoi_network, \
        prune_voronoi_network_close_node
zeo_found = True
except ImportError:
zeo_found = False
"""
Module implementing classes and functions to use Zeo++.
Zeo++ Installation Steps:
========================
1) Zeo++ requires Voro++. Download Voro++ from code.lbl.gov using
subversion:
"svn checkout --username anonsvn https://code.lbl.gov/svn/voro/trunk
Password is anonsvn.
2) Stable version of Zeo++ can be obtained from
http://www.maciejharanczyk.info/Zeopp/
Alternatively it can be obtained from code.lbl.gov. Replace voro
with zeo.
3) (Optional) Install cython from pip
Mac OS X:
4) (a) Edit the Voro++/voro/trunk/config.mk file to suit your environment
(compiler, linker).
(b) Run make command
5) (a) Edit the Zeo++/trunk/cython_wrapper/setup.py to correctly point to
Voro++ directory.
(b) Run "python setup.py develop" to install Zeo++ python bindings.
Be patient, it will take a while.
Linux:
4) (a) Edit the Voro++/voro/trunk/config.mk file to suit your environment.
(b) Also add -fPIC option to CFLAGS variable in config.mk file.
(c) Run make command
5) (a) Go to Zeo++/zeo/trunk folder and compile zeo++ library using the
command "make dylib".
(b) Edit the Zeo++/trunk/cython_wrapper/setup_alt.py to correctly
point to Voro++ directory.
(c) Run "python setup_alt.py develop" to install Zeo++ python bindings.
Zeo++ Post-Installation Checking:
==============================
1) Go to pymatgen/io/tests and run "python test_zeoio.py"
If Zeo++ python bindings are properly installed, the tests should
pass. One or two tests will be skipped.
 2) Go to pymatgen/analysis/defects/tests and run
"python test_point_defects.py". Lots of tests will be skipped if GULP
is not installed. But there should be no errors.
"""
__author__ = "Bharat Medasani"
__copyright = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Bharat Medasani"
__email__ = "[email protected]"
__date__ = "Aug 2, 2013"
class ZeoCssr(Cssr):
"""
ZeoCssr adds extra fields to CSSR sites to conform with Zeo++
input CSSR format. The coordinate system is rorated from xyz to zyx.
This change aligns the pivot axis of pymatgen (z-axis) to pivot axis
of Zeo++ (x-axis) for structurural modifications.
Args:
structure: A structure to create ZeoCssr object
"""
def __init__(self, structure):
super(ZeoCssr, self).__init__(structure)
def __str__(self):
"""
CSSR.__str__ method is modified to padd 0's to the CSSR site data.
The padding is to conform with the CSSR format supported Zeo++.
The oxidation state is stripped from site.specie
Also coordinate system is rotated from xyz to zxy
"""
output = [
"{:.4f} {:.4f} {:.4f}"
            # .format(*self.structure.lattice.abc),
            .format(self.structure.lattice.c,
                    self.structure.lattice.a,
                    self.structure.lattice.b),
            "{:.2f} {:.2f} {:.2f} SPGR = 1 P 1 OPT = 1"
            # .format(*self.structure.lattice.angles),
            .format(self.structure.lattice.gamma,
                    self.structure.lattice.alpha,
                    self.structure.lattice.beta),
"{} 0".format(len(self.structure)),
"0 {}".format(self.structure.formula)
]
for i, site in enumerate(self.structure.sites):
            # if not hasattr(site, 'charge'):
            #     charge = 0
            # else:
            #     charge = site.charge
            charge = site.charge if hasattr(site, 'charge') else 0
            # specie = site.specie.symbol
            specie = site.species_string
            output.append(
                "{} {} {:.4f} {:.4f} {:.4f} 0 0 0 0 0 0 0 0 {:.4f}"
                .format(
                    i + 1, specie, site.c, site.a, site.b, charge
                    # i+1, site.specie, site.a, site.b, site.c, site.charge
)
)
return "\n".join(output)
@staticmethod
def from_string(string):
"""
Reads a string representation to a ZeoCssr object.
Args:
string: A string representation of a ZeoCSSR.
Returns:
ZeoCssr object.
"""
lines = string.split("\n")
toks = lines[0].split()
lengths = [float(i) for i in toks]
toks = lines[1].split()
angles = [float(i) for i in toks[0:3]]
# Zeo++ takes x-axis along a and pymatgen takes z-axis along c
a = lengths.pop(-1)
lengths.insert(0, a)
alpha = angles.pop(-1)
angles.insert(0, alpha)
latt = Lattice.from_lengths_and_angles(lengths, angles)
sp = []
coords = []
chrg = []
for l in lines[4:]:
m = re.match(r'\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+' +
r'([0-9\-\.]+)\s+(?:0\s+){8}([0-9\-\.]+)', l.strip())
if m:
sp.append(m.group(1))
                # coords.append([float(m.group(i)) for i in xrange(2, 5)])
# Zeo++ takes x-axis along a and pymatgen takes z-axis along c
coords.append([float(m.group(i)) for i in [3, 4, 2]])
chrg.append(m.group(5))
return ZeoCssr(
Structure(latt, sp, coords, site_properties={'charge': chrg})
)
@staticmethod
def from_file(filename):
"""
Reads a CSSR file to a ZeoCssr object.
Args:
filename: Filename to read from.
Returns:
ZeoCssr object.
"""
with zopen(filename, "r") as f:
return ZeoCssr.from_string(f.read())
class ZeoVoronoiXYZ(XYZ):
"""
Class to read Voronoi Nodes from XYZ file written by Zeo++.
The sites have an additional column representing the voronoi node radius.
The voronoi node radius is represented by the site property voronoi_radius.
Args:
mol: Input molecule holding the voronoi node information
"""
def __init__(self, mol):
super(ZeoVoronoiXYZ, self).__init__(mol)
@staticmethod
def from_string(contents):
"""
Creates Zeo++ Voronoi XYZ object from a string.
from_string method of XYZ class is being redefined.
Args:
contents: String representing Zeo++ Voronoi XYZ file.
Returns:
ZeoVoronoiXYZ object
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
prop = []
coord_patt = re.compile(
r"(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" +
r"([0-9\-\.]+)"
)
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1)) # this is 1-indexed
                # coords.append(map(float, m.groups()[1:4])) # this is 0-indexed
coords.append([float(j)
for j in [m.group(i) for i in [3, 4, 2]]])
prop.append(float(m.group(5)))
return ZeoVoronoiXYZ(
Molecule(sp, coords, site_properties={'voronoi_radius': prop})
)
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename) as f:
return ZeoVoronoiXYZ.from_string(f.read())
def __str__(self):
output = [str(len(self._mol)), self._mol.composition.formula]
fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(
self.precision
)
for site in self._mol:
output.append(fmtstr.format(
site.specie.symbol, site.z, site.x, site.y,
                # site.specie, site.x, site.y, site.z,
site.properties['voronoi_radius']
))
return "\n".join(output)
@requires(zeo_found,
          "get_voronoi_nodes requires Zeo++ cython extension to be "
          "installed. Please contact developers of Zeo++ to obtain it.")
def get_voronoi_nodes(structure, rad_dict=None, probe_rad=0.1):
"""
Analyze the void space in the input structure using voronoi decomposition
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
Returns:
voronoi nodes as pymatgen.core.structure.Strucutre within the
unit cell defined by the lattice of input structure
voronoi face centers as pymatgen.core.structure.Strucutre within the
unit cell defined by the lattice of input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
fp.write("{} {}\n".format(el, rad_dict[el].real))
atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
vornet, vor_edge_centers, vor_face_centers = \
atmnet.perform_voronoi_decomposition()
vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + '_voro.xyz'
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties['voronoi_radius'])
lattice = Lattice.from_lengths_and_angles(
structure.lattice.abc, structure.lattice.angles)
vor_node_struct = Structure(
lattice, species, coords, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
        # PMG-Zeo c<->a transformation for voronoi face centers
        rot_face_centers = [(center[1], center[2], center[0]) for center in
                            vor_face_centers]
        rot_edge_centers = [(center[1], center[2], center[0]) for center in
vor_edge_centers]
species = ["X"] * len(rot_face_centers)
prop = [0.0] * len(rot_face_centers) # Vor radius not evaluated for fc
vor_facecenter_struct = Structure(
lattice, species, rot_face_centers, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
species = ["X"] * len(rot_edge_centers)
        prop = [0.0] * len(rot_edge_centers)  # Vor radius not evaluated for edge centers
vor_edgecenter_struct = Structure(
lattice, species, rot_edge_centers, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
return vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct
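# Illustrative usage sketch (not part of the original module). It assumes the
# Zeo++ extensions are importable and that "POSCAR" is any structure file
# pymatgen can read; the ionic radii below are placeholder values.
#
#     from pymatgen.core.structure import Structure
#     structure = Structure.from_file("POSCAR")
#     rad_dict = {"Li": 0.76, "O": 1.40}  # hypothetical ionic radii (Angstrom)
#     nodes, edge_centers, face_centers = get_voronoi_nodes(
#         structure, rad_dict=rad_dict, probe_rad=0.1)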
def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):
"""
Analyze the void space in the input structure using high accuracy
voronoi decomposition.
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms.
Default is 0.1 A
Returns:
        voronoi nodes as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
        voronoi face centers as pymatgen.core.structure.Structure within the
unit cell defined by the lattice of input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_flag = True
rad_file = name + ".rad"
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
print("{} {}".format(el, rad_dict[el].real), file=fp)
atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
        # vornet, vor_edge_centers, vor_face_centers = \
        #        atmnet.perform_voronoi_decomposition()
        red_ha_vornet = \
            prune_voronoi_network_close_node(atmnet)
        # generate_simplified_highaccuracy_voronoi_network(atmnet)
        # get_nearest_largest_diameter_highaccuracy_vornode(atmnet)
red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + '_voro.xyz'
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties['voronoi_radius'])
lattice = Lattice.from_lengths_and_angles(
structure.lattice.abc, structure.lattice.angles)
vor_node_struct = Structure(
lattice, species, coords, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
return vor_node_struct
@requires(zeo_found,
          "get_voronoi_nodes requires Zeo++ cython extension to be "
          "installed. Please contact developers of Zeo++ to obtain it.")
def get_free_sphere_params(structure, rad_dict=None, probe_rad=0.1):
"""
Analyze the void space in the input structure using voronoi decomposition
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
Returns:
        voronoi nodes as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
        voronoi face centers as pymatgen.core.structure.Structure within the
unit cell defined by the lattice of input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
fp.write("{} {}\n".format(el, rad_dict[el].real))
atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
        out_file = "temp.res"
        atmnet.calculate_free_sphere_parameters(out_file)
        if os.path.isfile(out_file) and os.path.getsize(out_file) > 0:
            with open(out_file, "rt") as fp:
                output = fp.readline()
        else:
            output = ""
        fields = [val.strip() for val in output.split()][1:4]
        if len(fields) == 3:
            fields = [float(field) for field in fields]
            free_sphere_params = {'inc_sph_max_dia': fields[0],
                                  'free_sph_max_dia': fields[1],
                                  'inc_sph_along_free_sph_path_max_dia': fields[2]}
return free_sphere_params
# Deprecated. Not needed anymore
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3,
probe_rad=0.1):
"""
Computes the volume and surface area of isolated void using Zeo++.
Useful to compute the volume and surface area of vacant site.
Args:
structure: pymatgen Structure containing vacancy
rad_dict(optional): Dictionary with short name of elements and their
radii.
chan_rad(optional): Minimum channel Radius.
probe_rad(optional): Probe radius for Monte Carlo sampling.
Returns:
volume: floating number representing the volume of void
"""
with ScratchDir('.'):
name = "temp_zeo"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
if rad_dict:
rad_file = name + ".rad"
with open(rad_file, 'w') as fp:
for el in rad_dict.keys():
                    fp.write("{0} {1}\n".format(el, rad_dict[el]))
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
vol_str = volume(atmnet, 0.3, probe_rad, 10000)
sa_str = surface_area(atmnet, 0.3, probe_rad, 10000)
vol = None
sa = None
for line in vol_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
vol = -1.0
break
if float(fields[1]) == 0:
vol = -1.0
break
vol = float(fields[3])
for line in sa_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
                    # raise ValueError("Too many voids")
sa = -1.0
break
if float(fields[1]) == 0:
sa = -1.0
break
sa = float(fields[3])
if not vol or not sa:
raise ValueError("Error in zeo++ output stream")
return vol, sa
| mit | 8,012,744,464,591,307,000 | 36.135008 | 81 | 0.582752 | false |
LiorAbitbol/vmwvro | vmwvro/clients.py | 1 | 1707 | """
VMware vRealize Client implementation and supporting objects.
Copyright (c) 2017, Lior P. Abitbol <[email protected]>
"""
import logging
import requests
from .config import URL_GET_WORKFLOW_BY_ID
from .utils import format_url, is_json
from .workflows import Workflow
class Client:
def __init__(self, session):
"""
Returns a new Client instance
:param session:
Session object containing Url and authentication for vRO.
"""
self.log = logging.getLogger(__class__.__name__)
if session.url is None or session.basic_auth is None:
self.log.error("Session object is invalid, missing Url and/or authentication data.")
raise ValueError("Session object is invalid!")
self.session = session
def get_workflow(self, workflow_id):
"""
Get a Workflow object by Id lookup.
:param workflow_id:
The Id of the Workflow to get.
"""
url = format_url(URL_GET_WORKFLOW_BY_ID,
base_url=self.session.url,
id=workflow_id)
headers = {
"Content-Type": "application/json",
"Accept": "application/json"
}
r = requests.get(url,
auth=self.session.basic_auth,
verify=self.session.verify_ssl,
headers=headers)
r.raise_for_status()
if not is_json(r.text):
raise ValueError("vRO did not return JSON response!")
wf = Workflow(session=self.session)
wf.load_from_json(data=r.json())
return wf
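if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). Rather than guess
    # the real Session API, it uses a stand-in exposing the three attributes
    # this Client actually reads: url, basic_auth and verify_ssl; the vRO host
    # and workflow id are placeholders.
    from collections import namedtuple
    from requests.auth import HTTPBasicAuth
    FakeSession = namedtuple("FakeSession", ["url", "basic_auth", "verify_ssl"])
    session = FakeSession(url="https://vro.example.com:8281",
                          basic_auth=HTTPBasicAuth("admin", "secret"),
                          verify_ssl=False)
    client = Client(session)
    print(client.get_workflow("1a2b3c4d-0000-0000-0000-000000000000"))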
| mit | 1,919,795,720,727,053,300 | 26.932203 | 96 | 0.556532 | false |
barsnadcat/evegant | ItemStackGraphic.py | 1 | 1772 |
from PyQt5.QtCore import QPointF, QRect, QRectF, QSize, QSizeF, Qt
from PyQt5.QtWidgets import QGraphicsItem, QGraphicsPixmapItem
from PyQt5.QtGui import QFontMetricsF, QFont
class InputGraphic(QGraphicsItem):
def __init__(self, aItemStack, aParent, aPos, aToolkitTypes):
super().__init__(aParent)
self.setPos(aPos)
self.itemStack = aItemStack
self.font = QFont()
icon = QGraphicsPixmapItem(aToolkitTypes.GetTypePixmap(self.itemStack.itemId, 32), self)
icon.setPos(QPointF(2, 2))
def paint(self, painter, option, widget=None):
rect = self.boundingRect()
painter.drawText(rect, Qt.AlignVCenter + Qt.AlignRight, str(self.itemStack.ammount))
def GetScenePos(self):
return self.scenePos() + QPointF(0, 17)
def boundingRect(self):
fm = QFontMetricsF(self.font)
width = 34 + fm.width(str(self.itemStack.ammount))
return QRectF(0, 0, width, 35)
def GetItemId(self):
return self.itemStack.itemId
class OutputGraphic(QGraphicsItem):
def __init__(self, aItemStack, aParent, aPos, aToolkitTypes):
super().__init__(aParent)
self.setPos(aPos)
self.itemStack = aItemStack
self.font = QFont()
icon = QGraphicsPixmapItem(aToolkitTypes.GetTypePixmap(self.itemStack.itemId, 32), self)
icon.setPos(QPointF(-34, 2))
def GetWidth(self):
fm = QFontMetricsF(self.font)
return 34 + fm.width(str(self.itemStack.ammount))
def paint(self, painter, option, widget=None):
width = self.GetWidth()
rect = QRectF(-width, 0, width - 34, 35)
painter.drawText(rect, Qt.AlignVCenter + Qt.AlignRight, str(self.itemStack.ammount))
def GetScenePos(self):
return self.scenePos() + QPointF(0, 17)
def boundingRect(self):
width = self.GetWidth()
return QRectF(-width, 0, width, 35)
def GetItemId(self):
return self.itemStack.itemId | gpl-3.0 | 6,923,136,905,170,005,000 | 29.568966 | 90 | 0.730813 | false |
pytroll/satpy | satpy/tests/reader_tests/test_viirs_compact.py | 1 | 121654 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2019 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Module for testing the satpy.readers.viirs_compact module."""
import os
import tempfile
import unittest
from contextlib import suppress
import h5py
import numpy as np
class TestCompact(unittest.TestCase):
"""Test class for reading compact viirs format."""
def setUp(self):
"""Create a fake file from scratch."""
fake_dnb = {
"All_Data": {
"ModeGran": {"value": 0},
"ModeScan": {
"value": np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
254,
249,
],
dtype=np.uint8,
)
},
"NumberOfScans": {"value": np.array([47])},
"VIIRS-DNB-GEO_All": {
"AlignmentCoefficient": {
"value": np.array(
[
2.11257413e-02,
2.11152732e-02,
2.11079046e-02,
2.10680142e-02,
1.80840008e-02,
1.80402063e-02,
1.79968309e-02,
1.79477539e-02,
2.20463774e-03,
2.17431062e-03,
2.14360282e-03,
2.11503846e-03,
2.08630669e-03,
2.05924874e-03,
2.03177333e-03,
2.00573727e-03,
1.98072987e-03,
1.95503305e-03,
1.93077011e-03,
1.90702057e-03,
1.88353716e-03,
1.86104013e-03,
1.83863181e-03,
1.81696517e-03,
1.79550308e-03,
1.77481642e-03,
1.75439729e-03,
1.73398503e-03,
1.71459839e-03,
1.69516564e-03,
1.67622324e-03,
1.65758410e-03,
1.63990213e-03,
1.62128301e-03,
1.60375470e-03,
1.58667017e-03,
1.61543000e-03,
1.59775047e-03,
1.50719041e-03,
1.48937735e-03,
1.47257745e-03,
1.50070526e-03,
1.48288533e-03,
9.29064234e-04,
9.12246935e-04,
8.95748264e-04,
8.71886965e-04,
8.55044520e-04,
8.38686305e-04,
8.18263041e-04,
8.01501446e-04,
7.85346841e-04,
1.15984806e-03,
1.14326552e-03,
1.12648588e-03,
1.11018715e-03,
1.09399087e-03,
1.19698711e-03,
1.18051842e-03,
1.16404379e-03,
1.14832399e-03,
9.92591376e-04,
9.75896895e-04,
9.59663419e-04,
9.43415158e-04,
9.27662419e-04,
8.92253709e-04,
8.75947590e-04,
8.60177504e-04,
8.44484195e-04,
8.35279003e-04,
8.19236680e-04,
8.03303672e-04,
7.87482015e-04,
7.60449213e-04,
7.44239136e-04,
7.28625571e-04,
7.12990935e-04,
6.89090986e-04,
6.73000410e-04,
6.57248020e-04,
6.41623745e-04,
6.20219158e-04,
6.04308851e-04,
5.88596100e-04,
5.73108089e-04,
3.65344196e-04,
3.49639275e-04,
3.34273063e-04,
4.81286290e-04,
4.65485587e-04,
4.49862011e-04,
4.34543617e-04,
4.19324206e-04,
2.60536268e-04,
2.45052564e-04,
2.29740850e-04,
2.34466774e-04,
2.18822126e-04,
2.03370175e-04,
1.88058810e-04,
1.60192372e-04,
1.44485937e-04,
1.28920830e-04,
3.45615146e-04,
3.30171984e-04,
3.14682693e-04,
2.99300562e-04,
2.83925037e-04,
2.68518896e-04,
2.53254839e-04,
2.37950648e-04,
2.22716670e-04,
2.07562072e-04,
1.92296386e-04,
1.77147449e-04,
1.61994336e-04,
1.46895778e-04,
1.31844325e-04,
1.16730320e-04,
1.01757469e-04,
8.67861963e-05,
7.18669180e-05,
5.70719567e-05,
4.24701866e-05,
2.84846719e-05,
1.70599415e-05,
-1.47213286e-05,
-2.33691408e-05,
-3.68025649e-05,
-5.12388433e-05,
-6.59972284e-05,
-8.08926561e-05,
-9.58433884e-05,
-1.10882705e-04,
-1.25976600e-04,
-1.41044657e-04,
-1.56166439e-04,
-1.71307023e-04,
-1.86516074e-04,
-2.01731804e-04,
-2.16980450e-04,
-2.32271064e-04,
-2.47527263e-04,
-2.62940506e-04,
-2.78283434e-04,
-2.93711084e-04,
-3.09180934e-04,
-3.24661058e-04,
-3.40237195e-04,
-1.27807143e-04,
-1.43646437e-04,
-1.59638614e-04,
-1.87593061e-04,
-2.03169184e-04,
-2.18941437e-04,
-2.34920750e-04,
-2.30605408e-04,
-2.46262236e-04,
-2.62226094e-04,
-4.19838558e-04,
-4.35510388e-04,
-4.51152271e-04,
-4.67120990e-04,
-4.83241311e-04,
-3.37647041e-04,
-3.53568990e-04,
-3.69836489e-04,
-5.76354389e-04,
-5.92070050e-04,
-6.08178903e-04,
-6.24440494e-04,
-6.45648804e-04,
-6.61431870e-04,
-6.77491073e-04,
-6.93967624e-04,
-7.17683870e-04,
-7.33471534e-04,
-7.49999890e-04,
-7.66390527e-04,
-7.93468382e-04,
-8.09502264e-04,
-8.25728697e-04,
-8.42282083e-04,
-8.51265620e-04,
-8.67322611e-04,
-8.83649045e-04,
-9.00280487e-04,
-9.35055199e-04,
-9.51097580e-04,
-9.67527216e-04,
-9.84144746e-04,
-1.00128003e-03,
-1.15522649e-03,
-1.17168750e-03,
-1.18826574e-03,
-1.20496599e-03,
-1.10272120e-03,
-1.11865194e-03,
-1.13539130e-03,
-1.15241797e-03,
-1.16964686e-03,
-7.97322951e-04,
-8.14269355e-04,
-8.31696263e-04,
-8.51555436e-04,
-8.68656265e-04,
-8.86220601e-04,
-9.09406052e-04,
-9.26509325e-04,
-9.44124535e-04,
-1.49479776e-03,
-1.51314179e-03,
-1.48387800e-03,
-1.50146009e-03,
-1.51945755e-03,
-1.61006744e-03,
-1.62846781e-03,
-1.59783731e-03,
-1.61545863e-03,
-1.63336343e-03,
-1.65167439e-03,
-1.67034590e-03,
-1.68956630e-03,
-1.70884258e-03,
-1.72863202e-03,
-1.74859120e-03,
-1.76901231e-03,
-1.79015659e-03,
-1.81144674e-03,
-1.83329231e-03,
-1.85552111e-03,
-1.87840930e-03,
-1.90151483e-03,
-1.92550803e-03,
-1.94982730e-03,
-1.97511422e-03,
-2.00066133e-03,
-2.02709576e-03,
-2.05422146e-03,
-2.08215159e-03,
-2.11093877e-03,
-2.14011059e-03,
-2.17073411e-03,
-2.20196834e-03,
-2.23409734e-03,
-2.26700748e-03,
-2.30150856e-03,
-2.33719964e-03,
-2.37406371e-03,
-2.41223071e-03,
-2.45184498e-03,
-2.49327719e-03,
-2.53651105e-03,
-2.58166087e-03,
-2.62895599e-03,
-2.67871981e-03,
-2.73117283e-03,
-5.49861044e-03,
-5.55437338e-03,
-5.61159104e-03,
-5.67073002e-03,
-5.73173212e-03,
-5.79498662e-03,
-5.85969677e-03,
-5.92768658e-03,
-5.99809457e-03,
-6.07080618e-03,
-6.14715228e-03,
-6.22711331e-03,
],
dtype=np.float32,
)
},
"ExpansionCoefficient": {
"value": np.array(
[
1.17600127e-03,
1.17271533e-03,
1.17000856e-03,
1.16674276e-03,
2.11251900e-03,
2.10516527e-03,
2.09726905e-03,
2.08941335e-03,
1.63907595e-02,
1.58577170e-02,
1.53679820e-02,
1.49007449e-02,
1.44708352e-02,
1.40612368e-02,
1.36818690e-02,
1.33193973e-02,
1.29744308e-02,
1.26568424e-02,
1.23488475e-02,
1.20567940e-02,
1.17803067e-02,
1.15150018e-02,
1.12629030e-02,
1.10203745e-02,
1.07905651e-02,
1.05690639e-02,
1.03563424e-02,
1.01526314e-02,
9.95650515e-03,
9.76785459e-03,
9.58597753e-03,
9.41115711e-03,
9.23914276e-03,
9.07964632e-03,
8.92116502e-03,
8.76654685e-03,
9.04925726e-03,
8.88936501e-03,
9.14804544e-03,
8.98920093e-03,
8.83030891e-03,
9.06952657e-03,
8.90891161e-03,
1.36343827e-02,
1.32706892e-02,
1.29242949e-02,
1.36271119e-02,
1.32572902e-02,
1.29025253e-02,
1.35165229e-02,
1.31412474e-02,
1.27808526e-02,
8.91761761e-03,
8.74674786e-03,
8.58181808e-03,
8.42147414e-03,
8.26664641e-03,
7.81304855e-03,
7.67400907e-03,
7.54208490e-03,
7.40892906e-03,
8.81091598e-03,
8.62924196e-03,
8.45206063e-03,
8.28018785e-03,
8.11239891e-03,
8.62185098e-03,
8.43446422e-03,
8.25031102e-03,
8.07087123e-03,
8.30837712e-03,
8.11944436e-03,
7.93648325e-03,
7.75875151e-03,
8.14332347e-03,
7.94676598e-03,
7.75293307e-03,
7.56529858e-03,
7.88933039e-03,
7.68536143e-03,
7.48489471e-03,
7.28917075e-03,
7.55438488e-03,
7.34063145e-03,
7.13229552e-03,
6.92783622e-03,
1.06161544e-02,
1.01234140e-02,
9.64432582e-03,
6.52031973e-03,
6.29310543e-03,
6.06948463e-03,
5.84984245e-03,
5.63343242e-03,
8.61937553e-03,
8.08268972e-03,
7.55874207e-03,
6.79610623e-03,
6.32849289e-03,
5.86955249e-03,
5.41723240e-03,
5.56734810e-03,
5.01116784e-03,
4.46233014e-03,
1.40874484e-03,
1.34475902e-03,
1.28140685e-03,
1.21824886e-03,
1.15505024e-03,
1.09222531e-03,
1.02962845e-03,
9.67168540e-04,
9.04808170e-04,
8.42478999e-04,
7.80681905e-04,
7.18652213e-04,
6.56902499e-04,
5.95146266e-04,
5.33432467e-04,
4.72071581e-04,
4.10460081e-04,
3.49062117e-04,
2.87777104e-04,
2.26464268e-04,
1.65259655e-04,
1.03993290e-04,
4.27830964e-05,
-1.84028686e-05,
-7.95840388e-05,
-1.40780976e-04,
-2.01987947e-04,
-2.63233029e-04,
-3.24499299e-04,
-3.85862397e-04,
-4.47216793e-04,
-5.08567959e-04,
-5.70152479e-04,
-6.31901203e-04,
-6.93684444e-04,
-7.55490037e-04,
-8.17523745e-04,
-8.79664498e-04,
-9.41973762e-04,
-1.00450485e-03,
-1.06710335e-03,
-1.12990546e-03,
-1.19290419e-03,
-1.25615683e-03,
-1.31971564e-03,
-1.38323894e-03,
-4.38789371e-03,
-4.93527949e-03,
-5.48970094e-03,
-5.34658274e-03,
-5.79780247e-03,
-6.25621388e-03,
-6.72366377e-03,
-7.48283789e-03,
-8.00681766e-03,
-8.54192488e-03,
-5.58420410e-03,
-5.79793099e-03,
-6.01683883e-03,
-6.23886706e-03,
-6.46463828e-03,
-9.56355780e-03,
-1.00387875e-02,
-1.05282217e-02,
-6.87109074e-03,
-7.07587786e-03,
-7.28309387e-03,
-7.49528036e-03,
-7.23363785e-03,
-7.42882164e-03,
-7.62982434e-03,
-7.83343613e-03,
-7.51076965e-03,
-7.69859226e-03,
-7.88733363e-03,
-8.08352232e-03,
-7.69890239e-03,
-7.87641760e-03,
-8.05852562e-03,
-8.24564695e-03,
-8.00882280e-03,
-8.18727538e-03,
-8.36882368e-03,
-8.55544209e-03,
-8.04922916e-03,
-8.21674801e-03,
-8.38823151e-03,
-8.56383517e-03,
-8.74411128e-03,
-7.35407788e-03,
-7.48245185e-03,
-7.61653157e-03,
-7.75389513e-03,
-8.20003450e-03,
-8.35770369e-03,
-8.51695240e-03,
-8.67962278e-03,
-8.84699915e-03,
-1.26767000e-02,
-1.30308550e-02,
-1.34020159e-02,
-1.27902590e-02,
-1.31374933e-02,
-1.35022206e-02,
-1.28020663e-02,
-1.31427627e-02,
-1.35003338e-02,
-8.81921593e-03,
-8.97676684e-03,
-8.73885304e-03,
-8.89289286e-03,
-9.05076787e-03,
-8.79113190e-03,
-8.94579384e-03,
-8.66949651e-03,
-8.81993212e-03,
-8.97467043e-03,
-9.13402718e-03,
-9.29924846e-03,
-9.47104022e-03,
-9.64829233e-03,
-9.83224157e-03,
-1.00242840e-02,
-1.02243433e-02,
-1.04304748e-02,
-1.06464764e-02,
-1.08723603e-02,
-1.11076497e-02,
-1.13517633e-02,
-1.16107482e-02,
-1.18797245e-02,
-1.21643478e-02,
-1.24597261e-02,
-1.27725713e-02,
-1.31026637e-02,
-1.34509858e-02,
-1.38195883e-02,
-1.42097492e-02,
-1.46267340e-02,
-1.50670996e-02,
-1.55417984e-02,
-1.60482023e-02,
-1.65943075e-02,
-1.71795618e-02,
-1.78127103e-02,
-1.84999816e-02,
-1.92504879e-02,
-2.00698171e-02,
-2.09702197e-02,
-2.19654124e-02,
-2.30720937e-02,
-2.43106075e-02,
-2.57069822e-02,
-2.72962451e-02,
-1.43178934e-02,
-1.48085468e-02,
-1.53383436e-02,
-1.59113277e-02,
-1.65353119e-02,
-1.72161739e-02,
-1.79625414e-02,
-1.87847745e-02,
-1.96950957e-02,
-2.07099430e-02,
-2.18482167e-02,
-2.31328830e-02,
],
dtype=np.float32,
)
},
"Latitude": {"value": np.random.rand(96, 332).astype(np.float32)},
"Longitude": {"value": np.random.rand(96, 332).astype(np.float32)},
"LunarAzimuthAngle": {
"value": np.random.rand(96, 332).astype(np.float32)
},
"LunarZenithAngle": {
"value": np.random.rand(96, 332).astype(np.float32)
},
"MidTime": {
"value": np.array(
[
1950675122400462,
1950675124187044,
1950675125973621,
1950675127760200,
1950675129546777,
1950675131333401,
1950675133119981,
1950675134906559,
1950675136693138,
1950675138479716,
1950675140266341,
1950675142052918,
1950675143839498,
1950675145626075,
1950675147412654,
1950675149199278,
1950675150985857,
1950675152772434,
1950675154559014,
1950675156345591,
1950675158132216,
1950675159918795,
1950675161705373,
1950675163491595,
1950675165278173,
1950675167064395,
1950675168850973,
1950675170637195,
1950675172423773,
1950675174209995,
1950675175996573,
1950675177782795,
1950675179569373,
1950675181355595,
1950675183142173,
1950675184928395,
1950675186714973,
1950675188501195,
1950675190287773,
1950675192073995,
1950675193860573,
1950675195646795,
1950675197433373,
1950675199219595,
1950675201006173,
1950675202792395,
1950675204578973,
-993,
]
)
},
"MoonIllumFraction": {"value": 11.518141746520996},
"MoonPhaseAngle": {"value": 140.32131958007812},
"NumberOfTiePointZoneGroupsScan": {"value": 62},
"NumberOfTiePointZoneGroupsTrack": {"value": 1},
"NumberOfTiePointZonesScan": {
"value": np.array(
[
1,
1,
1,
1,
1,
1,
1,
1,
28,
2,
3,
2,
3,
3,
3,
5,
4,
5,
4,
4,
4,
4,
4,
3,
5,
3,
4,
3,
23,
23,
3,
4,
3,
5,
3,
4,
4,
4,
4,
4,
5,
4,
5,
3,
3,
3,
2,
3,
2,
40,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
],
dtype=np.int32,
)
},
"NumberOfTiePointZonesTrack": {"value": 1},
"PadByte1": {"value": np.array([0, 0, 0], dtype=np.uint8)},
"QF1_SCAN_VIIRSSDRGEO": {
"value": np.array(
[
0,
128,
0,
128,
0,
128,
0,
128,
0,
128,
0,
128,
0,
128,
0,
128,
0,
128,
2,
130,
2,
130,
2,
142,
14,
142,
14,
142,
14,
142,
14,
142,
14,
142,
14,
142,
14,
142,
14,
142,
14,
142,
14,
142,
14,
142,
14,
0,
],
dtype=np.uint8,
)
},
"QF2_SCAN_VIIRSSDRGEO": {
"value": np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
],
dtype=np.uint8,
)
},
"SCAttitude": {
"value": np.array(
[
[-9.22587514e-01, 3.92340779e00, 5.93621433e-01],
[-2.82428920e-01, 3.98425841e00, 7.05978215e-01],
[5.63421488e-01, 3.83695555e00, 3.93174857e-01],
[-3.16407561e-01, 3.85351181e00, 5.33868372e-01],
[-1.10977542e00, 3.82791996e00, 6.06707633e-01],
[-1.46703672e00, 3.94862103e00, 6.45296216e-01],
[-1.14162290e00, 3.79930806e00, 7.45548725e-01],
[-1.56181908e00, 3.68108273e00, 6.49301231e-01],
[-1.46823406e00, 3.63365412e00, 5.03535330e-01],
[-1.02590537e00, 3.64477968e00, 5.22250295e-01],
[-5.35379410e-01, 3.69151831e00, 4.32526857e-01],
[-5.78065366e-02, 3.37806726e00, 4.95986529e-02],
[-2.40110800e-01, 3.22970843e00, -9.55391768e-03],
[-6.54527247e-01, 3.16465378e00, 1.89672917e-01],
[-1.35780311e00, 3.24750924e00, 1.63008988e-01],
[-1.47417045e00, 3.39788198e00, 1.84387550e-01],
[-1.74577117e00, 3.53278613e00, 1.89606979e-01],
[-1.46304774e00, 3.22666740e00, 1.59070507e-01],
[-4.05473042e00, 3.06258607e00, 1.10443914e00],
[-5.91582203e00, 2.83895302e00, 1.79846287e00],
[-7.04713678e00, 2.55699897e00, 2.23985386e00],
[-7.43741798e00, 2.21711683e00, 2.42266488e00],
[-7.06249666e00, 1.81872594e00, 2.33713675e00],
[-5.96051836e00, 1.36609375e00, 1.99506497e00],
[-4.13137341e00, 8.60225558e-01, 1.39551389e00],
[-1.57741416e00, 3.02793205e-01, 5.36690295e-01],
[7.63817742e-12, 1.11727738e-10, 2.74194088e-11],
[-1.24213686e-11, 8.01499769e-11, -1.34056446e-11],
[1.78272761e-11, 9.04948685e-11, 1.77389995e-11],
[-1.47259357e-11, 9.37734057e-11, -3.89882709e-11],
[-1.94052344e-11, 1.49411969e-10, -2.48492286e-11],
[3.40418752e-12, 1.25333730e-10, 1.14499972e-11],
[5.64890669e-12, 1.35170833e-10, 2.27858565e-11],
[8.78361273e-12, 1.02109009e-10, -5.92111386e-12],
[1.47398396e-11, 8.59943505e-11, -8.54686872e-13],
[-5.35027361e-12, 1.25450331e-10, -1.54262800e-11],
[2.12667054e-11, 1.57356642e-10, 2.54392306e-11],
[-6.39285022e-12, 1.42791029e-10, -8.58749790e-12],
[-2.18451160e-11, 9.94347313e-11, -2.18451160e-11],
[1.77587389e-11, 1.16834944e-10, 3.09037483e-11],
[5.09583955e-12, 1.06878555e-10, 1.30452402e-11],
[-1.25895900e-11, 1.06217646e-10, -1.07971496e-11],
[1.45264981e-11, 1.03935242e-10, 1.73963136e-11],
[-1.41730258e-12, 7.72037989e-11, 1.15057850e-11],
[1.99397634e-11, 1.36618120e-10, 4.70010628e-11],
[1.24784124e-11, 1.14499965e-10, 4.69658253e-12],
[-1.83001236e-11, 5.19546177e-11, -1.31873679e-11],
[-9.99299988e02, -9.99299988e02, -9.99299988e02],
],
dtype=np.float32,
)
},
"SCPosition": {
"value": np.array(
[
[2.3191672e06, -4.5127075e06, 5.1096645e06],
[2.3202438e06, -4.5225140e06, 5.1005205e06],
[2.3213098e06, -4.5323050e06, 5.0913595e06],
[2.3223650e06, -4.5420810e06, 5.0821800e06],
[2.3234100e06, -4.5518415e06, 5.0729835e06],
[2.3244445e06, -4.5615875e06, 5.0637700e06],
[2.3254692e06, -4.5713185e06, 5.0545390e06],
[2.3264830e06, -4.5810340e06, 5.0452915e06],
[2.3274862e06, -4.5907340e06, 5.0360255e06],
[2.3284792e06, -4.6004185e06, 5.0267430e06],
[2.3294620e06, -4.6100885e06, 5.0174430e06],
[2.3304345e06, -4.6197430e06, 5.0081270e06],
[2.3313962e06, -4.6293820e06, 4.9987935e06],
[2.3323475e06, -4.6390050e06, 4.9894420e06],
[2.3332888e06, -4.6486130e06, 4.9800740e06],
[2.3342195e06, -4.6582060e06, 4.9706890e06],
[2.3351398e06, -4.6677835e06, 4.9612880e06],
[2.3360495e06, -4.6773440e06, 4.9518685e06],
[2.3369522e06, -4.6868750e06, 4.9424430e06],
[2.3378502e06, -4.6963695e06, 4.9330150e06],
[2.3387432e06, -4.7058270e06, 4.9235845e06],
[2.3396312e06, -4.7152475e06, 4.9141520e06],
[2.3405140e06, -4.7246290e06, 4.9047175e06],
[2.3413915e06, -4.7339725e06, 4.8952825e06],
[2.3422642e06, -4.7432805e06, 4.8858430e06],
[2.3431318e06, -4.7525505e06, 4.8764035e06],
[2.3439710e06, -4.7618790e06, 4.8668965e06],
[2.3447770e06, -4.7712820e06, 4.8573130e06],
[2.3455728e06, -4.7806710e06, 4.8477115e06],
[2.3463582e06, -4.7900425e06, 4.8380950e06],
[2.3471335e06, -4.7994005e06, 4.8284610e06],
[2.3478980e06, -4.8087395e06, 4.8188110e06],
[2.3486522e06, -4.8180645e06, 4.8091435e06],
[2.3493960e06, -4.8273715e06, 4.7994615e06],
[2.3501298e06, -4.8366645e06, 4.7897610e06],
[2.3508530e06, -4.8459395e06, 4.7800465e06],
[2.3515658e06, -4.8552000e06, 4.7703130e06],
[2.3522680e06, -4.8644420e06, 4.7605655e06],
[2.3529602e06, -4.8736700e06, 4.7508000e06],
[2.3536420e06, -4.8828800e06, 4.7410205e06],
[2.3543132e06, -4.8920755e06, 4.7312230e06],
[2.3549740e06, -4.9012520e06, 4.7214105e06],
[2.3556248e06, -4.9104145e06, 4.7115800e06],
[2.3562650e06, -4.9195590e06, 4.7017360e06],
[2.3568952e06, -4.9286890e06, 4.6918745e06],
[2.3575145e06, -4.9378000e06, 4.6819980e06],
[2.3581235e06, -4.9468960e06, 4.6721035e06],
[-9.9929999e02, -9.9929999e02, -9.9929999e02],
],
dtype=np.float32,
)
},
"SCSolarAzimuthAngle": {
"value": np.array(
[
-140.6137,
-140.54446,
-140.47484,
-140.40486,
-140.33464,
-140.26427,
-140.19333,
-140.12198,
-140.05042,
-139.97855,
-139.90648,
-139.83394,
-139.76117,
-139.68803,
-139.61465,
-139.54103,
-139.46695,
-139.3923,
-139.31741,
-139.2424,
-139.16727,
-139.09201,
-139.01662,
-138.94112,
-138.86546,
-138.78972,
-138.71251,
-138.63487,
-138.5569,
-138.4786,
-138.39995,
-138.32097,
-138.24161,
-138.16193,
-138.0819,
-138.00153,
-137.92078,
-137.8397,
-137.75827,
-137.67648,
-137.59433,
-137.51183,
-137.42896,
-137.34573,
-137.26213,
-137.17819,
-137.09386,
-999.3,
],
dtype=np.float32,
)
},
"SCSolarZenithAngle": {
"value": np.array(
[
135.88528,
135.96703,
136.04868,
136.1302,
136.21165,
136.2931,
136.37451,
136.4556,
136.53659,
136.61748,
136.69843,
136.77931,
136.86021,
136.94092,
137.02148,
137.10208,
137.18248,
137.26239,
137.34204,
137.42155,
137.50092,
137.58014,
137.65923,
137.73816,
137.81696,
137.8956,
137.97507,
138.05447,
138.13382,
138.21303,
138.29218,
138.37122,
138.45016,
138.529,
138.60777,
138.68642,
138.76498,
138.84343,
138.9218,
139.00005,
139.07823,
139.15627,
139.23422,
139.31207,
139.38983,
139.46748,
139.54503,
-999.3,
],
dtype=np.float32,
)
},
"SCVelocity": {
"value": np.array(
[
[605.31726, -5492.9614, -5113.397],
[599.4935, -5484.5615, -5123.1396],
[593.66986, -5476.142, -5132.8657],
[587.8464, -5467.7017, -5142.573],
[582.02313, -5459.241, -5152.263],
[576.19995, -5450.7607, -5161.936],
[570.37714, -5442.2607, -5171.592],
[564.5546, -5433.741, -5181.2295],
[558.73236, -5425.2, -5190.849],
[552.9104, -5416.6396, -5200.4517],
[547.0887, -5408.06, -5210.0366],
[541.26746, -5399.4604, -5219.6035],
[535.44666, -5390.841, -5229.153],
[529.6263, -5382.201, -5238.684],
[523.8063, -5373.5415, -5248.1978],
[517.9866, -5364.863, -5257.694],
[512.16754, -5356.1646, -5267.1724],
[506.34906, -5347.446, -5276.632],
[500.53455, -5338.72, -5286.0645],
[494.72552, -5329.993, -5295.466],
[488.9218, -5321.265, -5304.8364],
[483.1238, -5312.536, -5314.1743],
[477.33157, -5303.806, -5323.4795],
[471.546, -5295.0767, -5332.7515],
[465.7647, -5286.344, -5341.9937],
[459.99005, -5277.613, -5351.2026],
[454.19785, -5268.798, -5360.442],
[448.38614, -5259.887, -5369.7207],
[442.57404, -5250.955, -5378.983],
[436.7639, -5242.0063, -5388.225],
[430.9534, -5233.0366, -5397.4517],
[425.145, -5224.0483, -5406.6567],
[419.33627, -5215.0396, -5415.845],
[413.52963, -5206.013, -5425.014],
[407.72275, -5196.9663, -5434.1665],
[401.91797, -5187.9023, -5443.299],
[396.11307, -5178.8164, -5452.4136],
[390.3103, -5169.7134, -5461.508],
[384.50742, -5160.59, -5470.586],
[378.70673, -5151.4497, -5479.644],
[372.90598, -5142.288, -5488.6846],
[367.1075, -5133.109, -5497.7046],
[361.309, -5123.9097, -5506.708],
[355.5128, -5114.6934, -5515.691],
[349.71658, -5105.4565, -5524.657],
[343.9228, -5096.202, -5533.602],
[338.12906, -5086.927, -5542.53],
[-999.3, -999.3, -999.3],
],
dtype=np.float32,
)
},
"SatelliteAzimuthAngle": {
"value": np.random.rand(96, 332).astype(np.float32)
},
"SatelliteZenithAngle": {
"value": np.random.rand(96, 332).astype(np.float32)
},
"SolarAzimuthAngle": {
"value": np.random.rand(96, 332).astype(np.float32)
},
"SolarZenithAngle": {
"value": np.random.rand(96, 332).astype(np.float32)
},
"StartTime": {
"value": np.array(
[
1950675122120971,
1950675123907557,
1950675125694139,
1950675127480722,
1950675129267304,
1950675131053910,
1950675132840494,
1950675134627077,
1950675136413660,
1950675138200243,
1950675139986850,
1950675141773433,
1950675143560016,
1950675145346598,
1950675147133181,
1950675148919788,
1950675150706371,
1950675152492953,
1950675154279537,
1950675156066119,
1950675157852726,
1950675159639309,
1950675161425892,
1950675163212109,
1950675164998692,
1950675166784909,
1950675168571492,
1950675170357709,
1950675172144292,
1950675173930509,
1950675175717092,
1950675177503309,
1950675179289892,
1950675181076109,
1950675182862692,
1950675184648909,
1950675186435492,
1950675188221709,
1950675190008292,
1950675191794509,
1950675193581092,
1950675195367309,
1950675197153892,
1950675198940109,
1950675200726692,
1950675202512909,
1950675204299492,
-993,
]
)
},
"TiePointZoneGroupLocationScanCompact": {
"value": np.array(
[
0,
2,
4,
6,
8,
10,
12,
14,
16,
45,
48,
52,
55,
59,
63,
67,
73,
78,
84,
89,
94,
99,
104,
109,
113,
119,
123,
128,
132,
156,
180,
184,
189,
193,
199,
203,
208,
213,
218,
223,
228,
234,
239,
245,
249,
253,
257,
260,
264,
267,
308,
310,
312,
314,
316,
318,
320,
322,
324,
326,
328,
330,
],
dtype=np.int32,
)
},
"TiePointZoneGroupLocationTrackCompact": {"value": 0},
"attrs": {
"OriginalFilename": np.array(
[
[
b"GDNBO_j01_d20191025_t0611251_e0612478_b10015_c20191025062405837630_cspp_dev.h5"
]
],
dtype="|S78",
)
},
},
"VIIRS-DNB-SDR_All": {
"NumberOfBadChecksums": {
"value": np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
-993,
],
dtype=np.int32,
)
},
"NumberOfDiscardedPkts": {
"value": np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
-993,
],
dtype=np.int32,
)
},
"NumberOfMissingPkts": {
"value": np.array(
[
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
17,
18,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
479,
-993,
],
dtype=np.int32,
)
},
"PadByte1": {"value": np.array([0, 0, 0], dtype=np.uint8)},
"QF1_VIIRSDNBSDR": {
"value": (np.random.rand(768, 4064) * 255).astype(np.uint8)
},
"QF2_SCAN_SDR": {
"value": np.array(
[
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
0,
],
dtype=np.uint8,
)
},
"QF3_SCAN_RDR": {
"value": np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
64,
],
dtype=np.uint8,
)
},
"Radiance": {"value": np.random.rand(768, 4064).astype(np.float32)},
"attrs": {
"OriginalFilename": np.array(
[
[
b"SVDNB_j01_d20191025_t0611251_e0612478_b10015_c20191025062427398006_cspp_dev.h5"
]
],
dtype="|S78",
),
"PixelOffsetScan": np.array([[0.5]], dtype=np.float32),
"PixelOffsetTrack": np.array([[0.5]], dtype=np.float32),
"TiePointZoneGroupLocationScan": np.array(
[
[0],
[2],
[4],
[6],
[8],
[10],
[12],
[14],
[16],
[464],
[496],
[544],
[576],
[648],
[720],
[792],
[872],
[928],
[1008],
[1072],
[1136],
[1200],
[1264],
[1328],
[1400],
[1480],
[1552],
[1640],
[1712],
[1896],
[2080],
[2152],
[2240],
[2312],
[2392],
[2464],
[2528],
[2592],
[2656],
[2720],
[2784],
[2864],
[2920],
[3000],
[3072],
[3144],
[3216],
[3248],
[3296],
[3328],
[3968],
[3976],
[3984],
[3992],
[4000],
[4008],
[4016],
[4024],
[4032],
[4040],
[4048],
[4056],
],
dtype=np.int32,
),
"TiePointZoneGroupLocationTrack": np.array(
[[0]], dtype=np.int32
),
"TiePointZoneSizeScan": np.array(
[
[2],
[2],
[2],
[2],
[2],
[2],
[2],
[2],
[16],
[16],
[16],
[16],
[24],
[24],
[24],
[16],
[14],
[16],
[16],
[16],
[16],
[16],
[16],
[24],
[16],
[24],
[22],
[24],
[8],
[8],
[24],
[22],
[24],
[16],
[24],
[16],
[16],
[16],
[16],
[16],
[16],
[14],
[16],
[24],
[24],
[24],
[16],
[16],
[16],
[16],
[8],
[8],
[8],
[8],
[8],
[8],
[8],
[8],
[8],
[8],
[8],
[8],
],
dtype=np.int32,
),
"TiePointZoneSizeTrack": np.array([[16]], dtype=np.int32),
},
},
"attrs": {"MissionStartTime": np.array([[1698019234000000]])},
},
"Data_Products": {
"VIIRS-DNB-GEO": {
"VIIRS-DNB-GEO_Aggr": {
"attrs": {
"AggregateBeginningDate": np.array(
[[b"20191025"]], dtype="|S9"
),
"AggregateBeginningGranuleID": np.array(
[[b"J01002526558865"]], dtype="|S16"
),
"AggregateBeginningOrbitNumber": np.array(
[[10015]], dtype=np.uint64
),
"AggregateBeginningTime": np.array(
[[b"061125.120971Z"]], dtype="|S15"
),
"AggregateEndingDate": np.array(
[[b"20191025"]], dtype="|S9"
),
"AggregateEndingGranuleID": np.array(
[[b"J01002526558865"]], dtype="|S16"
),
"AggregateEndingOrbitNumber": np.array(
[[10015]], dtype=np.uint64
),
"AggregateEndingTime": np.array(
[[b"061247.849492Z"]], dtype="|S15"
),
"AggregateNumberGranules": np.array([[1]], dtype=np.uint64),
}
},
"VIIRS-DNB-GEO_Gran_0": {
"attrs": {
"Ascending/Descending_Indicator": np.array(
[[1]], dtype=np.uint8
),
"Beginning_Date": np.array([[b"20191025"]], dtype="|S9"),
"Beginning_Time": np.array(
[[b"061125.120971Z"]], dtype="|S15"
),
"East_Bounding_Coordinate": np.array(
[[-45.09228]], dtype=np.float32
),
"Ending_Date": np.array([[b"20191025"]], dtype="|S9"),
"Ending_Time": np.array(
[[b"061247.849492Z"]], dtype="|S15"
),
"G-Ring_Latitude": np.array(
[
[41.84151],
[44.31062],
[46.78565],
[45.41409],
[41.07657],
[38.81504],
[36.53401],
[40.55788],
],
dtype=np.float32,
),
"G-Ring_Longitude": np.array(
[
[-82.66234],
[-82.55624],
[-82.48891],
[-62.80042],
[-45.09228],
[-46.58502],
[-47.95933],
[-64.54196],
],
dtype=np.float32,
),
"LeapSecondsGranuleStart": np.array([[37]], dtype=np.int32),
"N_Algorithm_Version": np.array(
[[b"1.O.000.014"]], dtype="|S12"
),
"N_Anc_Filename": np.array(
[
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0691_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0692_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0693_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0719_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0720_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0721_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0722_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0723_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0724_1.O.0.0"
],
[
b"Terrain-Eco-ANC-Tile_20030125000000Z_ee00000000000000Z_NA_NA_N0725_1.O.0.0"
],
[
b"off_Planet-Eph-ANC_Static_JPL_000f_20151008_200001010000Z_20000101000000Z_ee00000000000000Z_np" # noqa
],
[
b"off_USNO-PolarWander-UT1-ANC_Ser7_USNO_000f_20191025_201910250000Z_20191025000109Z_ee20191101120000Z_np" # noqa
],
],
dtype="|S104",
),
"N_Aux_Filename": np.array(
[
[
b"CMNGEO-PARAM-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"CmnGeo-SAA-AC_j01_20151008180000Z_20170807130000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"TLE-AUX_j01_20191024053224Z_20191024000000Z_ee00000000000000Z_-_nobc_ops_all-_ops" # noqa
],
[
b"VIIRS-SDR-GEO-DNB-PARAM-LUT_j01_20180507121508Z_20180315000000Z_ee00000000000000Z_PS-1-O-CCR-3963-006-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-GEO-IMG-PARAM-LUT_j01_20180430182354Z_20180315000000Z_ee00000000000000Z_PS-1-O-CCR-3963-006-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-GEO-MOD-PARAM-LUT_j01_20180430182652Z_20180315000000Z_ee00000000000000Z_PS-1-O-CCR-3963-006-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-QA-LUT_j01_20180109121411Z_20180409000000Z_ee00000000000000Z_PS-1-O-CCR-3742-003-LE-PE_all-_all_all-_ops" # noqa
],
],
dtype="|S126",
),
"N_Beginning_Orbit_Number": np.array(
[[10015]], dtype=np.uint64
),
"N_Beginning_Time_IET": np.array(
[[1950675122120971]], dtype=np.uint64
),
"N_Creation_Date": np.array([[b"20191025"]], dtype="|S9"),
"N_Creation_Time": np.array(
[[b"062136.412867Z"]], dtype="|S15"
),
"N_Day_Night_Flag": np.array([[b"Night"]], dtype="|S6"),
"N_Ending_Time_IET": np.array(
[[1950675204849492]], dtype=np.uint64
),
"N_Granule_ID": np.array(
[[b"J01002526558865"]], dtype="|S16"
),
"N_Granule_Status": np.array([[b"N/A"]], dtype="|S4"),
"N_Granule_Version": np.array([[b"A1"]], dtype="|S3"),
"N_IDPS_Mode": np.array([[b"N/A"]], dtype="|S4"),
"N_Input_Prod": np.array(
[
[b"SPACECRAFT-DIARY-RDR:J01002526558800:A1"],
[b"SPACECRAFT-DIARY-RDR:J01002526559000:A1"],
[b"VIIRS-SCIENCE-RDR:J01002526558865:A1"],
],
dtype="|S40",
),
"N_JPSS_Document_Ref": np.array(
[
[
b"474-00448-02-06_JPSS-DD-Vol-II-Part-6_0200H.pdf"
],
[
b"474-00448-02-06_JPSS-VIIRS-SDR-DD-Part-6_0200H_VIIRS-DNB-GEO-PP.xml"
],
[
b"474-00448-03-06_JPSS-OAD-Vol-III-Part-6-VIIRS-RDR-SDR_-1.pdf"
],
],
dtype="|S68",
),
"N_LEOA_Flag": np.array([[b"On"]], dtype="|S3"),
"N_Nadir_Latitude_Max": np.array(
[[45.3722]], dtype=np.float32
),
"N_Nadir_Latitude_Min": np.array(
[[40.6172]], dtype=np.float32
),
"N_Nadir_Longitude_Max": np.array(
[[-62.80047]], dtype=np.float32
),
"N_Nadir_Longitude_Min": np.array(
[[-64.51342]], dtype=np.float32
),
"N_Number_Of_Scans": np.array([[47]], dtype=np.int32),
"N_Primary_Label": np.array(
[[b"Non-Primary"]], dtype="|S12"
),
"N_Quality_Summary_Names": np.array(
[
[b"Automatic Quality Flag"],
[b"Percent Missing Data"],
[b"Percent Out of Bounds"],
],
dtype="|S23",
),
"N_Quality_Summary_Values": np.array(
[[1], [61], [0]], dtype=np.int32
),
"N_Reference_ID": np.array(
[[b"VIIRS-DNB-GEO:J01002526558865:A1"]], dtype="|S33"
),
"N_Software_Version": np.array(
[[b"CSPP_SDR_3_1_3"]], dtype="|S15"
),
"N_Spacecraft_Maneuver": np.array(
[[b"Normal Operations"]], dtype="|S18"
),
"North_Bounding_Coordinate": np.array(
[[46.8018]], dtype=np.float32
),
"South_Bounding_Coordinate": np.array(
[[36.53401]], dtype=np.float32
),
"West_Bounding_Coordinate": np.array(
[[-82.66234]], dtype=np.float32
),
}
},
"attrs": {
"Instrument_Short_Name": np.array([[b"VIIRS"]], dtype="|S6"),
"N_Anc_Type_Tasked": np.array([[b"Official"]], dtype="|S9"),
"N_Collection_Short_Name": np.array(
[[b"VIIRS-DNB-GEO"]], dtype="|S14"
),
"N_Dataset_Type_Tag": np.array([[b"GEO"]], dtype="|S4"),
"N_Processing_Domain": np.array([[b"ops"]], dtype="|S4"),
"Operational_Mode": np.array(
[[b"J01 Normal Operations, VIIRS Operational"]],
dtype="|S41",
),
},
},
"VIIRS-DNB-SDR": {
"VIIRS-DNB-SDR_Aggr": {
"attrs": {
"AggregateBeginningDate": np.array(
[[b"20191025"]], dtype="|S9"
),
"AggregateBeginningGranuleID": np.array(
[[b"J01002526558865"]], dtype="|S16"
),
"AggregateBeginningOrbitNumber": np.array(
[[10015]], dtype=np.uint64
),
"AggregateBeginningTime": np.array(
[[b"061125.120971Z"]], dtype="|S15"
),
"AggregateEndingDate": np.array(
[[b"20191025"]], dtype="|S9"
),
"AggregateEndingGranuleID": np.array(
[[b"J01002526558865"]], dtype="|S16"
),
"AggregateEndingOrbitNumber": np.array(
[[10015]], dtype=np.uint64
),
"AggregateEndingTime": np.array(
[[b"061247.849492Z"]], dtype="|S15"
),
"AggregateNumberGranules": np.array([[1]], dtype=np.uint64),
}
},
"VIIRS-DNB-SDR_Gran_0": {
"attrs": {
"Ascending/Descending_Indicator": np.array(
[[1]], dtype=np.uint8
),
"Band_ID": np.array([[b"N/A"]], dtype="|S4"),
"Beginning_Date": np.array([[b"20191025"]], dtype="|S9"),
"Beginning_Time": np.array(
[[b"061125.120971Z"]], dtype="|S15"
),
"East_Bounding_Coordinate": np.array(
[[-45.09281]], dtype=np.float32
),
"Ending_Date": np.array([[b"20191025"]], dtype="|S9"),
"Ending_Time": np.array(
[[b"061247.849492Z"]], dtype="|S15"
),
"G-Ring_Latitude": np.array(
[
[41.84157],
[44.31069],
[46.78591],
[45.41409],
[41.07675],
[38.81512],
[36.53402],
[40.55788],
],
dtype=np.float32,
),
"G-Ring_Longitude": np.array(
[
[-82.65787],
[-82.55148],
[-82.47269],
[-62.80042],
[-45.09281],
[-46.58528],
[-47.95936],
[-64.54196],
],
dtype=np.float32,
),
"N_Algorithm_Version": np.array(
[[b"1.O.000.015"]], dtype="|S12"
),
"N_Anc_Filename": np.array(
[
[
b"off_Planet-Eph-ANC_Static_JPL_000f_20151008_200001010000Z_20000101000000Z_ee00000000000000Z_np" # noqa
],
[
b"off_USNO-PolarWander-UT1-ANC_Ser7_USNO_000f_20191025_201910250000Z_20191025000109Z_ee20191101120000Z_np" # noqa
],
],
dtype="|S104",
),
"N_Aux_Filename": np.array(
[
[
b"CMNGEO-PARAM-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-DNB-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-I1-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-I2-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-I3-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-I4-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-I5-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M1-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M10-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M11-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M12-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M13-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M14-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M15-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M16-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M2-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M3-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M4-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M5-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M6-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M7-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M8-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-M9-SDR-DQTT_j01_20151008180000Z_20020101010000Z_ee00000000000000Z_PS-1-O-NPP-1-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-RSBAUTOCAL-HISTORY-AUX_j01_20191024021527Z_20191024000000Z_ee00000000000000Z_-_nobc_ops_all-_ops" # noqa
],
[
b"VIIRS-RSBAUTOCAL-VOLT-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-EDD154640-109C-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-BB-TEMP-COEFFS-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-CAL-AUTOMATE-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-Pred-SideA-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-COEFF-A-LUT_j01_20180109114311Z_20180409000000Z_ee00000000000000Z_PS-1-O-CCR-3742-003-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-COEFF-B-LUT_j01_20180109101739Z_20180409000000Z_ee00000000000000Z_PS-1-O-CCR-3742-004-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-DELTA-C-LUT_j01_20180109000000Z_20180409000000Z_ee00000000000000Z_PS-1-O-CCR-3742-003-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-DG-ANOMALY-DN-LIMITS-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-SideA-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-DNB-DN0-LUT_j01_20190930000000Z_20190928000000Z_ee00000000000000Z_PS-1-O-CCR-4262-026-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-DNB-FRAME-TO-ZONE-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-Op21-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-DNB-GAIN-RATIOS-LUT_j01_20190930000000Z_20190928000000Z_ee00000000000000Z_PS-1-O-CCR-4262-025-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-DNB-LGS-GAINS-LUT_j01_20180413122703Z_20180412000000Z_ee00000000000000Z_PS-1-O-CCR-3918-005-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-DNB-RVF-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-Op21-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-DNB-STRAY-LIGHT-CORRECTION-LUT_j01_20190930160523Z_20191001000000Z_ee00000000000000Z_PS-1-O-CCR-4322-024-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-EBBT-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-EMISSIVE-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-F-PREDICTED-LUT_j01_20180413123333Z_20180412000000Z_ee00000000000000Z_PS-1-O-CCR-3918-006-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-GAIN-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-HAM-ER-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-OBC-ER-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-OBC-RR-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-OBS-TO-PIXELS-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-SameAsSNPP-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-QA-LUT_j01_20180109121411Z_20180409000000Z_ee00000000000000Z_PS-1-O-CCR-3742-003-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-RADIOMETRIC-PARAM-V3-LUT_j01_20161117000000Z_20180111000000Z_ee00000000000000Z_PS-1-O-CCR-17-3436-v003-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-REFLECTIVE-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-SameAsSNPP-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-RELATIVE-SPECTRAL-RESPONSE-LUT_j01_20161031000000Z_20180111000000Z_ee00000000000000Z_PS-1-O-CCR-17-3436-v003-FusedM9-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-RTA-ER-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-RVF-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-M16-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-SOLAR-IRAD-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-Thuillier2002-LE-PE_all-_all_all-_ops" # noqa
],
[
b"VIIRS-SDR-TELE-COEFFS-LUT_j01_20160331000000Z_20170807130000Z_ee00000000000000Z_PS-1-O-CCR-16-2859-v002-SideA-LE-PE_all-_all_all-_ops" # noqa
],
],
dtype="|S151",
),
"N_Beginning_Orbit_Number": np.array(
[[10015]], dtype=np.uint64
),
"N_Beginning_Time_IET": np.array(
[[1950675122120971]], dtype=np.uint64
),
"N_Creation_Date": np.array([[b"20191025"]], dtype="|S9"),
"N_Creation_Time": np.array(
[[b"062411.116253Z"]], dtype="|S15"
),
"N_Day_Night_Flag": np.array([[b"Night"]], dtype="|S6"),
"N_Ending_Time_IET": np.array(
[[1950675204849492]], dtype=np.uint64
),
"N_Graceful_Degradation": np.array([[b"No"]], dtype="|S3"),
"N_Granule_ID": np.array(
[[b"J01002526558865"]], dtype="|S16"
),
"N_Granule_Status": np.array([[b"N/A"]], dtype="|S4"),
"N_Granule_Version": np.array([[b"A1"]], dtype="|S3"),
"N_IDPS_Mode": np.array([[b"N/A"]], dtype="|S4"),
"N_Input_Prod": np.array(
[
[b"GEO-VIIRS-OBC-IP:J01002526558865:A1"],
[b"SPACECRAFT-DIARY-RDR:J01002526558800:A1"],
[b"SPACECRAFT-DIARY-RDR:J01002526559000:A1"],
[b"VIIRS-DNB-GEO:J01002526558865:A1"],
[b"VIIRS-IMG-RGEO-TC:J01002526558865:A1"],
[b"VIIRS-MOD-RGEO-TC:J01002526558865:A1"],
[b"VIIRS-SCIENCE-RDR:J01002526558012:A1"],
[b"VIIRS-SCIENCE-RDR:J01002526558865:A1"],
],
dtype="|S40",
),
"N_JPSS_Document_Ref": np.array(
[
[
b"474-00448-02-06_JPSS-DD-Vol-II-Part-6_0200H.pdf"
],
[
b"474-00448-02-06_JPSS-VIIRS-SDR-DD-Part-6_0200H_VIIRS-DNB-SDR-PP.xml"
],
[
b"474-00448-03-06_JPSS-OAD-Vol-III-Part-6-VIIRS-RDR-SDR_-1.pdf"
],
],
dtype="|S68",
),
"N_LEOA_Flag": np.array([[b"On"]], dtype="|S3"),
"N_Nadir_Latitude_Max": np.array(
[[45.3722]], dtype=np.float32
),
"N_Nadir_Latitude_Min": np.array(
[[40.6172]], dtype=np.float32
),
"N_Nadir_Longitude_Max": np.array(
[[-62.80047]], dtype=np.float32
),
"N_Nadir_Longitude_Min": np.array(
[[-64.51342]], dtype=np.float32
),
"N_Number_Of_Scans": np.array([[47]], dtype=np.int32),
"N_Percent_Erroneous_Data": np.array(
[[0.0]], dtype=np.float32
),
"N_Percent_Missing_Data": np.array(
[[51.05127]], dtype=np.float32
),
"N_Percent_Not-Applicable_Data": np.array(
[[0.0]], dtype=np.float32
),
"N_Primary_Label": np.array(
[[b"Non-Primary"]], dtype="|S12"
),
"N_Quality_Summary_Names": np.array(
[
[b"Scan Quality Exclusion"],
[b"Summary VIIRS SDR Quality"],
],
dtype="|S26",
),
"N_Quality_Summary_Values": np.array(
[[24], [49]], dtype=np.int32
),
"N_RSB_Index": np.array([[17]], dtype=np.int32),
"N_Reference_ID": np.array(
[[b"VIIRS-DNB-SDR:J01002526558865:A1"]], dtype="|S33"
),
"N_Satellite/Local_Azimuth_Angle_Max": np.array(
[[179.9995]], dtype=np.float32
),
"N_Satellite/Local_Azimuth_Angle_Min": np.array(
[[-179.9976]], dtype=np.float32
),
"N_Satellite/Local_Zenith_Angle_Max": np.array(
[[69.83973]], dtype=np.float32
),
"N_Satellite/Local_Zenith_Angle_Min": np.array(
[[0.00898314]], dtype=np.float32
),
"N_Software_Version": np.array(
[[b"CSPP_SDR_3_1_3"]], dtype="|S15"
),
"N_Solar_Azimuth_Angle_Max": np.array(
[[73.93496]], dtype=np.float32
),
"N_Solar_Azimuth_Angle_Min": np.array(
[[23.83542]], dtype=np.float32
),
"N_Solar_Zenith_Angle_Max": np.array(
[[147.5895]], dtype=np.float32
),
"N_Solar_Zenith_Angle_Min": np.array(
[[126.3929]], dtype=np.float32
),
"N_Spacecraft_Maneuver": np.array(
[[b"Normal Operations"]], dtype="|S18"
),
"North_Bounding_Coordinate": np.array(
[[46.8018]], dtype=np.float32
),
"South_Bounding_Coordinate": np.array(
[[36.53402]], dtype=np.float32
),
"West_Bounding_Coordinate": np.array(
[[-82.65787]], dtype=np.float32
),
}
},
"attrs": {
"Instrument_Short_Name": np.array([[b"VIIRS"]], dtype="|S6"),
"N_Collection_Short_Name": np.array(
[[b"VIIRS-DNB-SDR"]], dtype="|S14"
),
"N_Dataset_Type_Tag": np.array([[b"SDR"]], dtype="|S4"),
"N_Instrument_Flight_SW_Version": np.array(
[[20], [65534]], dtype=np.int32
),
"N_Processing_Domain": np.array([[b"ops"]], dtype="|S4"),
"Operational_Mode": np.array(
[[b"J01 Normal Operations, VIIRS Operational"]],
dtype="|S41",
),
},
},
},
"attrs": {
"CVIIRS_Version": np.array([[b"2.0.1"]], dtype="|S5"),
"Compact_VIIRS_SDR_Version": np.array([[b"3.1"]], dtype="|S3"),
"Distributor": np.array([[b"cspp"]], dtype="|S5"),
"Mission_Name": np.array([[b"JPSS-1"]], dtype="|S7"),
"N_Dataset_Source": np.array([[b"all-"]], dtype="|S5"),
"N_GEO_Ref": np.array(
[
[
b"GDNBO_j01_d20191025_t0611251_e0612478_b10015_c20191025062405837630_cspp_dev.h5"
]
],
dtype="|S78",
),
"N_HDF_Creation_Date": np.array([[b"20191025"]], dtype="|S8"),
"N_HDF_Creation_Time": np.array([[b"062502.927000Z"]], dtype="|S14"),
"Platform_Short_Name": np.array([[b"J01"]], dtype="|S4"),
"Satellite_Id_Filename": np.array([[b"j01"]], dtype="|S3"),
},
}
self.filename = os.path.join(
tempfile.gettempdir(),
"SVDNBC_j01_d20191025_t0611251_e0612478_b10015_c20191025062459000870_eum_ops.h5",
)
h5f = h5py.File(self.filename, mode="w")
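        # fill_h5 (below) mirrors the nested fake_dnb dict into the HDF5 file:
        # entries holding a "value" key become datasets, other keys become
        # groups, and any "attrs" dict is copied onto the matching node.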
def fill_h5(root, stuff):
for key, val in stuff.items():
if key in ["value", "attrs"]:
continue
if "value" in val:
root[key] = val["value"]
else:
grp = root.create_group(key)
fill_h5(grp, stuff[key])
if "attrs" in val:
                    for attr_name, attr_val in val["attrs"].items():
                        root[key].attrs[attr_name] = attr_val
fill_h5(h5f, fake_dnb)
for attr, val in fake_dnb["attrs"].items():
h5f.attrs[attr] = val
h5f.close()
self.client = None
def _dataset_iterator(self):
from satpy.readers.viirs_compact import VIIRSCompactFileHandler
from satpy.tests.utils import make_dataid
filename_info = {}
filetype_info = {'file_type': 'compact_dnb'}
test = VIIRSCompactFileHandler(self.filename, filename_info, filetype_info)
dsid = make_dataid(name='DNB', calibration='radiance')
ds1 = test.get_dataset(dsid, {})
dsid = make_dataid(name='longitude_dnb')
ds2 = test.get_dataset(dsid, {'standard_name': 'longitude'})
dsid = make_dataid(name='latitude_dnb')
ds3 = test.get_dataset(dsid, {'standard_name': 'latitude'})
dsid = make_dataid(name='solar_zenith_angle')
ds4 = test.get_dataset(dsid, {'standard_name': 'solar_zenith_angle'})
for ds in [ds1, ds2, ds3, ds4]:
yield ds
def test_get_dataset(self):
"""Retrieve datasets from a DNB file."""
for ds in self._dataset_iterator():
self.assertEqual(ds.shape, (752, 4064))
self.assertEqual(ds.dtype, np.float32)
self.assertEqual(ds.compute().shape, (752, 4064))
self.assertEqual(ds.attrs['rows_per_scan'], 16)
def test_distributed(self):
"""Check that distributed computations work."""
from dask.distributed import Client
self.client = Client()
for ds in self._dataset_iterator():
# Check that the computation is running fine.
self.assertEqual(ds.compute().shape, (752, 4064))
def tearDown(self):
"""Destroy."""
with suppress(OSError):
os.remove(self.filename)
with suppress(AttributeError):
self.client.close()
| gpl-3.0 | -7,021,463,102,585,446,000 | 47.97504 | 201 | 0.266806 | false |
TheGrimJam/fictiongen | fictiongen_app/markov_functions/views.py | 1 | 1132 | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.template import RequestContext
import markov_functions.mk_functions as ma
from core import development_tools as log
from markov_functions.models import Book
import json
@csrf_exempt # Not particularly concerned about illegitimate requests yet.
def ma_process(request):
""" Send text to algorithm and return generated text """
context_instance=RequestContext(request)
if request.method == 'POST':
request_body = request.body.decode('utf-8')
request_data = json.loads(request_body) # This contains all of the settings
fictionObj = ma.fictionObject( request_data ) # Create the model of combined texts
markovedText = fictionObj.get_text( request_data ) # Return the text
data = json.dumps(markovedText)
if data == "":
data = "Insufficient text"
return HttpResponse(data)
return HttpResponse("Request was not sent as POST request.")
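# Example (sketch): clients POST a JSON body of generation settings to the URL
# routed to ma_process; the accepted keys are defined by
# markov_functions.mk_functions.fictionObject, so the field names below are
# purely illustrative:
#   {"source_texts": ["..."], "output_length": 500}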
| lgpl-3.0 | -2,273,507,408,531,493,400 | 39.925926 | 90 | 0.715548 | false |
googleapis/googleapis-gen | google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/security_settings.py | 1 | 8909 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.dialogflow.cx.v3beta1',
manifest={
'GetSecuritySettingsRequest',
'UpdateSecuritySettingsRequest',
'ListSecuritySettingsRequest',
'ListSecuritySettingsResponse',
'CreateSecuritySettingsRequest',
'DeleteSecuritySettingsRequest',
'SecuritySettings',
},
)
class GetSecuritySettingsRequest(proto.Message):
r"""The request message for
[SecuritySettingsService.GetSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.GetSecuritySettings].
Attributes:
name (str):
Required. Resource name of the settings. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<security settings ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class UpdateSecuritySettingsRequest(proto.Message):
r"""The request message for
[SecuritySettingsService.UpdateSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.UpdateSecuritySettings].
Attributes:
security_settings (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings):
Required. [SecuritySettings] object that contains values for
each of the fields to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which fields
get updated. If the mask is not present, all
fields will be updated.
"""
security_settings = proto.Field(
proto.MESSAGE,
number=1,
message='SecuritySettings',
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class ListSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.ListSecuritySettings][].
Attributes:
parent (str):
Required. The location to list all security settings for.
Format: ``projects/<Project ID>/locations/<Location ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 20 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListSecuritySettingsResponse(proto.Message):
r"""The response message for [SecuritySettings.ListSecuritySettings][].
Attributes:
security_settings (Sequence[google.cloud.dialogflowcx_v3beta1.types.SecuritySettings]):
The list of security settings.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
security_settings = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='SecuritySettings',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class CreateSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.CreateSecuritySettings][].
Attributes:
parent (str):
Required. The location to create an
[SecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettings]
for. Format:
``projects/<Project ID>/locations/<Location ID>``.
security_settings (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings):
Required. The security settings to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
security_settings = proto.Field(
proto.MESSAGE,
number=2,
message='SecuritySettings',
)
class DeleteSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.DeleteSecuritySettings][].
Attributes:
name (str):
Required. The name of the
[SecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettings]
to delete. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<Security Settings ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class SecuritySettings(proto.Message):
r"""Represents the settings related to security issues, such as
data redaction and data retention. It may take hours for updates
on the settings to propagate to all the related components and
take effect.
Attributes:
name (str):
Required. Resource name of the settings. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<Security Settings ID>``.
display_name (str):
Required. The human-readable name of the
security settings, unique within the location.
redaction_strategy (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.RedactionStrategy):
Strategy that defines how we do redaction.
redaction_scope (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.RedactionScope):
Defines the data for which Dialogflow applies
redaction. Dialogflow does not redact data that
it does not have access to – for example, Cloud
logging.
inspect_template (str):
DLP inspect template name. Use this template to define
inspect base settings.
If empty, we use the default DLP inspect config.
The template name will have one of the following formats:
``projects/<Project ID>/inspectTemplates/<Template ID>`` OR
``projects/<Project ID>/locations/<Location ID>/inspectTemplates/<Template ID>``
OR
``organizations/<Organization ID>/inspectTemplates/<Template ID>``
retention_window_days (int):
Retains data in interaction logging for the
specified number of days. This does not apply to
Cloud logging, which is owned by the user - not
Dialogflow.
User must Set a value lower than Dialogflow's
default 30d TTL. Setting a value higher than
that has no effect.
A missing value or setting to 0 also means we
use Dialogflow's default TTL.
Note: Interaction logging is a limited access
feature. Talk to your Google representative to
check availability for you.
purge_data_types (Sequence[google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.PurgeDataType]):
List of types of data to remove when
retention settings triggers purge.
"""
class RedactionStrategy(proto.Enum):
r"""Defines how we redact data."""
REDACTION_STRATEGY_UNSPECIFIED = 0
REDACT_WITH_SERVICE = 1
class RedactionScope(proto.Enum):
r"""Defines what types of data to redact."""
REDACTION_SCOPE_UNSPECIFIED = 0
REDACT_DISK_STORAGE = 2
class PurgeDataType(proto.Enum):
r"""Type of data we purge after retention settings triggers
purge.
"""
PURGE_DATA_TYPE_UNSPECIFIED = 0
DIALOGFLOW_HISTORY = 1
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
redaction_strategy = proto.Field(
proto.ENUM,
number=3,
enum=RedactionStrategy,
)
redaction_scope = proto.Field(
proto.ENUM,
number=4,
enum=RedactionScope,
)
inspect_template = proto.Field(
proto.STRING,
number=9,
)
retention_window_days = proto.Field(
proto.INT32,
number=6,
oneof='data_retention',
)
purge_data_types = proto.RepeatedField(
proto.ENUM,
number=8,
enum=PurgeDataType,
)
__all__ = tuple(sorted(__protobuf__.manifest))
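# Example (sketch): these proto-plus wrappers accept keyword construction, e.g.
#   settings = SecuritySettings(
#       name='projects/<Project ID>/locations/<Location ID>/securitySettings/<Security Settings ID>',
#       display_name='default-settings',
#       retention_window_days=30,
#   )
# (the values above are placeholders taken from the field docstrings).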
| apache-2.0 | 8,159,482,049,782,885,000 | 32.111524 | 136 | 0.642079 | false |
adrxbmc/adrxbmc.repository | plugin.audio.googlemusic/gmusicapi/utils/utils.py | 1 | 17273 | # -*- coding: utf-8 -*-
"""Utility functions used across api code."""
import errno
import functools
import inspect
import logging
import os
import re
import subprocess
import time
import traceback
import warnings
from decorator import decorator
try:
from google.protobuf.descriptor import FieldDescriptor
#Map descriptor.CPPTYPE -> python type.
_python_to_cpp_types = {
long: ('int32', 'int64', 'uint32', 'uint64'),
float: ('double', 'float'),
bool: ('bool',),
str: ('string',),
}
cpp_type_to_python = dict(
(getattr(FieldDescriptor, 'CPPTYPE_' + cpp.upper()), python)
for (python, cpplist) in _python_to_cpp_types.items()
for cpp in cpplist
)
except: pass
from gmusicapi import __version__
from gmusicapi.compat import my_appdirs
from gmusicapi.exceptions import CallFailure, GmusicapiWarning
# this controls the crazy logging setup that checks the callstack;
# it should be monkey-patched to False after importing to disable it.
# when False, static code will simply log in the standard way under the root.
per_client_logging = True
log_filepath = os.path.join(my_appdirs.user_log_dir, 'gmusicapi.log')
printed_log_start_message = False # global, set in config_debug_logging
# matches a mac address in GM form, eg
# 00:11:22:33:AA:BB
_mac_pattern = re.compile("^({pair}:){{5}}{pair}$".format(pair='[0-9A-F]' * 2))
class DynamicClientLogger(object):
"""Dynamically proxies to the logger of a Client higher in the call stack.
This is a ridiculous hack needed because
logging is, in the eyes of a user, per-client.
So, logging from static code (eg protocol, utils) needs to log using the
config of the calling client's logger.
There can be multiple clients, so we can't just use a globally-available
logger.
    Instead of refactoring every function to receive a logger, we introspect
the callstack at runtime to figure out who's calling us, then use their
logger.
This probably won't work on non-CPython implementations.
"""
def __init__(self, caller_name):
self.caller_name = caller_name
def __getattr__(self, name):
# this isn't a totally foolproof way to proxy, but it's fine for
# the usual logger.debug, etc methods.
logger = logging.getLogger(self.caller_name)
if per_client_logging:
# search upwards for a client instance
for frame_rec in inspect.getouterframes(inspect.currentframe()):
frame = frame_rec[0]
try:
if 'self' in frame.f_locals:
f_self = frame.f_locals['self']
# can't import and check against classes; that causes an import cycle
if ((f_self is not None and
f_self.__module__.startswith('gmusicapi.clients') and
f_self.__class__.__name__ in ('Musicmanager', 'Webclient',
'Mobileclient'))):
logger = f_self.logger
break
finally:
del frame # avoid circular references
else:
# log to root logger.
# should this be stronger? There's no default root logger set up.
stack = traceback.extract_stack()
logger.info('could not locate client caller in stack:\n%s',
'\n'.join(traceback.format_list(stack)))
return getattr(logger, name)
log = DynamicClientLogger(__name__)
def deprecated(instructions):
"""Flags a method as deprecated.
:param instructions: human-readable note to assist migration.
"""
@decorator
def wrapper(func, *args, **kwargs):
message = "{0} is deprecated and may break unexpectedly.\n{1}".format(
func.__name__,
instructions)
warnings.warn(message,
GmusicapiWarning,
stacklevel=2)
return func(*args, **kwargs)
return wrapper
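# Example (sketch; the function name is illustrative):
#   @deprecated('use the newer call instead')
#   def old_call(self, *args):
#       ...
# Invoking old_call() then emits a GmusicapiWarning carrying the instructions.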
def id_or_nid(song_dict):
"""Equivalent to ``d.get('id') or d['nid']``.
Uploaded songs have an id key, while AA tracks
    have a nid key, which can often be used interchangeably.
"""
return song_dict.get('id') or song_dict['nid']
def datetime_to_microseconds(dt):
"""Return microseconds since epoch, as an int.
:param dt: a datetime.datetime
"""
return int(time.mktime(dt.timetuple()) * 1000000) + dt.microsecond
def is_valid_mac(mac_string):
"""Return True if mac_string is of form
eg '00:11:22:33:AA:BB'.
"""
if not _mac_pattern.match(mac_string):
return False
return True
def create_mac_string(num, splitter=':'):
"""Return the mac address interpretation of num,
in the form eg '00:11:22:33:AA:BB'.
:param num: a 48-bit integer (eg from uuid.getnode)
    :param splitter: a string to join the hex pairs with
"""
mac = hex(num)[2:]
# trim trailing L for long consts
if mac[-1] == 'L':
mac = mac[:-1]
pad = max(12 - len(mac), 0)
mac = '0' * pad + mac
mac = splitter.join([mac[x:x + 2] for x in range(0, 12, 2)])
mac = mac.upper()
return mac
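# Example (sketch):
#   create_mac_string(0x001122334455)      -> '00:11:22:33:44:55'
#   create_mac_string(uuid.getnode(), '-') -> this machine's MAC, dash-separated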
# from http://stackoverflow.com/a/5032238/1231454
def make_sure_path_exists(path, mode=None):
try:
if mode is not None:
os.makedirs(path, mode)
else:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# from http://stackoverflow.com/a/8101118/1231454
class DocstringInheritMeta(type):
"""A variation on
http://groups.google.com/group/comp.lang.python/msg/26f7b4fcb4d66c95
by Paul McGuire
"""
def __new__(meta, name, bases, clsdict):
if not('__doc__' in clsdict and clsdict['__doc__']):
for mro_cls in (mro_cls for base in bases for mro_cls in base.mro()):
doc = mro_cls.__doc__
if doc:
clsdict['__doc__'] = doc
break
for attr, attribute in clsdict.items():
if not attribute.__doc__:
for mro_cls in (mro_cls for base in bases for mro_cls in base.mro()
if hasattr(mro_cls, attr)):
doc = getattr(getattr(mro_cls, attr), '__doc__')
if doc:
attribute.__doc__ = doc
break
return type.__new__(meta, name, bases, clsdict)
def dual_decorator(func):
    """This is a decorator that converts a parameterized decorator for no-param use.
source: http://stackoverflow.com/questions/3888158.
"""
@functools.wraps(func)
def inner(*args, **kw):
if ((len(args) == 1 and not kw and callable(args[0]) and
not (type(args[0]) == type and issubclass(args[0], BaseException)))):
return func()(args[0])
else:
return func(*args, **kw)
return inner
@dual_decorator
def enforce_id_param(position=1):
"""Verifies that the caller is passing a single song id, and not
a song dictionary.
:param position: (optional) the position of the expected id - defaults to 1.
"""
@decorator
def wrapper(function, *args, **kw):
if not isinstance(args[position], basestring):
raise ValueError("Invalid param type in position %s;"
" expected an id (did you pass a dictionary?)" % position)
return function(*args, **kw)
return wrapper
@dual_decorator
def enforce_ids_param(position=1):
"""Verifies that the caller is passing a list of song ids, and not a
list of song dictionaries.
:param position: (optional) the position of the expected list - defaults to 1.
"""
@decorator
def wrapper(function, *args, **kw):
if ((not isinstance(args[position], (list, tuple)) or
not all([isinstance(e, basestring) for e in args[position]]))):
raise ValueError("Invalid param type in position %s;"
" expected ids (did you pass dictionaries?)" % position)
return function(*args, **kw)
return wrapper
def configure_debug_log_handlers(logger):
"""Warnings and above to stderr, below to gmusicapi.log when possible.
Output includes line number."""
global printed_log_start_message
logger.setLevel(logging.DEBUG)
logging_to_file = True
try:
make_sure_path_exists(os.path.dirname(log_filepath), 0o700)
debug_handler = logging.FileHandler(log_filepath)
except OSError:
logging_to_file = False
debug_handler = logging.StreamHandler()
debug_handler.setLevel(logging.DEBUG)
important_handler = logging.StreamHandler()
important_handler.setLevel(logging.WARNING)
logger.addHandler(debug_handler)
logger.addHandler(important_handler)
if not printed_log_start_message:
# print out startup message without verbose formatting
logger.info("!-- begin debug log --!")
logger.info("version: " + __version__)
if logging_to_file:
logger.info("logging to: " + log_filepath)
printed_log_start_message = True
formatter = logging.Formatter(
'%(asctime)s - %(name)s (%(module)s:%(lineno)s) [%(levelname)s]: %(message)s'
)
debug_handler.setFormatter(formatter)
important_handler.setFormatter(formatter)
@dual_decorator
def retry(retry_exception=None, tries=5, delay=2, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
    An exception from a final attempt will propagate.
:param retry_exception: exception (or tuple of exceptions) to check for and retry on.
If None, use (AssertionError, CallFailure).
:param tries: number of times to try (not retry) before giving up
:param delay: initial delay between retries in seconds
:param backoff: backoff multiplier
:param logger: logger to use. If None, use 'gmusicapi.utils' logger
Modified from
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python.
"""
if logger is None:
logger = logging.getLogger('gmusicapi.utils')
if retry_exception is None:
retry_exception = (AssertionError, CallFailure)
@decorator
def retry_wrapper(f, *args, **kwargs):
mtries, mdelay = tries, delay # make our own mutable copies
while mtries > 1:
try:
return f(*args, **kwargs)
except retry_exception as e:
logger.info("%s, retrying in %s seconds...", e, mdelay)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return retry_wrapper
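# Example (sketch): thanks to dual_decorator, both forms work:
#   @retry                                # defaults: AssertionError/CallFailure, 5 tries
#   def make_call(): ...
#   @retry(tries=3, delay=1, backoff=2)
#   def flaky_upload(): ...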
def pb_set(msg, field_name, val):
"""Return True and set val to field_name in msg if the assignment
is type-compatible, else return False.
val will be coerced to a proper type if needed.
:param msg: an instance of a protobuf.message
:param field_name:
:param val
"""
# Find the proper type.
field_desc = msg.DESCRIPTOR.fields_by_name[field_name]
proper_type = cpp_type_to_python[field_desc.cpp_type]
# Try with the given type first.
# Their set hooks will automatically coerce.
try_types = (type(val), proper_type)
for t in try_types:
log.debug("attempt %s.%s = %s(%r)", msg.__class__.__name__, field_name, t, val)
try:
setattr(msg, field_name, t(val))
log.debug("! success")
break
except (TypeError, ValueError):
log.debug("X failure")
else:
return False # no assignments stuck
return True
def transcode_to_mp3(filepath, quality=3, slice_start=None, slice_duration=None):
"""Return the bytestring result of transcoding the file at *filepath* to mp3.
An ID3 header is not included in the result.
:param filepath: location of file
:param quality: if int, pass to -q:a. if string, pass to -b:a
-q:a roughly corresponds to libmp3lame -V0, -V1...
:param slice_start: (optional) transcode a slice, starting at this many seconds
:param slice_duration: (optional) when used with slice_start, the number of seconds in the slice
Raise:
* IOError: problems during transcoding
* ValueError: invalid params, transcoder not found
"""
err_output = None
cmd = ['avconv', '-i', filepath]
if slice_duration is not None:
cmd.extend(['-t', str(slice_duration)])
if slice_start is not None:
cmd.extend(['-ss', str(slice_start)])
if isinstance(quality, int):
cmd.extend(['-q:a', str(quality)])
elif isinstance(quality, basestring):
cmd.extend(['-b:a', quality])
else:
raise ValueError("quality must be int or string, but received %r" % quality)
cmd.extend(['-f', 's16le', # don't output id3 headers
'-c', 'libmp3lame',
'pipe:1'])
log.debug('running transcode command %r', cmd)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
audio_out, err_output = proc.communicate()
if proc.returncode != 0:
err_output = ("(return code: %r)\n" % proc.returncode) + err_output
raise IOError # handle errors in except
except (OSError, IOError) as e:
err_msg = "transcoding command (%s) failed: %s. " % (' '.join(cmd), e)
if 'No such file or directory' in str(e):
err_msg += '\nffmpeg or avconv must be installed and in the system path.'
if err_output is not None:
err_msg += "\nstderr: '%s'" % err_output
log.exception('transcoding failure:\n%s', err_msg)
raise IOError(err_msg)
else:
return audio_out
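# Example (sketch; requires avconv (libav) on the system path):
#   mp3_bytes = transcode_to_mp3('/tmp/track.flac', quality='320k')
#   preview   = transcode_to_mp3('/tmp/track.flac', slice_start=30, slice_duration=10)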
def truncate(x, max_els=100, recurse_levels=0):
"""Return a 'shorter' truncated x of the same type, useful for logging.
recurse_levels is only valid for homogeneous lists/tuples.
max_els ignored for song dictionaries."""
# Coerce tuple to list to ease truncation.
is_tuple = False
if isinstance(x, tuple):
is_tuple = True
x = list(x)
try:
if len(x) > max_els:
if isinstance(x, basestring):
return x[:max_els] + '...'
if isinstance(x, dict):
if 'id' in x and 'titleNorm' in x:
# assume to be a song dict
trunc = dict((k, x.get(k)) for k in ['title', 'artist', 'album'])
trunc['...'] = '...'
return trunc
else:
return dict(x.items()[:max_els] + [('...', '...')])
if isinstance(x, list):
trunc = x[:max_els] + ['...']
if recurse_levels > 0:
trunc = [truncate(e, recurse_levels - 1) for e in trunc[:-1]]
if is_tuple:
trunc = tuple(trunc)
return trunc
except TypeError:
# does not have len
pass
return x
@dual_decorator
def empty_arg_shortcircuit(return_code='[]', position=1):
"""Decorate a function to shortcircuit and return something immediately if
the length of a positional arg is 0.
:param return_code: (optional) code to exec as the return value - default is a list.
:param position: (optional) the position of the expected list - default is 1.
"""
# The normal pattern when making a collection an optional arg is to use
# a sentinel (like None). Otherwise, you run the risk of the collection
# being mutated - there's only one, not a new one on each call.
# Here we've got multiple things we'd like to
# return, so we can't do that. Rather than make some kind of enum for
# 'accepted return values' I'm just allowing freedom to return anything.
# Less safe? Yes. More convenient? Definitely.
@decorator
def wrapper(function, *args, **kw):
if len(args[position]) == 0:
# avoid polluting our namespace
ns = {}
exec 'retval = ' + return_code in ns
return ns['retval']
else:
return function(*args, **kw)
return wrapper
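# Example (sketch; the method name is illustrative):
#   @empty_arg_shortcircuit(return_code='{}')
#   def get_info_for_ids(self, ids): ...
# Calling get_info_for_ids([]) then returns {} without touching the API.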
def accept_singleton(expected_type, position=1):
"""Allows a function expecting a list to accept a single item as well.
The item will be wrapped in a list.
Will not work for nested lists.
:param expected_type: the type of the items in the list
:param position: (optional) the position of the expected list - defaults to 1.
"""
@decorator
def wrapper(function, *args, **kw):
if isinstance(args[position], expected_type):
# args are a tuple, can't assign into them
args = list(args)
args[position] = [args[position]]
args = tuple(args)
return function(*args, **kw)
return wrapper
# Used to mark a field as unimplemented.
@property
def NotImplementedField(self):
raise NotImplementedError
| gpl-2.0 | -8,116,702,483,677,432,000 | 30.405455 | 100 | 0.600706 | false |
City-of-Bloomington/green-rental | scripts/convert-columbia.py | 2 | 16593 | #!/usr/bin/env python
"""
#
# By: Charles Brandt [code at charlesbrandt dot com]
# On: *2013.12.17 20:44:16
# License: GPLv3
# Requires:
#
# geopy
# Description:
Columbia provides lat/long coordinates with addresses, so import should be simpler.
"""
import os, sys, codecs, re, time
import csv
#import unicodecsv
from helpers import save_json, load_json, Location, Geo, save_results, make_person
#from building.models import make_building, make_unit
from building.models import lookup_building_with_geo
from rentrocket.helpers import SearchResults, handle_place, address_search
#from django.conf import settings
#settings.configure()
sys.path.append(os.path.dirname(os.getcwd()))
#http://stackoverflow.com/questions/8047204/django-script-to-access-model-objects-without-using-manage-py-shell
## from rentrocket import settings
## from django.core.management import setup_environ
## setup_environ(settings)
from city.models import City, to_tag
from source.models import FeedInfo, Source
from person.models import Person
def usage():
print __doc__
#for storing fixes for addresses:
conversions = { '101 HOLLY RIDGE LN': '101 HOLLYRIDGE LN',
'4200 MERCHANT ST': '4200 MERCHANT STREET',
#'3603 BERKSHIRE CT': '',
'2405 FLORIDA CT': '2405 FLORIDA',
#works in google maps, but not here
#'1012 COLBY DR': '1012 Colby Drive',
'1012 COLBY DR': '',
#'3905 ATHENS CT': '',
#'3901 ATHENS CT': '',
#'4000 LAMAR CT': '',
#'3902 CAMERON CT': '',
#'1708 PERKINS DR': '',
#'4802 MONITEAU CT': '',
'8 N KEENE ST BLDG E&F': '8 N KEENE ST',
'7000 N BUCKINGHAM SQ': '7000 N BUCKINGHAM SQUARE',
'8 N KEENE ST BLDG G&H': '8 N KEENE ST',
'5513 HUNLEY CT': '5513 HUNLEY',
'1804 LIGHTVIEW DR': '',
'1704 HIGHRIDGE DR': '',
'2211 LACLEDE DR': '',
'5402 GEMSTONE WAY': '',
}
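# The keys above are raw, uppercased addresses exactly as they appear in the
# source CSV; the values are the corrected forms to use instead. An empty
# string means "skip this address entirely" (it gets logged to skips.txt).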
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
yield [unicode(cell, 'utf-8') for cell in row]
def read_csv(source_csv, city_name, city_tag):
city_options = City.objects.filter(tag=city_tag)
print "Number of cities available: %s" % len(city_options)
if not len(city_options):
raise ValueError, "CITY NOT FOUND! run make_cities.py first"
## city = City()
## city.name = city_name
## city.tag = to_tag(city.name)
## city.save()
else:
city = city_options[0]
print city
feed_date = "2013-10-16"
feeds = FeedInfo.objects.filter(city=city).filter(added=feed_date)
if feeds.exists():
feed = feeds[0]
print "Already had feed: %s, %s" % (feed.city, feed.added)
else:
feed = FeedInfo()
feed.city = city
feed.added = feed_date
feed.version = "0.1"
feed.save()
print "Created new feed: %s" % feed.city.name
people = Person.objects.filter(name="Blank")
if people.exists():
person = people[0]
print "Already had person: %s" % (person.name)
else:
person = Person()
person.name = "Blank"
person.save()
print "Created new person: %s" % person.name
sources = Source.objects.filter(feed=feed)
if sources.exists():
feed_source = sources[0]
print "Already had source: %s, %s" % (feed_source.feed.city, feed_source.feed.added)
else:
feed_source = Source()
feed_source.feed = feed
feed_source.person = person
feed_source.save()
print "Created new source: %s" % feed_source.feed.city.name
cache_file = "%s-20150525.json" % city.tag
cache_destination = os.path.join(os.path.dirname(source_csv), cache_file)
#keep a local copy of data we've processed...
#this should help with subsequent calls
#to make sure we don't need to duplicate calls to remote geolocation APIs:
local_cache = load_json(cache_destination, create=True)
if not local_cache.has_key('buildings'):
local_cache['buildings'] = {}
search_results = {}
for key, value in local_cache['buildings'].items():
#search_results[key] = Location(value)
sr = SearchResults()
sr.from_dict(value)
#print
#print sr
#print
search_results[key] = sr
#geocoder helper:
#geo = Geo()
skips = 0
with open(source_csv) as csvfile:
reader = unicode_csv_reader(csvfile)
#just print the first row:
print '>, <'.join(reader.next())
count = 0
#want to randomize the order... distribute options more evenly
#print len(reader)
#exit()
#in order to randomize, should randomize the order in the csv
for row in reader:
count += 1
print "Looking at row: %s" % count
any_updated = False
#could exit out early here, if needed
if count > 10:
#exit()
pass
#if you want to skip ahead more quickly:
if count < 27187:
pass
else:
#print row
objectid = row[0]
## no_units = row[12]
#can pass this in as bldg_id to make_building
#that gets used for parcel too
parcel_id = row[1]
bldg_id = parcel_id
street_num = row[2]
street_dir = row[3]
street_name = row[4]
street_sfx = row[5]
#eg building number
qualifier_pre = row[6]
#eg "UNIT" or "APT"
qualifier_post = row[7]
apt_num = row[8]
#skip row9 (in/out... whatever that means)
zip_code = row[10]
#skip row11, assessor id
#skip row12, address num
#skip row13, x
#skip row14, y
#xcoord == lng
lng = row[15]
lat = row[16]
#entry floor number: (named 'z' in sheet)
floor = row[17]
#skip row18, strcid... not sure
#skip row19, parent
#skip row20, app_
#skip row21, hteloc
zone = row[22]
bldg_type = row[23]
#number of buildings
bldg_num = row[24]
no_units = row[25]
#skip row[26], inspection type
#skip row27, app number
#skip row28, date received
#skip row29, application type
#skip row30, ownerid
#skip row31, operator id
#skip row32, agent_id
#skip row33, mail to
central_heat = row[34]
if central_heat == 'Y':
central_heat = True
else:
central_heat = False
#heat mechanism? heat mechanic??? not sure
heat_mech = row[35]
#skip row36, agent id (2)
#skip row37, agent last name
#skip row38 agent first name
#skip row39 agent middle initial
#skip row40, agent title
#skip row41, business name
#could be owner, could be agent
owner_name = row[42]
owner_address1 = row[43]
owner_address2 = row[44]
owner_city = row[45]
owner_state = row[46]
owner_zip = row[47]
#address = " ".join([street_num, street_dir, street_name, street_sfx, qualifier_pre, qualifier_post, apt_num])
#this is causing problems with lookups in google
if qualifier_pre == "DUP" or qualifier_pre == "DUPE" or qualifier_pre == "2-Jan" or qualifier_pre == "HM" or qualifier_pre == "DWN":
qualifier_pre = ''
address_main = " ".join([street_num, street_dir, street_name, street_sfx, qualifier_pre])
address_main = address_main.strip()
#get rid of any double spaces
address_main = address_main.replace(" ", " ")
#similar to conversions,
#but there are too many of these to list there
if re.search('HOLLY RIDGE LN', address_main):
address_main = address_main.replace('HOLLY RIDGE LN', 'HOLLYRIDGE LN')
if re.search('BERKSHIRE CT', address_main):
address_main = address_main.replace('BERKSHIRE CT', 'BERKSHIRE')
#address_main = ''
if re.search('CAMERON CT', address_main):
address_main = address_main.replace('CAMERON CT', 'CAMERON')
#address_main = ''
if re.search('ATHENS CT', address_main):
address_main = address_main.replace('ATHENS CT', 'ATHENS')
#address_main = ''
if re.search('LAMAR CT', address_main):
address_main = address_main.replace('LAMAR CT', 'LAMAR')
#address_main = ''
if re.search('MONITEAU CT', address_main):
address_main = address_main.replace('MONITEAU CT', 'MONITEAU')
#address_main = ''
if re.search('IMPERIAL CT', address_main):
address_main = ''
if re.search('PERKINS DR', address_main):
address_main = ''
if re.search('GRANITE OAKS CT', address_main):
address_main = ''
#sometimes the 'BLDG' data is added in the wrong place
#then it gets treated as a unit item
#(but it's not *always* a unit item, so can't generalize it that way)
if qualifier_post == "BLDG" or qualifier_post == "LOT":
                    address_main = " ".join([address_main, qualifier_post, apt_num])  # apt_num: apt_main is not set yet on this branch
address_main = address_main.strip()
apt_main = ''
else:
apt_main = " ".join([qualifier_post, apt_num])
apt_main = apt_main.strip()
#check if this is one we want to skip
if conversions.has_key(address_main.upper()):
address_main = conversions[address_main.upper()]
if address_main:
print "APT_MAIN: ", apt_main
address = ", ".join( [address_main, apt_main] )
else:
address = ''
owner_address = ", ".join([owner_address1, owner_address2, owner_city, owner_state, owner_zip])
## if (not status in ['EXPIRED', 'CLOSED']) and (permit_type in ['RENTAL']):
print "Parcel ID:", parcel_id
print address
results = None
#make sure it's not one we're skipping:
if not address:
print "SKIPPING ITEM: %s" % row[1]
skips += 1
skipf = codecs.open("skips.txt", 'a', encoding='utf-8')
original = " ".join([street_num, street_dir, street_name, street_sfx, qualifier_pre])
skipf.write(original)
skipf.write('\n')
skipf.close()
else:
#check if we've started processing any results for this row
if search_results.has_key(address.upper()):
print "Already had building: %s" % address
results = search_results[address.upper()]
#print results
else:
addy = ", ".join( [address_main, city.name, city.state] )
addy += " " + zip_code
#addy += ", USA"
print addy
#toggle betweeen an actual google query
results = address_search(addy, apt_main)
#print dir(results)
if len(results.matches) > 1:
print results
for option in results.matches:
print "%s: %s, %s" % (option['place'], option['lat'], option['lng'])
print
print "Source Lat: %s, Lng: %s" % (lat, lng)
src_lat = int(float(lat) * 100)
src_lng = int(float(lng) * 100)
matched = False
for current in results.matches:
#current = results.matches[0]
print current['lat']
print current['lng']
#only want to look at the first 2 decimal places:
comp_lat = int(float(current['lat']) * 100)
comp_lng = int(float(current['lng']) * 100)
print comp_lat
print comp_lng
if (src_lat == comp_lat) and (src_lng == comp_lng):
#results.matches = results.matches[:1]
results.matches = [ current ]
matched = True
if not matched:
print "DIDN'T MATCH!"
exit()
any_updated = True
# or just using results as specified in csv
# (THIS DOES NOT NORMALIZE THE ADDRESS VIA GOOGLE)
#results = SearchResults()
#results.unit_text = apt_main
#handle_place(results, addy, lat, lng, apt_main)
assert results
#print results
lookup_building_with_geo(results, make=True, parcel_id=parcel_id)
#print results
#current['results'] = results
#print results
if results.errors:
print results
raise ValueError, results.errors
else:
search_results[address.upper()] = results
bldg = results.building
assert bldg
unit = results.unit
# may be a case where the unit is blank
# and another unit with an number/letter was created earlier
# in that case, we won't be creating one here
# and the building will already exist...
# not necessarily an error though
# just redundant data
#assert unit
(person, bldg_person) = make_person(owner_name, bldg, "Agent", address=owner_address)
#time.sleep(1)
if any_updated:
#back it up for later
#enable this when downloading GPS coordinates...
#the rest of the time it slows things down
local_cache['buildings'] = {}
for key, value in search_results.items():
#search_results[key] = SearchResults().from_dict(value)
local_cache['buildings'][key] = value.to_dict()
save_json(cache_destination, local_cache)
print
#exit()
#destination = '%s.tsv' % city_tag
#save_results(search_results, destination)
if __name__ == '__main__':
#original order:
#read_csv('/c/clients/green_rentals/cities/columbia/rental/Columbia_data_20131016.csv', "Columbia", "columbia_mo")
read_csv('/c/clients/rentrocket/cities/columbia/rental/Columbia_data_20131016-randomized.csv', "Columbia", "columbia_mo")
| agpl-3.0 | 8,687,614,417,980,200,000 | 36.371622 | 148 | 0.487796 | false |
Snuggle/hypixel.py | hypixel.py | 1 | 14054 | """ Simple Hypixel-API in Python, by Snuggle | 2017-09-30 to 2018-06-14 """
__version__ = '0.8.0'
# pylint: disable=C0103
# TODO: Add more comments. Explain what's happening!
# TODO: Add API-usage stat-tracking. Like a counter of the number of requests and how many per minute etc.
from random import choice
from time import time
import grequests
import leveling
HYPIXEL_API_URL = 'https://api.hypixel.net/'
UUIDResolverAPI = "https://sessionserver.mojang.com/session/minecraft/profile/"
HYPIXEL_API_KEY_LENGTH = 36 # This is the length of a Hypixel-API key. Don't change from 36.
verified_api_keys = []
requestCache = {}
cacheTime = 60
class PlayerNotFoundException(Exception):
""" Simple exception if a player/UUID is not found. This exception can usually be ignored.
You can catch this exception with ``except hypixel.PlayerNotFoundException:`` """
pass
class SkyblockUUIDRequired(Exception):
"""Simple exception to tell the user that in the Skyblock API, UUID's are required and names cannot be used.
Catch this exception with ``except hypixel.SkyblockUUIDRequired:``"""
pass
class GuildIDNotValid(Exception):
""" Simple exception if a Guild is not found using a GuildID. This exception can usually be ignored.
You can catch this exception with ``except hypixel.GuildIDNotValid:`` """
pass
class HypixelAPIError(Exception):
""" Simple exception if something's gone very wrong and the program can't continue. """
pass
def getJSON(typeOfRequest, **kwargs):
""" This private function is used for getting JSON from Hypixel's Public API. """
requestEnd = ''
if typeOfRequest == 'key':
api_key = kwargs['key']
else:
api_key = choice(verified_api_keys) # Select a random API key from the list available.
if typeOfRequest == 'player':
UUIDType = 'uuid'
uuid = kwargs['uuid']
if len(uuid) <= 16:
UUIDType = 'name' # TODO: I could probably clean this up somehow.
if typeOfRequest == 'skyblockplayer':
typeOfRequest = "/skyblock/profiles"
for name, value in kwargs.items():
if typeOfRequest == "player" and name == "uuid":
name = UUIDType
requestEnd += '&{}={}'.format(name, value)
cacheURL = HYPIXEL_API_URL + '{}?key={}{}'.format(typeOfRequest, "None", requestEnd) # TODO: Lowercase
allURLS = [HYPIXEL_API_URL + '{}?key={}{}'.format(typeOfRequest, api_key, requestEnd)] # Create request URL.
# If url exists in request cache, and time hasn't expired...
if cacheURL in requestCache and requestCache[cacheURL]['cacheTime'] > time():
response = requestCache[cacheURL]['data'] # TODO: Extend cache time
else:
requests = (grequests.get(u) for u in allURLS)
responses = grequests.imap(requests)
for r in responses:
response = r.json()
if not response['success']:
raise HypixelAPIError(response)
if typeOfRequest == 'player':
if response['player'] is None:
raise PlayerNotFoundException(uuid)
if typeOfRequest != 'key': # Don't cache key requests.
requestCache[cacheURL] = {}
requestCache[cacheURL]['data'] = response
requestCache[cacheURL]['cacheTime'] = time() + cacheTime # Cache request and clean current cache.
cleanCache()
try:
return response[typeOfRequest]
except KeyError:
return response
def cleanCache():
""" This function is occasionally called to clean the cache of any expired objects. """
itemsToRemove = []
for item in requestCache:
try:
if requestCache[item]['cacheTime'] < time():
itemsToRemove.append(item)
except:
pass
for item in itemsToRemove:
requestCache.pop(item)
def setCacheTime(seconds):
""" This function sets how long the request cache should last, in seconds.
Parameters
-----------
seconds : float
How long you would like Hypixel-API requests to be cached for.
"""
try:
global cacheTime
cacheTime = float(seconds)
return "Cache time has been successfully set to {} seconds.".format(cacheTime)
except ValueError as chainedException:
raise HypixelAPIError("Invalid cache time \"{}\"".format(seconds)) from chainedException
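# Example (sketch):
#   hypixel.setCacheTime(30)    # cache identical API requests for 30 seconds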
def setKeys(api_keys):
""" This function is used to set your Hypixel API keys.
It also checks that they are valid/working.
Raises
------
HypixelAPIError
If any of the keys are invalid or don't work, this will be raised.
Parameters
-----------
api_keys : list
A list of the API keys that you would like to use.
Example: ``['740b8cf8-8aba-f2ed-f7b10119d28']``.
"""
for api_key in api_keys:
if len(api_key) == HYPIXEL_API_KEY_LENGTH:
response = getJSON('key', key=api_key)
if response['success']:
verified_api_keys.append(api_key)
else:
raise HypixelAPIError("hypixel/setKeys: Error with key XXXXXXXX-XXXX-XXXX-XXXX{} | {}".format(api_key[23:], response))
else:
raise HypixelAPIError("hypixel/setKeys: The key '{}' is not 36 characters.".format(api_key))
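# Example (sketch; the key below is a dummy placeholder, not a real API key):
#   hypixel.setKeys(['xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'])
#   player = hypixel.Player('<player uuid>')
#   print(player.getName(), player.getLevel())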
class Player:
""" This class represents a player on Hypixel as a single object.
A player has a UUID, a username, statistics etc.
Raises
------
PlayerNotFoundException
If the player cannot be found, this will be raised.
Parameters
-----------
Username/UUID : string
Either the UUID or the username (Deprecated) for a Minecraft player.
Attributes
-----------
JSON : string
        The raw JSON received from the Hypixel API.
UUID : string
The player's UUID.
"""
JSON = None
UUID = None
def __init__(self, UUID):
""" This is called whenever someone uses hypixel.Player('Snuggle').
Get player's UUID, if it's a username. Get Hypixel-API data. """
self.UUID = UUID
if len(UUID) <= 16: # If the UUID isn't actually a UUID... *rolls eyes* Lazy people.
self.JSON = getJSON('player', uuid=UUID) # Get player's Hypixel-API JSON information.
JSON = self.JSON
self.UUID = JSON['uuid'] # Pretend that nothing happened and get the UUID from the API.
elif len(UUID) in (32, 36): # If it's actually a UUID, with/without hyphens...
self.JSON = getJSON('player', uuid=UUID)
else:
raise PlayerNotFoundException(UUID)
def getPlayerInfo(self):
""" This is a simple function to return a bunch of common data about a player. """
JSON = self.JSON
playerInfo = {}
playerInfo['uuid'] = self.UUID
playerInfo['displayName'] = Player.getName(self)
playerInfo['rank'] = Player.getRank(self)
playerInfo['networkLevel'] = Player.getLevel(self)
JSONKeys = ['karma', 'firstLogin', 'lastLogin',
'mcVersionRp', 'networkExp', 'socialMedia', 'prefix']
for item in JSONKeys:
try:
playerInfo[item] = JSON[item]
except KeyError:
pass
return playerInfo
def getName(self):
""" Just return player's name. """
JSON = self.JSON
return JSON['displayname']
def getLevel(self):
""" This function calls leveling.py to calculate a player's network level. """
JSON = self.JSON
networkExp = JSON.get('networkExp', 0)
networkLevel = JSON.get('networkLevel', 0)
exp = leveling.getExperience(networkExp, networkLevel)
myoutput = leveling.getExactLevel(exp)
return myoutput
def getUUID(self):
JSON = self.JSON
return JSON['uuid']
def getRank(self):
""" This function returns a player's rank, from their data. """
JSON = self.JSON
playerRank = {} # Creating dictionary.
playerRank['wasStaff'] = False
possibleRankLocations = ['packageRank', 'newPackageRank', 'monthlyPackageRank', 'rank']
# May need to add support for multiple monthlyPackageRank's in future.
for Location in possibleRankLocations:
if Location in JSON:
if Location == 'rank' and JSON[Location] == 'NORMAL':
playerRank['wasStaff'] = True
else:
if JSON[Location] == "NONE": # If monthlyPackageRank expired, ignore "NONE". See: https://github.com/Snuggle/hypixel.py/issues/9
continue
dirtyRank = JSON[Location].upper().replace("_", " ").replace(" Plus", "+")
playerRank['rank'] = dirtyRank.replace("Superstar", "MVP++").replace("Youtuber", "YouTube")
if 'rank' not in playerRank:
playerRank['rank'] = 'Non'
return playerRank
def getGuildID(self):
""" This function is used to get a GuildID from a player. """
UUID = self.UUID
GuildID = getJSON('findGuild', byUuid=UUID)
return GuildID['guild']
def getSession(self):
""" This function is used to get a player's session information. """
UUID = self.UUID
try:
session = getJSON('session', uuid=UUID)
except HypixelAPIError:
session = None
return session
class Guild:
""" This class represents a guild on Hypixel as a single object.
A guild has a name, members etc.
Parameters
-----------
GuildID : string
The ID for a Guild. This can be found by using :class:`Player.getGuildID()`.
Attributes
-----------
JSON : string
        The raw JSON received from the Hypixel API.
GuildID : string
The Guild's GuildID.
"""
JSON = None
GuildID = None
def __init__(self, GuildID):
try:
if len(GuildID) == 24:
self.GuildID = GuildID
self.JSON = getJSON('guild', id=GuildID)
except Exception as chainedException:
raise GuildIDNotValid(GuildID) from chainedException
def getMembers(self):
""" This function enumerates all the members in a guild.
Mojang's API rate-limits this weirdly.
This is an extremely messy helper function. Use at your own risk. """
guildRoles = ['MEMBER', 'OFFICER', 'GUILDMASTER'] # Define variables etc.
memberDict = self.JSON['members']
allGuildMembers = {}
for role in guildRoles: # Make allGuildMembers =
allGuildMembers[role] = [] # {MEMBER: [], OFFICER: [], GUILDMASTER: []}
allURLS = []
URLStoRequest = []
roleOrder = []
memberList = []
requests = None
responses = None
for member in memberDict: # For each member, use the API to get their username.
roleOrder.append(member['rank'])
if UUIDResolverAPI + member['uuid'] in requestCache:
print("cached")
allURLS.append(requestCache[UUIDResolverAPI + member['uuid']]['name'])
else:
print("NOPE")
allURLS.append(UUIDResolverAPI + member['uuid'])
URLStoRequest.append(UUIDResolverAPI + member['uuid'])
requests = (grequests.get(u) for u in URLStoRequest)
responses = grequests.map(requests)
for response in responses:
requestCache[UUIDResolverAPI + response.json()['id']] = response.json()
i = 0
for uindex, user in enumerate(allURLS):
try:
if user.startswith(UUIDResolverAPI):
allURLS[uindex] = responses[i].json()['name']
i += 1
except AttributeError:
pass
i = 0
for name in allURLS:
try:
member = {'role': roleOrder[i], 'name': name}
except KeyError:
member = {'role': roleOrder[i], 'name': 'Unknown'}
memberList.append(member)
i = i + 1
for member in memberList:
roleList = allGuildMembers[member['role']]
roleList.append(member['name'])
return allGuildMembers
class Auction:
""" This class represents an auction on Hypixel Skyblock as a single object.
"""
def __init__(self):
""""Called to create an Auction class."""
pass
def getAuctionInfo(self, PageNumber):
"""Gets all the auction info for a specified page. PageNumber is the page that is requested and can be in int form or string"""
return getJSON("skyblock/auction", page = str(PageNumber))
#TODO Add more info
class SkyblockPlayer:
"""A class for a Skyblock player. It requires a UUID, and will return stats on the player
Raises
------
SkyblockUUIDRequired
If you pass in a normal username such as RedKaneChironic, will throw an error as Hypixel Skyblock's API currently does not support usernames
PlayerNotFoundException
If the player cannot be found, this will be raised.
Parameters
-----------
UUID: string
UUID of the Player
JSON: string
Raw JSON data"""
def __init__(self, UUID):
self.UUID = UUID
if len(UUID) <= 16: #UUID is a Minecraft username
raise SkyblockUUIDRequired(UUID)
elif len(UUID) in (32, 36):
self.JSON = getJSON('skyblock/player', uuid = UUID)
else:
raise PlayerNotFoundException(UUID)
if __name__ == "__main__":
print("This is a Python library and shouldn't be run directly.\n"
"Please look at https://github.com/Snuggle/hypixel.py for usage & installation information.")
| mit | 1,250,720,892,829,467,600 | 36.678284 | 148 | 0.595347 | false |
AdaptivePELE/AdaptivePELE | AdaptivePELE/freeEnergies/prepareMSMFolders.py | 1 | 3916 | from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import shutil
class Constants():
def __init__(self, trajs_path=None):
self.trajFolder = "allTrajs"
if trajs_path is not None:
self.trajFolder = os.path.join(trajs_path, self.trajFolder)
self.origTrajFiles = os.path.join(self.trajFolder, "traj_*")
self.trajFileEpoch = os.path.join(self.trajFolder, "traj_%d_*")
self.nonRepeatedTrajEpoch = os.path.join(self.trajFolder, "extractedCoordinates", "traj_%d_*")
self.msmFolder = "MSM_%d"
self.rawDataFolder = os.path.join(self.msmFolder, "rawData")
self.nonRepeatedCoordsFolder = os.path.join(self.rawDataFolder, "extractedCoordinates")
self.templetizedControlFileMSM = "templetized_control_MSM.conf"
def extractEpoch(f):
    # Split a filename like traj_0_1.dat into [traj, 0, 1.dat] and take the middle (epoch) part
return f.rsplit("_", 2)[1]
def getAllDifferentEpochs(origTrajFiles):
trajFiles = glob.glob(origTrajFiles)
epochs = set([])
for f in trajFiles:
epoch = extractEpoch(f)
epochs.add(int(epoch))
epochs = sorted(epochs)
return epochs
def makeMSMFolders(epochs, msmFolder):
for epoch in epochs:
if not os.path.exists(msmFolder % epoch):
os.makedirs(msmFolder % epoch)
def makeRawDataFolders(epochs, rawDataFolder, nonRepeatedCoordsFolder):
"""
This folder contains symbolic links to the corresponding trajectories
"""
for epoch in epochs:
folder = rawDataFolder % epoch
nonRepeatedFolder = nonRepeatedCoordsFolder % epoch
if not os.path.exists(folder):
os.makedirs(folder)
if not os.path.exists(nonRepeatedFolder):
os.makedirs(nonRepeatedFolder)
def makeSymbolicLinksForFiles(filelist, destination):
for src in filelist:
[srcFolder, filename] = os.path.split(src)
# srcFolder = os.path.abspath(srcFolder)
# Switch the symbolic links to use a relative path, so if the
# folders containing the data are moved they will not break
srcFolder = os.path.relpath(srcFolder, start=destination)
src = os.path.join(srcFolder, filename)
dest = os.path.join(destination, filename)
try:
if not os.path.isfile(dest):
os.symlink(src, dest)
except OSError:
pass
def makeSymbolicLinks(epochs, rawDataFolder, trajFileEpoch, trajNonRepeatedEpoch, nonRepeatedRawData):
for epoch in epochs:
destFolder = rawDataFolder % epoch
destNonRepeatedFolder = nonRepeatedRawData % epoch
for prevEpoch in range(epoch+1):
sourcesPrevEpoch = glob.glob(trajFileEpoch % prevEpoch)
makeSymbolicLinksForFiles(sourcesPrevEpoch, destFolder)
nonRepeatedPrevEpoch = glob.glob(trajNonRepeatedEpoch % prevEpoch)
makeSymbolicLinksForFiles(nonRepeatedPrevEpoch, destNonRepeatedFolder)
def copyMSMcontrolFile(epochs, msmFolder, templetizedControlFileMSM):
scriptsFolder = os.path.dirname(os.path.realpath(__file__))
scriptsFile = os.path.join(scriptsFolder, templetizedControlFileMSM)
print(scriptsFile)
for epoch in epochs:
dst = os.path.join(msmFolder % epoch, templetizedControlFileMSM)
shutil.copyfile(scriptsFile, dst)
def main(trajsPath=None):
constants = Constants(trajsPath)
epochs = getAllDifferentEpochs(constants.origTrajFiles)
makeMSMFolders(epochs, constants.msmFolder)
makeRawDataFolders(epochs, constants.rawDataFolder, constants.nonRepeatedCoordsFolder)
makeSymbolicLinks(epochs, constants.rawDataFolder, constants.trajFileEpoch, constants.nonRepeatedTrajEpoch, constants.nonRepeatedCoordsFolder)
# copyMSMcontrolFile(epochs, constants.msmFolder, constants.templetizedControlFileMSM)
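# Example (sketch): running this module next to an "allTrajs" directory creates
# one MSM_<epoch> folder per epoch, each with rawData/ symlinks back to the
# shared trajectory files; main(trajsPath='/path/to/run') points it elsewhere.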
if __name__ == "__main__":
main()
| mit | -1,821,340,976,296,350,200 | 37.019417 | 146 | 0.698672 | false |
openmaraude/APITaxi | APITaxi_models2/migrations/versions/20210614_13:57:26_5b8d8ca36aeb_new_revision.py | 1 | 3206 | """New revision
Revision ID: 5b8d8ca36aeb
Revises: 5d164bbb4813
Create Date: 2021-06-14 13:57:26.772630
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5b8d8ca36aeb'
down_revision = '5d164bbb4813'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute('''update customer set phone_number = '' where phone_number is null''')
conn.execute('''update hail set taxi_phone_number = '' where taxi_phone_number is null''')
conn.execute('''update role set description = '' where description is null''')
conn.execute('''update vehicle_description set color = '' where color is null''')
conn.execute('''update vehicle_description set engine = '' where engine is null''')
conn.execute('''update vehicle_description set horodateur = '' where horodateur is null''')
conn.execute('''update vehicle_description set taximetre = '' where taximetre is null''')
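    # The UPDATEs above backfill NULLs with empty strings so that the NOT NULL
    # constraints added below can be applied without violating existing rows.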
op.alter_column('customer', 'phone_number',
existing_type=sa.VARCHAR(),
nullable=False)
op.alter_column('hail', 'taxi_phone_number',
existing_type=sa.VARCHAR(),
server_default='',
nullable=False)
op.alter_column('role', 'description',
existing_type=sa.VARCHAR(length=255),
server_default='',
nullable=False)
op.alter_column('vehicle_description', 'color',
existing_type=sa.VARCHAR(length=255),
server_default='',
nullable=False)
op.alter_column('vehicle_description', 'engine',
existing_type=sa.VARCHAR(length=80),
server_default='',
nullable=False)
op.alter_column('vehicle_description', 'horodateur',
existing_type=sa.VARCHAR(length=255),
server_default='',
nullable=False)
op.alter_column('vehicle_description', 'taximetre',
existing_type=sa.VARCHAR(length=255),
server_default='',
nullable=False)
def downgrade():
op.alter_column('vehicle_description', 'taximetre',
existing_type=sa.VARCHAR(length=255),
nullable=True)
op.alter_column('vehicle_description', 'horodateur',
existing_type=sa.VARCHAR(length=255),
nullable=True)
op.alter_column('vehicle_description', 'engine',
existing_type=sa.VARCHAR(length=80),
nullable=True)
op.alter_column('vehicle_description', 'color',
existing_type=sa.VARCHAR(length=255),
nullable=True)
op.alter_column('role', 'description',
existing_type=sa.VARCHAR(length=255),
nullable=True)
op.alter_column('hail', 'taxi_phone_number',
existing_type=sa.VARCHAR(),
nullable=True)
op.alter_column('customer', 'phone_number',
existing_type=sa.VARCHAR(),
nullable=True)
| agpl-3.0 | 8,408,152,668,629,320,000 | 39.582278 | 95 | 0.569869 | false |
ml31415/numpy-groupies | numpy_groupies/benchmarks/simple.py | 1 | 4248 | #!/usr/bin/python -B
# -*- coding: utf-8 -*-
from __future__ import print_function
import timeit
import numpy as np
from numpy_groupies.utils import aliasing
from numpy_groupies import aggregate_py, aggregate_np, aggregate_ufunc
from numpy_groupies.aggregate_pandas import aggregate as aggregate_pd
def aggregate_group_loop(*args, **kwargs):
"""wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops."""
func = kwargs['func']
del kwargs['func']
return aggregate_np(*args, func=lambda x: func(x), **kwargs)
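# Example (sketch): aggregate_group_loop(group_idx, a, func=np.sum) matches
# aggregate_np(group_idx, a, func='sum'), but forces the generic group-and-loop
# fallback instead of the optimised named-function path, which is what the
# comparisons below rely on.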
print("TODO: use more extensive tests")
print("")
print("-----simple examples----------")
test_a = np.array([12.0, 3.2, -15, 88, 12.9])
test_group_idx = np.array([1, 0, 1, 4, 1 ])
print("test_a: ", test_a)
print("test_group_idx: ", test_group_idx)
print("aggregate(test_group_idx, test_a):")
print(aggregate_np(test_group_idx, test_a)) # group vals by idx and sum
# array([3.2, 9.9, 0., 0., 88.])
print("aggregate(test_group_idx, test_a, sz=8, func='min', fill_value=np.nan):")
print(aggregate_np(test_group_idx, test_a, size=8, func='min', fill_value=np.nan))
# array([3.2, -15., nan, 88., nan, nan, nan, nan])
print("aggregate(test_group_idx, test_a, sz=5, func=lambda x: ' + '.join(str(xx) for xx in x),fill_value='')")
print(aggregate_np(test_group_idx, test_a, size=5, func=lambda x: ' + '.join(str(xx) for xx in x), fill_value=''))
print("")
print("---------testing--------------")
print("compare against group-and-loop with numpy")
testable_funcs = {aliasing[f]: f for f in (np.sum, np.prod, np.any, np.all, np.min, np.max, np.std, np.var, np.mean)}
test_group_idx = np.random.randint(0, int(1e3), int(1e5))
test_a = np.random.rand(int(1e5)) * 100 - 50
test_a[test_a > 25] = 0 # for use with bool functions
for name, f in testable_funcs.items():
numpy_loop_group = aggregate_group_loop(test_group_idx, test_a, func=f)
for acc_func, acc_name in [(aggregate_np, 'np-optimised'),
(aggregate_ufunc, 'np-ufunc-at'),
(aggregate_py, 'purepy'),
(aggregate_pd, 'pandas')]:
try:
test_out = acc_func(test_group_idx, test_a, func=name)
test_out = np.asarray(test_out)
if not np.allclose(test_out, numpy_loop_group.astype(test_out.dtype)):
print(name, acc_name, "FAILED test, output: [" + acc_name + "; correct]...")
print(np.vstack((test_out, numpy_loop_group)))
else:
print(name, acc_name, "PASSED test")
except NotImplementedError:
print(name, acc_name, "NOT IMPLEMENTED")
print("")
print("----------benchmarking-------------")
print("Note that the actual observed speedup depends on a variety of properties of the input.")
print("Here we are using 100,000 indices uniformly picked from [0, 1000).")
print("Specifically, about 25% of the values are 0 (for use with bool operations),")
print("the remainder are uniformly distribuited on [-50,25).")
print("Times are scaled to 10 repetitions (actual number of reps used may not be 10).")
print(''.join(['function'.rjust(8), 'pure-py'.rjust(14), 'np-grouploop'.rjust(14),
'np-ufuncat'.rjust(14), 'np-optimised'.rjust(14), 'pandas'.rjust(14),
'ratio'.rjust(15)]))
for name, f in testable_funcs.items():
print(name.rjust(8), end='')
times = [None] * 5
for ii, acc_func in enumerate([aggregate_py, aggregate_group_loop,
aggregate_ufunc, aggregate_np,
aggregate_pd]):
try:
func = f if acc_func is aggregate_group_loop else name
reps = 3 if acc_func is aggregate_py else 20
times[ii] = timeit.Timer(lambda: acc_func(test_group_idx, test_a, func=func)).timeit(number=reps) / reps * 10
print(("%.1fms" % ((times[ii] * 1000))).rjust(13), end='')
except NotImplementedError:
print("no-impl".rjust(13), end='')
denom = min(t for t in times if t is not None)
ratios = [("-".center(4) if t is None else str(round(t / denom, 1))).center(5) for t in times]
print(" ", (":".join(ratios)))
| bsd-2-clause | -2,425,501,684,732,706,300 | 45.173913 | 121 | 0.608992 | false |
DBuildService/osbs-client | tests/test_retries.py | 1 | 2910 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
These tests are moved to a separate file due to https://github.com/bkabrda/flexmock/issues/13
"""
import logging
from flexmock import flexmock
import pytest
import requests
import six
from requests.packages.urllib3.util import Retry
from osbs.http import HttpSession, HttpStream
from osbs.exceptions import OsbsNetworkException, OsbsResponseException
from osbs.constants import HTTP_RETRIES_STATUS_FORCELIST, HTTP_RETRIES_METHODS_WHITELIST
from osbs.core import Openshift
from osbs.http import http_client
logger = logging.getLogger(__file__)
@pytest.fixture
def s():
return HttpSession(verbose=True)
def has_connection():
# In case we run tests in an environment without internet connection.
try:
HttpStream("https://httpbin.org/get", "get", retries_enabled=False)
return True
except (OsbsNetworkException, requests.exceptions.ConnectionError):
return False
# Replace real retry with fake version to speed up testing
fake_retry = Retry(total=1,
backoff_factor=1,
status_forcelist=HTTP_RETRIES_STATUS_FORCELIST)
@pytest.mark.skipif(not has_connection(),
reason="requires internet connection")
class TestHttpRetries(object):
@pytest.mark.parametrize('status_code', HTTP_RETRIES_STATUS_FORCELIST)
@pytest.mark.parametrize('method', HTTP_RETRIES_METHODS_WHITELIST)
def test_fail_after_retries(self, s, status_code, method):
flexmock(Retry).new_instances(fake_retry)
# latest python-requests throws OsbsResponseException, 2.6.x - OsbsNetworkException
with pytest.raises((OsbsNetworkException, OsbsResponseException)) as exc_info:
s.request(method=method, url='http://httpbin.org/status/%s' % status_code).json()
if isinstance(exc_info, OsbsResponseException):
assert exc_info.value.status_code == status_code
def test_stream_logs_not_decoded(self, caplog):
flexmock(Retry).new_instances(fake_retry)
server = Openshift('http://oapi/v1/', 'v1', 'http://oauth/authorize',
k8s_api_url='http://api/v1/')
logs = (
u'Lógs'.encode('utf-8'),
u'Lðgs'.encode('utf-8'),
)
fake_response = flexmock(status_code=http_client.OK, headers={})
(fake_response
.should_receive('iter_lines')
.and_yield(*logs)
.with_args(decode_unicode=False))
(flexmock(requests)
.should_receive('request')
.and_return(fake_response))
with caplog.at_level(logging.ERROR):
for result in server.stream_logs('anything'):
assert isinstance(result, six.binary_type)
| bsd-3-clause | 4,239,404,509,889,251,300 | 33.619048 | 93 | 0.675378 | false |
ajrichards/htsint | htsint/tools/ExpressionLib.py | 1 | 4968 | #!/usr/bin/env python
"""
tools for expression and count based tasks
"""
import os,sys,csv,gc,re
import numpy as np
def read_RSEM_counts_files(geneFilePath,isoformFilePath):
"""
    read the RSEM gene and isoform counts files into dictionaries keyed by ID
"""
if not os.path.exists(geneFilePath):
raise Exception("Cannot find gene file\n%s"%(geneFilePath))
if not os.path.exists(isoformFilePath):
raise Exception("Cannot find isoform file\n%s"%(isoformFilePath))
## load the gene counts
fid1 = open(geneFilePath,'rU')
reader1 = csv.reader(fid1,delimiter="\t")
header1 = next(reader1)
results1 = {}
check = 0
gc.disable()
for linja in reader1:
check += 1
results1[linja[0]] = {'transcript':linja[1],'length':float(linja[2]),'eff_length':float(linja[3]),\
'exp_count':int(round(float(linja[4]))),'TPM':float(linja[5]),'FPKM':float(linja[6])}
fid1.close()
    if check != len(results1.keys()):
        raise Exception("Rows in gene count file do not have unique IDs in the first column")
## load the isoform results
fid2 = open(isoformFilePath,'rU')
reader2 = csv.reader(fid2,delimiter="\t")
header2 = next(reader2)
results2 = {}
check = 0
for linja in reader2:
check += 1
results2[linja[0]] = {'gene':linja[1],'length':float(linja[2]),'eff_length':float(linja[3]),\
'exp_count':float(linja[4]),'TPM':float(linja[5]),'FPKM':float(linja[6])}
    if check != len(results2.keys()):
        raise Exception("Rows in isoform count file do not have unique IDs in the first column")
fid2.close()
gc.enable()
return results1, results2
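# A minimal usage sketch for the reader above (added for illustration). The two file
# paths below are hypothetical placeholders for RSEM output, not files shipped with
# this package; point them at the *.genes.results / *.isoforms.results of your own run.
def example_read_rsem_counts(geneFilePath="sampleA.genes.results",
                             isoformFilePath="sampleA.isoforms.results"):
    """
    show how the two dictionaries returned by read_RSEM_counts_files can be inspected
    """
    geneCounts, isoformCounts = read_RSEM_counts_files(geneFilePath, isoformFilePath)
    ## print expected count and TPM for the first few genes
    for geneId in sorted(geneCounts.keys())[:5]:
        entry = geneCounts[geneId]
        print("%s\texp_count=%s\tTPM=%s" % (geneId, entry['exp_count'], entry['TPM']))
    return geneCounts, isoformCounts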
def read_matrix(matFilePath,delimiter=",",mtype='float'):
"""
assumes that row one are the samples and col one are the transcripts
matrix can only be of mtype 'int' or 'float'
"""
print('reading', matFilePath)
if mtype not in ['int','float']:
raise Exception("mtype must be 'int' or 'float'")
if not os.path.exists(matFilePath):
raise Exception("Cannot find matFilePath\n%s"%matFilePath)
fid = open(matFilePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
## get the gene and sample ids
transcriptIds = []
sampleIds = np.array(header[1:])
gc.disable()
for linja in reader:
transcriptIds.append(linja[0])
gc.enable()
transcriptIds = np.array(transcriptIds)
fid.close()
## fill in the matrix
mat = np.zeros((transcriptIds.shape[0],sampleIds.shape[0]),dtype=mtype)
fid = open(matFilePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
row = 0
for linja in reader:
if mtype == 'int':
mat[row,:] = [int(float(i)) for i in linja[1:]]
else:
mat[row,:] = [float(i) for i in linja[1:]]
row +=1
fid.close()
return transcriptIds,sampleIds,mat
def read_de_results(filePath,delimiter=",",tool="edgeR"):
"""
read the differential expression output from DESeq or edgeR
"""
print('reading', filePath)
if not os.path.exists(filePath):
raise Exception("Cannot find matFilePath\n%s"%filePath)
if tool not in ["edgeR","DESeq"]:
raise Exception("invalid tool specified use 'edgeR' or 'DESeq'")
fid = open(filePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
## get columnIds
header = next(reader)
columnIds = np.array(header[1:])
## get the gene and sample ids
transcriptIds = []
gc.disable()
for linja in reader:
transcriptIds.append(linja[0])
gc.enable()
transcriptIds = np.array(transcriptIds)
fid.close()
## fill in the matrix
mat = np.zeros((transcriptIds.shape[0],columnIds.shape[0]))
fid = open(filePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
row = 0
for linja in reader:
_row = [re.sub("NA","NaN",i) for i in linja[1:]]
mat[row,:] = [float(i) for i in _row]
row +=1
fid.close()
return transcriptIds,columnIds,mat
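# Editor's sketch: selecting significant transcripts from the matrix returned by
# read_de_results. The adjusted-p-value column name used here ('padj') is an assumption
# that depends on the DESeq/edgeR output you provide; check columnIds for the real name.
def select_significant_transcripts(filePath, alpha=0.05, padj_column='padj', tool='DESeq'):
    transcriptIds, columnIds, mat = read_de_results(filePath, tool=tool)
    if padj_column not in columnIds:
        raise Exception("Column '%s' not found; available columns: %s" % (padj_column, ",".join(columnIds)))
    colIndex = np.where(columnIds == padj_column)[0][0]
    mask = mat[:, colIndex] < alpha
    return transcriptIds[mask]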
def create_count_matrix(results,label,sampleList):
"""
this function is untested
"""
## use first sample to get rows
mat = np.zeros((len(results[0].keys()),len(sampleList)))
keys = sorted(np.array(results[0].keys()))
for j,sample in enumerate(sampleList):
for i,key in enumerate(keys):
mat[i,j] = results[j][key]['exp_count']
## write to file
fid = open("%s-counts.csv"%label,'w')
writer = csv.writer(fid)
if re.search("gene",label):
writer.writerow(["gene"]+sampleList)
else:
writer.writerow(["isoform"]+sampleList)
for r in range(mat.shape[0]):
row = [keys[r]] + [int(i) for i in mat[r,:].tolist()]
writer.writerow(row)
fid.close()
| bsd-3-clause | -1,213,594,637,747,343,600 | 27.883721 | 115 | 0.605072 | false |
DamonToumbourou/plugin.video.thisweekinstartups | addon.py | 1 | 2136 | from xbmcswift2 import Plugin, xbmcgui
from resources.lib import thisweekscraper
PLUGIN_URL = 'plugin://plugin.video.youtube/?action=play_video&videoid'
SITE_URL = 'https://www.youtube.com/user/ThisWeekIn'
plugin = Plugin()
@plugin.route('/')
def main_menu():
items = [
{
'label': plugin.get_string(30000),
'path': plugin.url_for('get_latest'),
'thumbnail': 'http://ec-cdn-assets.stitcher.com/feedimagesplain328/9728.jpg',
},
{
'label': plugin.get_string(30001),
'path': plugin.url_for('get_highlight'),
'thumbnail': 'http://ec-cdn-assets.stitcher.com/feedimagesplain328/9728.jpg',
},
{
'label': plugin.get_string(30002),
'path': plugin.url_for('get_topten'),
'thumbnail': 'http://ec-cdn-assets.stitcher.com/feedimagesplain328/9728.jpg',
}
]
return items
@plugin.route('/latest/')
def get_latest():
keyword = 'Uploads'
content = thisweekscraper.get_latest(SITE_URL, keyword)
items = []
for i in content:
items.append({
'label': i['label'],
'path': PLUGIN_URL + i['path'],
'thumbnail': i['thumbnail'],
'is_playable': True,
})
return items
@plugin.route('/highlight/')
def get_highlight():
keyword = 'Highlight Clips'
content = thisweekscraper.get_latest(SITE_URL, keyword)
items = []
for i in content:
items.append({
'label': i['label'],
'path': PLUGIN_URL + i['path'],
'thumbnail': i['thumbnail'],
'is_playable': True,
})
return items
@plugin.route('/topten/')
def get_topten():
keyword = 'Top 10 TWiST videos'
content = thisweekscraper.get_latest(SITE_URL, keyword)
items = []
for i in content:
items.append({
'label': i['label'],
'path': PLUGIN_URL + i['path'],
'thumbnail': i['thumbnail'],
'is_playable': True,
})
return items
if __name__ == '__main__':
plugin.run()
| mit | 3,201,582,505,885,879,000 | 23 | 89 | 0.542135 | false |
yuanming-hu/taichi | tests/python/test_field.py | 1 | 2199 | '''
To test our new `ti.field` API is functional (#1500)
'''
import pytest
import taichi as ti
data_types = [ti.i32, ti.f32, ti.i64, ti.f64]
field_shapes = [(), 8, (6, 12)]
vector_dims = [3]
matrix_dims = [(1, 2), (2, 3)]
@pytest.mark.parametrize('dtype', data_types)
@pytest.mark.parametrize('shape', field_shapes)
@ti.host_arch_only
def test_scalar_field(dtype, shape):
x = ti.field(dtype, shape)
if isinstance(shape, tuple):
assert x.shape == shape
else:
assert x.shape == (shape, )
assert x.dtype == dtype
@pytest.mark.parametrize('n', vector_dims)
@pytest.mark.parametrize('dtype', data_types)
@pytest.mark.parametrize('shape', field_shapes)
@ti.host_arch_only
def test_vector_field(n, dtype, shape):
x = ti.Vector.field(n, dtype, shape)
if isinstance(shape, tuple):
assert x.shape == shape
else:
assert x.shape == (shape, )
assert x.dtype == dtype
assert x.n == n
assert x.m == 1
@pytest.mark.parametrize('n,m', matrix_dims)
@pytest.mark.parametrize('dtype', data_types)
@pytest.mark.parametrize('shape', field_shapes)
@ti.host_arch_only
def test_matrix_field(n, m, dtype, shape):
x = ti.Matrix.field(n, m, dtype=dtype, shape=shape)
if isinstance(shape, tuple):
assert x.shape == shape
else:
assert x.shape == (shape, )
assert x.dtype == dtype
assert x.n == n
assert x.m == m
@ti.host_arch_only
def test_field_needs_grad():
# Just make sure the usage doesn't crash, see #1545
n = 8
m1 = ti.field(dtype=ti.f32, shape=n, needs_grad=True)
m2 = ti.field(dtype=ti.f32, shape=n, needs_grad=True)
gr = ti.field(dtype=ti.f32, shape=n)
@ti.kernel
def func():
for i in range(n):
gr[i] = m1.grad[i] + m2.grad[i]
func()
@pytest.mark.parametrize('dtype', [ti.f32, ti.f64])
def test_default_fp(dtype):
ti.init(default_fp=dtype)
x = ti.Vector.field(2, float, ())
assert x.dtype == ti.get_runtime().default_fp
@pytest.mark.parametrize('dtype', [ti.i32, ti.i64])
def test_default_ip(dtype):
ti.init(default_ip=dtype)
x = ti.Vector.field(2, int, ())
assert x.dtype == ti.get_runtime().default_ip
| mit | 4,601,270,125,737,861,600 | 22.393617 | 57 | 0.624375 | false |
Eric-Matthews/Explore_RPG | d_verb_commands.py | 1 | 3001 | from d_toolbox import *
import d_help
# Holds the verb commands to be called by the game.
# Each command must be in the VERB BANK as well as have a function which uses the exact same name.
# The main game can pass NONE to the functions. Depending on the command this may or may not be valid input.
# All must return a tuple: a string to print, and a boolean saying whether an in-game day has passed.
# Returning this pair is required by the automated, uniform way these functions are dispatched.
# Likewise this is why all take the PC as well as a second string as parameters.
# Put all commands the player can type to take action into this list.
verb_bank = ['dice', 'drop', 'equip', 'help', 'inv', 'look', 'unequip', 'use']
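# A hedged sketch of what a new verb could look like under the contract described above:
# a same-name entry would go in verb_bank, the function takes (pc, string), and it returns
# a (text, day_passed) tuple. The 'rest' verb and the pc.heal() call are illustrative
# assumptions only; they are not part of the game and this function is not added to verb_bank.
def example_rest(pc, ignored):
	if hasattr(pc, 'heal'):
		pc.heal()
	# Resting takes the remainder of the day, so the time-passed flag is True.
	return "You set up camp and rest until morning.", True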
def dice(pc, dice_string):
if dice_string == None: dice_string = ''
if dice_string.replace('+','', 1).replace('d', '', 1).isdigit() == True:
to_print = str(dice_roller(dice_string))
else: to_print = "Please use the format 'DICE 1d2+3'. The +X is optional."
return to_print, False
def drop(pc, to_drop):
if to_drop == None:
print "You drop to the dirt. Ow."
return "Better get up and moving.", True
elif to_drop.title() in pc.inv:
if 'DROP' in pc.inv[to_drop.title()]['item'].actions:
pc.inv.item_rem(to_drop.title())
return "You dispose of the {}.".format(to_drop), False
else: return "You think the better of discarding the {}".format(to_drop), False
else: return "You seem to have already lost the {}.".format(to_drop), False
def equip(pc, to_equip):
if to_equip == None:
return "You'll need to be more specific.", False
elif to_equip in pc.inv:
if hasattr(pc.inv[to_equip]['item'], 'equip'):
pc.inv[to_equip]['item'].equip(pc)
return "", False
else: return "You can't equip that {}.".format(to_equip), False
else:
return "You don't have any equipable {}.".format(to_equip), False
def help(pc, location):
to_print = d_help.help_command(pc, location)
return to_print, False
def inv(pc, passed_string):
pc.inv.print_inv()
return '', False
def look(pc, looked_at_data):
print looked_at_data
to_print = "Not much here."
if 'long desc' in looked_at_data:
to_print = looked_at_data['long desc']
elif looked_at_data.lower() in ['me', 'char', 'self']:
pc.print_stats()
to_print = ''
return to_print, False
def unequip(pc, to_unequip):
if to_unequip == None:
return "You'll need to be more specific.", False
for slot, equipment in pc.equip_slots.items():
if hasattr(equipment, 'name'):
if equipment.name.lower() == to_unequip:
equipment.unequip(pc)
return "", False
else:
return "You have no {} equipped.".format(to_unequip), False
def use(pc, to_use):
if to_use == None:
return "You'll need to be more specific", False
elif to_use in pc.inv:
pc.inv.item_use(to_use, pc, pc)
return "item used.", False
else:
return "You don't have any {} to use.".format(to_use), False | gpl-3.0 | 8,461,055,582,772,697,000 | 35.5375 | 115 | 0.66078 | false |
bbfamily/abu | abupy/SlippageBu/ABuSlippageBuyBase.py | 1 | 4976 | # -*- encoding:utf-8 -*-
"""
Intraday slippage buy-price decision base module. For now this ports only a simple
implementation that is sufficient for backtesting. Porting it to a live-trading module
would require an intraday timing strategy driven by minute-level k-lines, so that the
slippage decision can be made at a finer, more microscopic intraday granularity.
Impact costs of large capital and large slippage outside the system are not considered.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import functools
import numpy as np
from ..CoreBu.ABuFixes import six
__author__ = '阿布'
__weixin__ = 'abu_quant'
class AbuSlippageBuyBase(six.with_metaclass(ABCMeta, object)):
"""非高频日内滑点买入决策抽象基类"""
def __init__(self, kl_pd_buy, factor_name):
"""
        :param kl_pd_buy: trading (k-line) data for the day of the buy
        :param factor_name: factor_name of the ABuFactorBuyBases subclass instance
"""
self.buy_price = np.inf
self.kl_pd_buy = kl_pd_buy
self.factor_name = factor_name
def fit(self):
"""做基础验证比如今天是否停盘后调用fit_price"""
if self.kl_pd_buy.empty or self.kl_pd_buy.volume == 0:
            # On a buy, positive infinity means the order is abandoned
return np.inf
return self.fit_price()
@abstractmethod
def fit_price(self):
"""
        The main method subclasses need to implement: decide the final buy price for the trading day
        :return: the finally decided buy price for the current trade
"""
pass
"""是否开启涨停板滑点买入价格特殊处理, 默认关闭,外部修改如:abupy.slippage.sbb.g_enable_limit_up = True"""
g_enable_limit_up = False
"""
The initial fill probability for buys on a limit-up day is set to 100%. After this first
probability is computed, a second probability pass based on traded volume could also be applied.
Change externally with e.g. abupy.slippage.sbb.g_limit_up_deal_chance = 0.5, i.e. a 50% buy probability
"""
g_limit_up_deal_chance = 1
"""在集合竞价阶段价格已经达成涨停的情况下买入成功的概率,默认0.2, 即20%成功概率"""
g_pre_limit_up_rate = 0.2
def slippage_limit_up(func):
"""
    Decorator for buy-price decisions on A-share limit-up days; subclasses may choose whether
    or not to apply it to fit_price. In a live strategy that uses minute k-lines and an
    intraday timing strategy, no special handling is needed. In backtesting, special handling
    is required: compute the probability that the buy is filled and use it to decide whether
    the order goes through, and then decide the buy price under the limit-up. The price model
    assumes that the closer a price is to the limit-up price, the higher its fill probability,
    i.e. under a limit-up the fill is expected near the limit-up price. The drawback is the
    use of random numbers, which makes backtest results non-reproducible between runs.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if g_enable_limit_up and self.kl_pd_buy.p_change >= 10 and self.kl_pd_buy.high == self.kl_pd_buy.close:
"""
            After the limit-up is hit, use the limit-up fill probability
            (g_limit_up_deal_chance) as the p of a binomial draw to decide whether the buy succeeds
"""
if self.kl_pd_buy.high == self.kl_pd_buy.low:
                # Up 10 points with high == low: the A-share hit the limit-up in the call-auction
                # phase, so the fill probability drops to g_limit_up_deal_chance * g_pre_limit_up_rate
                # TODO: this probability would ideally be derived from the day's traded volume
limit_up_deal_chance = g_limit_up_deal_chance * g_pre_limit_up_rate
else:
limit_up_deal_chance = g_limit_up_deal_chance
deal = np.random.binomial(1, limit_up_deal_chance)
if deal:
if self.kl_pd_buy.high == self.kl_pd_buy.low:
return self.kl_pd_buy.high
                # After a successful fill, decide the price level: arange a sequence from the low up to the limit-up price in 0.01 steps
price_lh = np.arange(self.kl_pd_buy.low, self.kl_pd_buy.high, 0.01)
                # Build the probability sequence; other distributions (e.g. exponential) could raise the weight of the limit-up price, but the simplest form is used here
lh_chance = np.linspace(0, 1, len(price_lh))
"""
                Compute the corresponding probabilities. This distribution is not steep, i.e. prices
                near the limit-up do not get much extra weight; something like
                np.power(price_hl, len(price_hl) / 2) / np.power(price_hl, len(price_hl) / 2).sum()
                could be used to further boost the buy weight of prices near the limit
"""
# noinspection PyUnresolvedReferences
p = lh_chance / lh_chance.sum()
                # Finally pick one buy price by weighted random sampling
return np.random.choice(price_lh, 1, p=p)[0]
            # Return positive infinity if the order could not be filled
return np.inf
else:
return func(self, *args, **kwargs)
return wrapper
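# Editor's sketch of how a subclass is expected to combine AbuSlippageBuyBase with the
# slippage_limit_up decorator. The mean-of-high-and-low price rule below is an illustrative
# assumption, not the library's own slippage implementation.
class AbuSlippageBuyExample(AbuSlippageBuyBase):
    """Example subclass: buy at the simple mean of the day's high and low prices"""

    @slippage_limit_up
    def fit_price(self):
        # On non-limit-up days the decorator falls through to this body
        self.buy_price = (self.kl_pd_buy.high + self.kl_pd_buy.low) / 2
        return self.buy_price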
| gpl-3.0 | 1,950,365,726,494,072,000 | 31.448598 | 111 | 0.591302 | false |
mtwharmby/assorted-scripts | PythonPlayground/mergeHDFs.py | 1 | 6095 | '''
Created on 4 Jul 2016
@author: wnm24546
'''
import glob, os, re, sys
import h5py
import numpy as np
############################################
## EDIT LINES BELOW HERE ###################
############################################
#Give the directory path where the files are and... (all values between ' or ")
working_directory = '/scratch/tmp/pdf_data'
#...either the full name of the files...
filenames_list=[]
#filenames_list = ['112004_KNJ-KA-218-01-PtNi-120s.hdf5', '112031_KNJ-KA-218-01-PtNi-120s.hdf5']
#...or the base name of the files and their numbers
file_name_template = 'Ce-BDC(250C-1hr)_aq-6s_2min'
file_numbers = []#222643, 222702, 222761, 222820, 222879,
#222938, 222997, 223056, 223115, 223174]
############################################
## NOTHING SHOULD NEED EDITING BELOW HERE ##
############################################
def main(files, template=None):
#Get numpy datasets from each of the files and put them into a list
dataSets = []
for name in files:
# if template == -1:
fName = os.path.join(wDir, name)
# else:
# fName = os.path.join(wDir, str(name)+template)
dataSets.append(get_data_from_file(fName))
#Merge dataSets into one big dataset with the same shape (0,2048,2048)
sumDataSet = np.zeros(dataSets[0].shape, dtype=np.int32)
for dataSet in dataSets:
sumDataSet = np.add(dataSet, sumDataSet)
    #Create an average dataset by dividing the summed dataset by the number of files
avsDataSet = sumDataSet/len(files)
#Output the summed data and the averaged data to two HDF files with different names
outputFiles = {'summed' : sumDataSet, 'averaged' : avsDataSet}
for key in outputFiles:
if template == None:
output_file_name = key+".hdf5"
else:
output_file_name = key+"_"+template+".hdf5"
output_path = os.path.join(wDir, 'processing')
if not os.path.exists(output_path):
os.makedirs(output_path)
print "Writing "+key.title()+" dataset file..."
with h5py.File(os.path.join(output_path, output_file_name), 'w') as out_hdf:
out_hdf.attrs['creator']="mergeHDFs.py"
out_hdf.attrs['author']="Diamond Light Source Ltd."
out_hdf.attrs['comment']=key.title()+" dataset from "+str(len(files))+" HDF files (full names given in input_files attribute)."
out_hdf.attrs['input_files']=", ".join(files)
entry = out_hdf.create_group('entry')
instrument = entry.create_group('instrument')
detector = instrument.create_group('detector')
data = detector.create_dataset('data', data=outputFiles[key])
data.attrs['dim0']="frame number n"
data.attrs['dim1']="NDArray dim1"
data.attrs['dim2']="NDArray dim0"
data.attrs['interpretation']="image"
data.attrs['signal']=1
out_hdf.close()
def get_data_from_file(filename):
print "Reading "+filename+"..."
with h5py.File(filename, 'r') as dataFile:
return dataFile['/entry/instrument/detector/data'][()]
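# Companion sketch (added for illustration): reading back one of the files written by
# main() for a quick sanity check. The filename you pass in is whichever summed_*.hdf5 or
# averaged_*.hdf5 file was produced in the 'processing' directory; the dataset path simply
# mirrors the layout created above.
def check_merged_file(filename):
    print "Checking "+filename+"..."
    with h5py.File(filename, 'r') as mergedFile:
        data = mergedFile['/entry/instrument/detector/data'][()]
        print "Shape: "+str(data.shape)+", total counts: "+str(data.sum())
        return data.shape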
def usage_message():
print ("\nmergeHDFs can be configured in the script, or will take either one or two \n"
"arguments. To configure in the script, set the working_directory,\n"
"file_name_template and file_numbers fields for your needs.\n"
"The three arguments (separated by spaces) the script accepts are:\n"
"\t1) working directory - full path\n"
"\t2) the filename_str name template - .hdf5 is automatically appended\n"
"\t3) filename_str numbers - comma separated list of numbers in the filenames\n"
)
return
#Set the working directory
if len(sys.argv) >= 2:
wDir = sys.argv[1]
elif working_directory != None:
wDir = working_directory
else:
print "ERROR: No working directory given!"
usage_message()
exit(1)
#Check the working directory exists
if not os.path.isdir(wDir):
print "ERROR: Given working directory does not exist/is not directory."
exit(1)
#If we don't have a file_list already, try to make one
if not filenames_list: #Empty list or None
#Set the template
if len(sys.argv) >= 3:
template = sys.argv[2]
elif file_name_template != None:
template = file_name_template
else:
print "ERROR: file_name_template not given!"
usage_message()
exit(1)
#Set the filename_str numbers
if len(sys.argv) == 4:
numbers = sys.argv[3].split(",")
    elif file_numbers: #If there are file numbers configured at the top of the script
numbers = file_numbers
else:
os.chdir(wDir)
numbers=[]
for filename_str in glob.glob("*"+str(template)+"*"):
if ("dark" not in filename_str) & ("pristine" not in filename_str):
numbers.append(re.findall('\d+',filename_str)[0]) #Assumes number we want is the first one in the filename
if not numbers:
print "ERROR: file_numbers not given & could not be found!"
usage_message()
exit(1)
#Make a file_list from the template & numbers
file_list = []
numbers.sort()
for number in numbers:
file_list.append(str(number)+"_"+str(template)+".hdf5")
else:
    #We've got a list of all filenames already
    file_list = filenames_list
    #No template or numbers in this case; define template so the output name logic below
    #does not hit an undefined variable
    template = None
#Check
for filename in file_list:
try:
assert os.path.exists(os.path.join(wDir, filename))
except:
print "ERROR: The file "+str(filename)+" does not exist in "+str(wDir)
exit(1)
if (template == "") | (template == None):
output_template = None
else:
output_template = template.replace("(", "_")
output_template = output_template.replace(")", "_")
output_template = output_template.replace(".", "p")
output_template = str(min(numbers))+"-"+str(max(numbers))+"_"+output_template
if __name__=="__main__":
main(file_list, output_template)
print "\n" | gpl-2.0 | 7,628,204,537,460,579,000 | 35.945455 | 139 | 0.601148 | false |
bnbowman/HlaTools | src/pbhla/io/SamIO.py | 1 | 5513 | from collections import namedtuple
entry = namedtuple('entry', 'qname flag rname pos mapq cigar rnext pnext tlen seq qual')
VALID_HD_TAGS = ['VN', 'SO']
VALID_SQ_TAGS = ['SN', 'LN', 'AS', 'M5', 'SP', 'UR']
REQUIRED_HD_TAGS = ['VN']
REQUIRED_SQ_TAGS = ['SN', 'LN']
class SamHeader( object ):
def __init__(self, lines):
self._version = None
self._sort_order = None
self._references = {}
self._read_groups = []
self._programs = []
self._comments = []
self._parse_input_lines( lines )
def _parse_input_lines(self, lines):
for line in lines:
if line.startswith('@HD'):
self._parse_header_line( line )
elif line.startswith('@SQ'):
self._parse_sequence_line( line )
elif line.startswith('@RG'):
self._parse_read_group_line( line )
elif line.startswith('@PG'):
self._parse_program_line( line )
elif line.startswith('@CO'):
self._parse_comment_line( line )
else:
msg = "Not a recognized header line: {0}".format( line )
raise TypeError( msg )
def _parse_header_line(self, line):
if self._version:
msg = "Only 1 header line allowed, but 2 detected"
raise ValueError( msg )
# Parse and validate the tags
tags = tags_to_dictionary( line.strip().split()[1:] )
validate_tags( tags, VALID_HD_TAGS, REQUIRED_HD_TAGS )
# Set the appropriate variables
self._version = tags['VN']
if 'SO' in tags:
self._sort_order = tags['SO']
def _parse_sequence_line(self, line):
tags = tags_to_dictionary( line.strip().split()[1:] )
validate_tags( tags, VALID_SQ_TAGS, REQUIRED_SQ_TAGS )
if tags['SN'] in self._references:
msg = 'Sequence name "{0}" is duplicated!'.format(tags['SN'])
raise ValueError( msg )
tags['LN'] = int(tags['LN'])
self._references[tags['SN']] = tags
def _parse_read_group_line(self, line):
pass
def _parse_program_line(self, line):
pass
def _parse_comment_line(self, line):
pass
@property
def version(self):
return self._version
@property
def sort_order(self):
return self._sort_order
@property
def references(self):
return self._references
@property
def read_groups(self):
return self._read_groups
@property
def program(self):
        return self._programs
@property
def comments(self):
return self._comments
class SamEntry( object ):
def __init__(self, line):
parts = line.strip().split()[:11]
self.entry = entry._make(parts)
self._pos = int(self.entry.pos)
self._tlen = int(self.entry.tlen)
@property
def qname(self):
return self.entry.qname
@property
def flag(self):
return self.entry.flag
@property
def rname(self):
return self.entry.rname
@property
def pos(self):
return self._pos
@property
def mapq(self):
return self.entry.mapq
@property
def cigar(self):
return self.entry.cigar
@property
def rnext(self):
return self.entry.rnext
@property
def pnext(self):
return self.entry.pnext
@property
def tlen(self):
return self._tlen
@property
def seq(self):
return self.entry.seq
@property
def qual(self):
return self.entry.qual
@property
def aend(self):
return self.pos + self.tlen
class SamReader( object ):
def __init__(self, f):
self._file = open(f, "r")
self._header = self.parse_header()
self._file = open(f, "r") # Reset the file position
def parse_header(self):
header_lines = []
line_start = 0
for line in self._file:
if line.startswith('@'):
header_lines.append( line )
else:
break
return SamHeader( header_lines )
@property
def header(self):
return self._header
@property
def version(self):
return self.header.version
@property
def sort_order(self):
return self.header.sort_order
@property
def references(self):
return self.header.references
def close(self):
self._file.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __iter__(self):
for line in self._file:
if line.startswith('@'):
continue
yield SamEntry(line)
#
# Utilities
#
def tags_to_dictionary( tags ):
data_tags = {}
for tag in tags:
if tag[2] != ':':
msg = 'Not a valid tag: "{0}"'.format(tag)
raise TypeError( msg )
tag_id, tag_value = tag[:2], tag[3:]
data_tags[tag_id] = tag_value
return data_tags
def validate_tags( tags, valid_tags, required_tags ):
for tag in tags: # Check that all present tags are valid
if tag not in valid_tags:
msg = 'Invalid tag "{0}" present'.format(tag)
raise TypeError( msg )
for tag in required_tags: # Check that all required tags are present
if tag not in tags:
msg = 'Required tag "{0}" not present'.format(tag)
raise TypeError( msg )
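#
# Example usage (editor's sketch, not part of the original module): iterating a SAM file
# with SamReader. The path argument is a placeholder; any SAM file whose header satisfies
# the @HD/@SQ rules enforced above should work.
#
def example_print_alignments(sam_path):
    with SamReader(sam_path) as reader:
        print "SAM format version:", reader.version
        for record in reader:
            # qname/rname/pos/aend come straight from the namedtuple-backed SamEntry
            print record.qname, record.rname, record.pos, record.aend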
| bsd-3-clause | 4,897,226,491,460,510,000 | 24.761682 | 88 | 0.552875 | false |
frashpikass/mvp-osm | mvp.py | 1 | 34109 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 24 5:26:00 2016
@author: Francesco Piazza
@author: Maurizio Napolitano
MIT License
"""
# NOTE: the SRID of the input DB is 4326
# NOTE: the SRID of the output DB is 3857
### Imports
import ConfigParser
import numpy as np
import os
import scipy as sc
import scipy.cluster.hierarchy
import scipy.spatial.distance
import sys
from datetime import datetime
from datetime import timedelta
from optparse import OptionParser
from pyspatialite import dbapi2 as db
### Global variables
TEMPLATECONFIG = "example.cfg"
### Classes
# Class to identify "pet locations" (areas of concentrated activity) generated by Most Valuable Player users
class MVP:
    # Subclass to store the maximum boundaries of the analysis grid
class Gridbounds:
# Coordinates of upper left bound
min_x = None
min_y = None
# Coordinates of lower right bound
max_x = None
max_y = None
# Constructor for class Gridbounds
def __init__(self, min_x, min_y, max_x, max_y):
self.min_x = float(min_x)
self.min_y = float(min_y)
self.max_x = float(max_x)
self.max_y = float(max_y)
# List of input database table names from which points and users are gathered
# Suggested names are "osm_nodes", "osm_ways", "osm_relations"
tables = ["osm_nodes", "osm_relations", "osm_ways"]
# Path to input database
indb = None
# Path to output database
outdb = None
# Size of the time window in days (int)
days = None
    # Grid resolution in output-EPSG units (float); metres for the default EPSG:3857
grid = None
# Gridbounds object that stores the maximum boundaries of the analysis grid
gridbounds = None
# Spatial Reference System Identifier (SRID) for the input database
epsg_in = None
# Spatial Reference System Identifier (SRID) for the output database
epsg_out = None
# List of tags that indicate good local knowledge of the area
goodtags = None
# Constructor for class MVP
def __init__(self, indb, outdb, days, grid, epsg_in, epsg_out, goodtags_filename):
self.indb = indb
self.outdb = outdb
self.days = int(days)
self.grid = float(grid)
self.epsg_in = epsg_in
self.epsg_out = epsg_out
self.initGoodTags(goodtags_filename)
self.initGridbounds()
# Initializes both input and output db so that they're ready to update and query
def initdb(self):
cur = db.connect(self.outdb)
rs = cur.execute('SELECT sqlite_version(), spatialite_version()')
for row in rs:
msg = "> System dependencies are OK. SQLite v%s Spatialite v%s" % (row[0], row[1])
print msg
print("> Database setup in progress, it may take a few minutes...")
# Initialize the outdb for spatial operations
sql = 'SELECT InitSpatialMetadata()'
cur.execute(sql)
# Table creation queries for outdb
sql = '''CREATE TABLE points (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user STRING,
timestamp INTEGER);'''
cur.execute(sql)
sql = '''SELECT AddGeometryColumn('points',
'geometry', %s, 'POINT', 'XY');''' % self.epsg_out
cur.execute(sql)
sql = '''CREATE TABLE usersgrid (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user STRING,
density INTEGER,
activity INTEGER,
class INTEGER default 0,
gid INTEGER);'''
cur.execute(sql)
sql = '''SELECT AddGeometryColumn('usersgrid',
'geometry', %s, 'POINT', 'XY');''' % self.epsg_out
cur.execute(sql)
sql = '''CREATE TABLE users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user STRING UNIQUE );'''
cur.execute(sql)
# creating a POLYGON table
sql = '''CREATE TABLE grid (
id INTEGER PRIMARY KEY AUTOINCREMENT)'''
cur.execute(sql)
sql = '''SELECT AddGeometryColumn('grid',
'geometry', %s, 'POLYGON', 'XY')''' % self.epsg_out
cur.execute(sql)
sql = '''CREATE VIEW users_activity AS SELECT user,
( Round(JulianDay(max(timestamp)))-(JulianDay(min(timestamp))) ) as activity
FROM points GROUP BY user;'''
cur.execute(sql)
sql = '''CREATE TABLE petlocations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
gid INTEGER,
user STRING,
density INTEGER,
activity INTEGER,
class INTEGER default 0);'''
cur.execute(sql)
sql = '''SELECT AddGeometryColumn('petlocations',
'geometry', %s, 'POLYGON', 'XY');''' % self.epsg_out
cur.execute(sql)
# Initialize spatial indexes for all tables in all DBs
self.initSpatialIndex(self.indb)
self.initSpatialIndex(self.outdb)
rs.close()
cur.commit()
cur.close()
print "Database setup completed!"
# Initializes the spatial index of given database (necessary for spatialite RTree queries on database)
def initSpatialIndex(self, database):
cur = db.connect(database)
rs = cur.execute('SELECT * FROM geometry_columns')
print("> Initializing Spatial Index for db %s..." % database)
for row in rs:
# Check if there's already a Spatial Index for the current table of current db
if row[5] != 0:
print("\tSpatial Index for table '%s' already ok!" % (row[0]))
else:
print("\tInitializing Spatial Index for table '%s', please wait..." % (row[0]))
cur.execute("SELECT CreateSpatialIndex('%s', '%s');" % (row[0], row[1]))
print("\tSpatial Index initialized!")
rs.close()
cur.close()
# Get good tags from a multiline text file, sanitize them, init goodtags both here and in the indb
# for query optimization
def initGoodTags(self, filename):
print("> Initializing good tags in the input database...")
try:
f = open(filename, 'r')
self.goodtags = [item.strip() for item in f.readlines()]
f.close()
cur = db.connect(self.indb)
# Check if there's an old version of the goodtags table and eventually drop it
rs = cur.execute("SELECT sql FROM sqlite_master WHERE type = 'table' AND name = 'goodtags'")
if rs.fetchall():
cur.execute("DROP TABLE goodtags")
print("Stale table goodtags of indb dropped")
# Create and populate the goodtags table of indb
cur.execute("CREATE TABLE goodtags (k TEXT PRIMARY KEY)")
for tag in self.goodtags:
sql = "INSERT INTO goodtags (k) VALUES (\'%s\')" % tag
cur.execute(sql)
print("Table goodtags of indb created and populated")
cur.commit()
cur.close()
except OSError, e:
print "Error " + e
sys.exit(2)
# Initializes the gridbounds for later operations
def initGridbounds(self):
print "> Initializing grid boundaries..."
indb = db.connect(self.indb)
incur = indb.cursor()
# Gather the extreme boundaries of our node map (from indb), supposedly with INDB epsg
sql = '''
SELECT
Min(MbrMinX(osm_nodes.geometry)) AS min_x,
Min(MbrMinY(osm_nodes.geometry)) AS min_y,
Max(MbrMaxX(osm_nodes.geometry)) AS max_x,
Max(MbrMaxY(osm_nodes.geometry)) AS max_y
FROM osm_nodes;
'''
rs = incur.execute(sql).fetchone()
minx = rs[0]
miny = rs[1]
maxx = rs[2]
maxy = rs[3]
# Transform the boundaries in the new EPSG for outdb
sql = 'SELECT ST_X(transform(MakePoint(%s,%s,%s),%s)),' % (minx, miny, self.epsg_in, self.epsg_out)
sql += 'ST_Y(transform(MakePoint(%s,%s,%s),%s)),' % (minx, miny, self.epsg_in, self.epsg_out)
sql += 'ST_X(transform(MakePoint(%s,%s,%s),%s)),' % (maxx, maxy, self.epsg_in, self.epsg_out)
sql += 'ST_Y(transform(MakePoint(%s,%s,%s),%s));''' % (maxx, maxy, self.epsg_in, self.epsg_out)
rs = incur.execute(sql).fetchone()
# Initialize grid boundaries
self.gridbounds = self.Gridbounds(rs[0], rs[1], rs[2], rs[3])
# Close connections
incur.close()
indb.close()
# The users table is populated with the IDs of those users who have been active for more than self.days days
# and whose latest activity has been more recent than self.days days ago
def importusers(self):
print "> Importing prolific users who have been active recently..."
delta_days = self.days
indb = db.connect(self.indb)
dbout = db.connect(self.outdb)
incur = indb.cursor()
outcur = dbout.cursor()
        # s counts the users inserted so far; while it is 0 the simpler first-pass insert branch below is used, afterwards duplicates are checked
s = 0
for i in self.tables:
# Define the time interval for analyzing users
ago = ""
if delta_days != 0:
ago = datetime.today() - timedelta(delta_days)
# Create a view that focuses on recently active users
sql = '''
CREATE VIEW users_lastdays AS
SELECT user, MAX(timestamp) AS tempo
FROM %s GROUP BY user;
''' % i
incur.execute(sql)
# Setup for the next SQL query to gather usernames
if delta_days > 0:
# This is the query if we have an analysis window larger than 0 days
sql = '''
SELECT DISTINCT(user)
FROM users_lastdays
WHERE tempo > "%s";
''' % str(ago)
else:
                # This is the query if the analysis window is 0 days or less
sql = "SELECT distinct(user) FROM %s;" % i
# Query execution
rs = incur.execute(sql)
r = rs.fetchall()
# If it's the first time we try to initialize the users...
if s == 0:
# for each user in the resultset from previous query...
for u in r:
user = u[0]
sql = "INSERT INTO users (user) VALUES (?);"
if user is not None:
# If the user is valid, insert it in the users table of the outdb
outcur.execute(sql, [user])
s += 1
# If we have already inserted some users...
else:
# For each user in the resultset
for u in r:
user = u[0]
# Search for that user among users we've previously inserted in outdb's users table
sql = "SELECT user FROM users WHERE user = (?);" # user
rsu = list(outcur.execute(sql, [user]))
# If the user is new for the users table, insert it
if len(rsu) == 0:
sql = "INSERT INTO users (user) VALUES (?);"
outcur.execute(sql, [user])
s += 1
            # At the end, clean up the indb by dropping the view created above.
            # The view must be dropped unconditionally, otherwise the CREATE VIEW on the
            # next table iteration would fail because users_lastdays already exists.
            sql = "DROP VIEW users_lastdays;"
            incur.execute(sql)
# Print the operation results
print("Users imported from table %s" % i)
# Finalize and close connections
outcur.close()
incur.close()
indb.commit()
dbout.commit()
indb.close()
dbout.close()
print("%d users imported" % s)
# Insert a selection of nodes described by goodtags into the points table
def insertptlnodes(self):
print "> Searching the input database for nodes described by good tags..."
indb = db.connect(self.indb)
incur = indb.cursor()
dbout = db.connect(self.outdb)
outcur = dbout.cursor()
for table in self.tables:
sql = None
if table == 'osm_nodes':
# The following code isn't very efficient as is:
#
# w = ' in ('
# for t in self.goodtags:
# t = "'" + t.rstrip() + "',"
# w += t
# w += ")"
# where_badtags = w.replace("(,", "(")
# w = where_badtags.replace(",)", ")")
#
# sql = 'select X(transform(osm_nodes.Geometry,%s)) as x,' % (self.epsg_out)
# sql += 'Y(transform(osm_nodes.Geometry,%s)) as y ' % (self.epsg_out)
# sql += ', timestamp, user from osm_nodes '
# sql += ' natural join %s_tags where %s_tags.k' % (table.rstrip('s'), table.rstrip('s'))
# sql += w
# sql += " GROUP BY user;"
#
# rs = incur.execute(sql)
# for r in rs:
# if (r[2] is not None):
# p = "GeomFromText('POINT(%s %s)',%s)" % (r[0], r[1], self.epsg_out)
# sql = "INSERT INTO points (user, timestamp, geometry) "
# sql += "VALUES (?,?,%s)" % p # % (r[3],r[2],p)
# outcur.execute(sql, (r[3], r[2]))
# dbout.commit()
#
# Thus, I substitute the previous code with the following query, which has the purpose
# of returning the correctly contextualized geometry of every node which is described by a goodtag,
# along with the user who inserted that node and the timestamp of the last node edit
sql = '''
SELECT n.user AS user, n.timestamp as timestamp, AsText( Transform(n.Geometry, %s) ) as GeometryWKT
FROM(
SELECT *
FROM osm_node_tags
WHERE k IN goodtags
) AS q
INNER JOIN osm_nodes AS n
WHERE q.node_id = n.node_id;
''' % self.epsg_out
elif table == 'osm_ways':
# This happens if tables other than osm_nodes are included in the set of tables to analyze
# The original code (here in a working condition) was very inefficient, so I decided
# to substitute it with some ludicrously efficient queries on the db
# for t in self.goodtags:
# idname = table.replace("osm_", "").rstrip('s') + "_id"
# idname = idname.replace("relation", "rel")
# sql = 'select distinct(%s.%s) from %s' % (table, idname, table)
# sql += ' natural join %s_tags where ' % (table.rstrip('s'))
# sql += '%s_tags.k like "%s" ' % (table.rstrip('s'), t.rstrip())
# sql += " group by user"
# rs = incur.execute(sql)
# ids = rs.fetchall()
# for i in ids:
# sql = 'select distinct(osm_nodes.node_id), timestamp, user ' \
# 'from osm_nodes natural join %s_refs where ' % (table.rstrip('s'))
# sql += '%s_refs.%s = %s' % (table.rstrip('s'), idname, i[0])
# rs = incur.execute(sql)
# idp = rs.fetchall()
# for ip in idp:
# sql = 'select X(transform(osm_nodes.Geometry,%s)) as x,' % (self.epsg_out)
# sql += 'Y(transform(osm_nodes.Geometry,%s)) as y, osm_nodes.timestamp ' % (self.epsg_out)
# sql += ' from osm_nodes'
# sql += ' where osm_nodes.node_id = %s' % ip[0]
# record = incur.execute(sql)
# v = record.fetchone()
# p = "GeomFromText('POINT(%s %s)',%s)" % (v[0], v[1], self.epsg_out)
# sql = "INSERT INTO points (user, timestamp, geometry) "
# sql += "VALUES (?,?,%s)" % p
# outcur.execute(sql, (ip[2], ip[1]))
# dbout.commit()
#
# The following query efficiently returns the correctly contextualized geometry of every node which is
# part of a way described by a good tag, together with the name of the user who took care of the way
# and the timestamp that indicates when the way was edited for the last time
sql = '''
SELECT q2.user AS user, q2.timestamp AS timestamp, AsText( Transform(n2.Geometry, %s)) AS GeometryWKT
FROM (
SELECT q1.user AS user, q1.timestamp AS timestamp, r1.node_id AS node_id
FROM (
SELECT q.way_id as way_id, q.sub AS sub, t.user AS user, t.timestamp AS timestamp
FROM (
SELECT way_id, sub
FROM osm_way_tags
WHERE osm_way_tags.k IN goodtags
) AS q
INNER JOIN osm_ways AS t
ON q.way_id = t.way_id
) AS q1
INNER JOIN osm_way_refs AS r1
ON (q1.way_id=r1.way_id AND q1.sub=r1.sub)
) AS q2
INNER JOIN osm_nodes AS n2
ON q2.node_id = n2.node_id;
''' % self.epsg_out
elif table == 'osm_relations':
# The following query efficiently returns the correctly contextualized geometry of every node which is
# part of a relation described by a good tag, together with the name of the user who took care of
# the way and the timestamp that indicates when the relation was edited for the last time
sql = '''
SELECT user, timestamp, AsText( Transform(Geometry, %s) ) AS GeometryWKT
FROM(
SELECT q2.user AS user, q2.timestamp AS timestamp, n2.Geometry AS Geometry
FROM(
SELECT r1.user AS user, r1.timestamp AS timestamp, q1.type AS type, q1.ref AS ref
FROM(
SELECT t.rel_id AS rel_id, r.type AS type, r.ref AS ref
FROM osm_relation_tags AS t
INNER JOIN osm_relation_refs AS r
ON (t.rel_id = r.rel_id
AND t.sub = r.sub)
WHERE k IN goodtags
) AS q1
INNER JOIN osm_relations AS r1
ON q1.rel_id = r1.rel_id
WHERE q1.type = 'N'
) AS q2
INNER JOIN osm_nodes AS n2
ON q2.ref = n2.node_id
UNION
SELECT q3.user AS user, q3.timestamp AS timestamp, n3.Geometry AS Geometry
FROM(
SELECT r1.user AS user, r1.timestamp AS timestamp, q1.type AS type, q1.ref AS ref
FROM(
SELECT t.rel_id AS rel_id, r.type AS type, r.ref AS ref
FROM osm_relation_tags AS t
INNER JOIN osm_relation_refs AS r
ON (t.rel_id = r.rel_id
AND t.sub = r.sub)
WHERE k IN goodtags
) AS q1
INNER JOIN osm_relations AS r1
ON q1.rel_id = r1.rel_id
WHERE q1.type = 'W'
) AS q3
INNER JOIN osm_way_refs AS r3
ON q3.ref = r3.way_id
INNER JOIN osm_nodes AS n3
ON r3.node_id = n3.node_id
);
''' % self.epsg_out
# Insert our precious data into the outdb
rs = incur.execute(sql)
for r in rs:
p = "GeomFromText(\'%s\',%s)" % (r[2], self.epsg_out)
sql = "INSERT INTO points (user, timestamp, geometry)"
sql += "VALUES (?,?,%s);" % p # % (r[0],r[1],p)
outcur.execute(sql, (r[0], r[1]))
dbout.commit()
print("Nodes imported from table %s" % table)
# Finalize and close connections
outcur.close()
dbout.close()
incur.close()
indb.close()
# Creates a grid shaped geometrical structure in the outdb for further operations
def creategrid(self):
print "> Creating a grid structure in the output database..."
# Connect to the output db
dbout = db.connect(self.outdb)
outcur = dbout.cursor()
# Define the boundaries of the first square of the grid
stepminx = self.gridbounds.min_x
stepmaxx = self.gridbounds.min_x + self.grid
stepminy = self.gridbounds.min_y
stepmaxy = self.gridbounds.min_y + self.grid
# Grid creation loop
while True:
# Create a polygon using the boundaries of a square on the grid and insert it in the 'grid' table in outdb
p = "GeomFromText('POLYGON(("
p += "%f %f, " % (stepminx, stepminy)
p += "%f %f, " % (stepmaxx, stepminy)
p += "%f %f, " % (stepmaxx, stepmaxy)
p += "%f %f, " % (stepminx, stepmaxy)
p += "%f %f" % (stepminx, stepminy)
p += "))',%s)" % self.epsg_out
sql = "INSERT INTO grid (geometry) "
sql += "VALUES (%s);" % p
outcur.execute(sql)
# Update the boundaries
if stepmaxx < self.gridbounds.max_x:
# Step forward one column
stepminx = stepmaxx
stepmaxx += self.grid
else:
# Step forward one row
stepminx = self.gridbounds.min_x
stepmaxx = self.gridbounds.min_x + self.grid
stepminy += self.grid
stepmaxy += self.grid
# Check if our cursor is out of all maximum boundaries
if stepmaxy > self.gridbounds.max_y:
# Stop adding squares to the grid
break
# Finalize changes and close connections
dbout.commit()
outcur.close()
dbout.close()
# Place user contributions into each grid cell if those users are likely to be MVPs
def createusersgrid(self):
print "> Creating user contributions grids..."
# Connect to the output db
dbout = db.connect(self.outdb)
outcur = dbout.cursor()
cursor = dbout.cursor()
sql = '''
SELECT
count(pid) AS density,
( Round(JulianDay(max(timestamp))) - Round(JulianDay(min(timestamp))) ) AS activity,
user,
gid,
centerWKT
FROM (
SELECT
points.id AS pid,
points.user AS user,
points.timestamp AS timestamp,
grid.id AS gid,
AsWKT(Centroid(grid.geometry)) AS centerWKT
FROM points, grid
WHERE points.user IN (
SELECT user FROM users_activity WHERE activity > %d
)
AND points.ROWID IN
(
SELECT ROWID
FROM SpatialIndex
WHERE f_table_name = 'points'
AND search_frame = grid.geometry
)
)
GROUP BY user, gid;
''' % self.days
rs = cursor.execute(sql)
# Data entry
for r in rs:
if r is not None:
density = int(r[0])
activity = int(r[1])
if activity is None:
activity = 0
user = r[2]
gid = r[3]
wkt = r[4]
if user is not None:
p = "GeomFromText(\'%s\',%s)" % (wkt, self.epsg_out)
sql = "INSERT INTO usersgrid (user, density, activity, geometry, gid) "
sql += "VALUES (?,%d,%d,%s,%d);" % (density, activity, p, gid)
outcur.execute(sql, [user])
# Finalize changes and close connections
dbout.commit()
cursor.close()
outcur.close()
dbout.close()
# Cluster good user contributions from the usersgrid in contiguous groups of cells (classes).
# Clusters will be retrievable from the usersgrid by looking at columns user and class.
# Every user can have multiple clusters in different places.
def clustergridgroup(self):
dbout = db.connect(self.outdb)
outcur = dbout.cursor()
print("> Calculating user contribution clusters (gridsize is %s)..." % self.grid)
# Fetch users from the usersgrid
users = self.getusers()
# For every user...
for u in users:
# Query all user contributions for user u in the usersgrid
sql = '''
SELECT id, ST_X(geometry), ST_Y(geometry)
FROM usersgrid
WHERE user=\"%s\";
''' % u
rs = outcur.execute(sql)
# result = array of cell centers where user u has been active
result = []
# ids = array of contribution IDs
ids = []
for r in rs:
t = (r[1], r[2])
result.append(t)
ids.append(r[0])
if len(result) > 1:
# d = array of contribution coordinates
d = np.array(result)
# dist = distance matrix
dist = scipy.spatial.distance.pdist(d, 'euclidean')
# Z = linkage matrix
z = sc.cluster.hierarchy.single(dist)
# clustgroup = flat clusters from the hierarchical clustering defined by the linkage matrix Z
# t is the threshold for clustering and it's the chosen grid size
clustgroup = sc.cluster.hierarchy.fcluster(z, t=self.grid, criterion='distance')
k = 0
out = dbout.cursor()
for c in clustgroup:
c = int(c)
idp = int(ids[k])
sql = '''
UPDATE usersgrid
SET class=%d
WHERE id=%d;
''' % (c, idp)
out.execute(sql)
k += 1
dbout.commit()
out.close()
outcur.close()
# Fetch all distinct users from the usersgrid and return them in a list
def getusers(self):
dbout = db.connect(self.outdb)
outcur = dbout.cursor()
sql = "SELECT DISTINCT(user) FROM usersgrid;"
users = list(outcur.execute(sql))
outcur.close()
return users
# Populate the petlocations table
def petlocations(self):
print "> Calculating pet locations..."
dbout = db.connect(self.outdb)
outcur = dbout.cursor()
sql = '''
SELECT
count(grid.id) AS gid,
asText(CastToPolygon(gunion(grid.geometry))) AS geometry,
usersgrid.class AS class,
usersgrid.user AS user,
max(usersgrid.activity) AS activity,
max(usersgrid.density) AS density,
geometrytype(gunion(grid.geometry)) AS tipo
FROM
usersgrid,
grid
where usersgrid.rowid IN
(
SELECT ROWID
FROM SpatialIndex
WHERE f_table_name = 'usersgrid'
AND search_frame = grid.geometry
)
GROUP BY usersgrid.class, usersgrid.user
ORDER BY user DESC;
'''
rs = outcur.execute(sql).fetchall()
for r in rs:
gid = r[0]
geometry = r[1]
clas = r[2]
user = r[3]
activity = r[4]
density = r[5]
g = "GeomFromText(\"%s\", %s)" % (geometry, self.epsg_out)
sql = '''
INSERT INTO petlocations (gid, geometry, class, user,activity,density)
VALUES (?,%s,?,?,?,?);
''' % g
outcur.execute(sql, [gid, clas, user, activity, density])
dbout.commit()
outcur.close()
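    # Editor's sketch of the clustering step used in clustergridgroup(), on toy coordinates
    # and outside the database: points closer than the grid size end up in the same class.
    # The coordinates below are arbitrary example values in output-EPSG units.
    @staticmethod
    def example_cluster_sketch(gridsize=10000):
        pts = np.array([(0, 0), (2000, 1000), (50000, 50000), (51000, 50500)])
        dist = scipy.spatial.distance.pdist(pts, 'euclidean')
        linkage = sc.cluster.hierarchy.single(dist)
        classes = sc.cluster.hierarchy.fcluster(linkage, t=gridsize, criterion='distance')
        print("Cluster classes: %s" % classes)  # expected: two clusters, e.g. [1 1 2 2]
        return classes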
### Script functions
# Main script logic flow
def execMVP(cmd):
days = None
epsg_in = None
epsg_out = None
outdb = None
indb = None
grid = None
goodtags_filename = "conf/goodtags"
# If the user asked to use a config file...
if cmd.config:
try:
# Parse options from the config file
parser = ConfigParser.ConfigParser()
parser.readfp(open(cmd.config))
goodtags_filename = parser.get("goodtags", "file")
epsg_in = parser.get("config", "epsg_in")
epsg_out = parser.get("config", "epsg_out")
days = parser.get("config", "days")
indb = parser.get("indb", "infile")
outdb = parser.get("outdb", "outfile")
except ConfigParser.NoOptionError, e:
print "Error %s " % e
sys.exit(2)
# Set up parameters from user input
if cmd.input:
indb = cmd.input
if cmd.output:
outdb = cmd.output
if cmd.tags:
goodtags_filename = cmd.tags
if cmd.epsgin:
epsg_in = cmd.epsgin
if cmd.epsgout:
epsg_out = cmd.epsgout
if cmd.grid:
grid = cmd.grid
if cmd.days:
days = cmd.days
# Default parameters
if days is None:
days = 180
if grid is None:
grid = 10000
if epsg_in is None:
epsg_in = "4326"
if epsg_out is None:
epsg_out = "3857"
print("ExecMVP: %d %s %s %s %s %d %s" % (days, epsg_in, epsg_out, indb, outdb, grid, goodtags_filename)) # debug
# Initialize the MVP object
mu = MVP(indb, outdb, days, grid, epsg_in, epsg_out, goodtags_filename)
# Initialize the DBs
mu.initdb()
# Create a grid in the outer DB (grid = grid resolution)
mu.creategrid()
# Import users from indb to outdb
# The users table is populated with the IDs of those users who have recently been active
mu.importusers()
# Insert a list of nodes described by goodtags into the outdb points table
mu.insertptlnodes()
# Place user contributions into each grid cell if those users are likely to be MVPs
mu.createusersgrid()
# Cluster user contributions in contiguous groups of cells (classes)
mu.clustergridgroup()
# Populate the pet location table
mu.petlocations()
print("All operations were completed with success!")
print("Enjoy your data in %s - Import it in QGIS to visualize it!" % cmd.output)
print("(Use \'spatialite_osm_map\' to set up the undelying map from the original osm file)")
# CLI user interface
def main():
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-c", "--config", action="store", dest="config",
help="fetch execution parameters from a configuration file")
parser.add_option("-C", "--create", action="store_true", dest="create",
help="create a configuration file template - useful to create a new configuration file",
default=False)
parser.add_option("-i", "--input", action="store", dest="input", help="input sqlite/spatialite database file *")
parser.add_option("-o", "--output", action="store", dest="output", help="output sqlite/spatialite database file *")
parser.add_option("-e", "--epsgin", action="store", dest="epsgin",
help="epsg metric (SRID) for the input database *")
parser.add_option("-E", "--epsgout", action="store", dest="epsgout",
help="epsg metric (SRID) for the output database *")
parser.add_option("-g", "--grid", action="store", dest="grid", help="grid size expressed in epsg unit *")
parser.add_option("-d", "--days", action="store", dest="days", help="time window for analysis expressed in days *")
parser.add_option("-t", "--tags", action="store", dest="tags",
help="txt file with the list of good tags to search *")
(options, args) = parser.parse_args()
if not options.create:
if (options.input and options.output is not None) or (options.config is not None):
execMVP(options)
else:
parser.print_help()
print("* overrides options from the config file")
print("Remember to use \'spatialite_osm_raw\' on the original osm file "
"to get an sqlite input file for this script!")
sys.exit(0)
else:
try:
f = open(os.getcwd() + os.path.sep + TEMPLATECONFIG, 'w')
f.write("[config]\n")
f.write("epsg_in: 4326\n")
f.write("epsg_out: 3857\n")
f.write("grid: 10000\n")
f.write("days: 180\n")
f.write("[goodtags]\n")
f.write("file: conf/goodtags.txt\n")
f.write("[indb]\n")
f.write("infile:data/file.sqlite\n")
f.write("[outdb]\n")
f.write("outfile:data/mvposm.sqlite")
f.close()
except OSError, e:
print "Error " + e
sys.exit(2)
if __name__ == "__main__":
main()
| mit | -8,127,420,920,717,677,000 | 38.341407 | 119 | 0.519335 | false |
aurule/npc | tests/character/test_properties.py | 1 | 1783 | """
Test all getters and setters in the Character class
Includes all methods that set and retrieve data
"""
import npc
from npc.character import Character
import pytest
class TestTypeKey:
def test_casing(self):
"""Type key should always be lower case"""
char = Character(type=['Fish', 'Great Ape'])
assert char.type_key == 'fish'
def test_empty(self):
char = Character()
assert char.type_key is None
class TestLocations:
def test_foreign(self):
char = Character()
char.tags('foreign').append('Mars')
assert 'Mars' in char.locations
def test_location(self):
char = Character()
char.tags('location').append('Mars')
assert 'Mars' in char.locations
def test_both(self):
char = Character()
char.tags('location').append('Mars')
char.tags('foreign').append('Mercury')
assert 'Mars' in char.locations
assert 'Mercury' in char.locations
def test_removes_empties(self):
char = Character()
char.tags('location').append('Mars')
char.tags('foreign').append('')
assert len(list(char.locations)) == 1
class TestHasLocations:
def test_foreign(self):
char = Character()
char.tags('foreign').append('Mars')
assert char.has_locations
def test_location(self):
char = Character()
char.tags('location').append('Mars')
assert char.has_locations
def test_both(self):
char = Character()
char.tags('location').append('Mars')
char.tags('foreign').append('Mercury')
assert char.has_locations
def test_empty(self):
char = Character()
char.tags('foreign').append('')
assert not char.has_locations
| mit | 5,626,822,037,985,338,000 | 26.430769 | 52 | 0.609647 | false |
code-geek/nepalicalendar-py | tests/test_tithis.py | 1 | 1272 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
from nepalicalendar import TITHIS, NepDate, NepCal
from datetime import date, timedelta
from random import randint
class TithisTestCase(unittest.TestCase):
"""
Test case for the tithis
"""
def test_tithi_month_count(self):
"""
Make sure that the tithi data is in match with the month data
"""
for i in range(2040, 2071):
self.assertEqual(len(TITHIS[i]), 12)
for j in range(1, 13):
try:
self.assertEqual(len(TITHIS[i][j - 1]), NepCal.monthrange(i, j))
except:
print("Error in year %d month %d" % (i, j))
raise
def test_nepdate_tithi(self):
d = NepDate(2069, 2, 3)
self.assertEqual(d.tithi, 11)
with self.assertRaises(ValueError):
a = NepDate(2031, 1, 1).update().tithi
with self.assertRaises(ValueError):
a = NepDate(2091, 1, 1).update().tithi
# TODO: Add more tithi tests
def test_nepdate_tithi_name(self):
self.assertEqual(NepDate(2069, 4, 15).ne_tithi_name(), u"द्वादशी")
pass
| mit | -6,756,676,890,935,712,000 | 28.952381 | 84 | 0.574722 | false |
Grunny/zap-cli | zapcli/cli.py | 1 | 11868 | """
ZAP CLI.
"""
import sys
import click
from zapcli import __version__
from zapcli import helpers
from zapcli.commands.context import context_group
from zapcli.commands.policies import policies_group
from zapcli.commands.scanners import scanner_group
from zapcli.commands.scripts import scripts_group
from zapcli.commands.session import session_group
from zapcli.log import console
from zapcli.zap_helper import ZAPHelper
@click.group(help='ZAP CLI v{0} - A simple commandline tool for OWASP ZAP.'.format(__version__))
@click.option('--boring', is_flag=True, default=False, help='Remove color from console output.')
@click.option('--verbose', '-v', is_flag=True, default=False, type=bool,
help='Add more verbose debugging output.')
@click.option('--zap-path', default='/zap', envvar='ZAP_PATH', type=str,
help='Path to the ZAP daemon. Defaults to /zap or the value of the environment variable ZAP_PATH.')
@click.option('--port', '-p', default=8090, envvar='ZAP_PORT', type=int,
help='Port of the ZAP proxy. Defaults to 8090 or the value of the environment variable ZAP_PORT.')
@click.option('--zap-url', default='http://127.0.0.1', envvar='ZAP_URL', type=str,
help='The URL of the ZAP proxy. Defaults to http://127.0.0.1 or the value of the environment ' +
'variable ZAP_URL.')
@click.option('--api-key', default='', envvar='ZAP_API_KEY', type=str,
help='The API key for using the ZAP API if required. Defaults to the value of the environment ' +
'variable ZAP_API_KEY.')
@click.option('--log-path', envvar='ZAP_LOG_PATH', type=str,
help='Path to the directory in which to save the ZAP output log file. Defaults to the value of ' +
'the environment variable ZAP_LOG_PATH and uses the value of --zap-path if it is not set.')
@click.pass_context
def cli(ctx, boring, verbose, zap_path, port, zap_url, api_key, log_path):
"""Main command line entry point."""
console.colorize = not boring
if verbose:
console.setLevel('DEBUG')
else:
console.setLevel('INFO')
ctx.obj = ZAPHelper(zap_path=zap_path, port=port, url=zap_url, api_key=api_key, log_path=log_path)
@cli.command('start', short_help='Start the ZAP daemon.')
@click.option('--start-options', '-o', type=str,
help='Extra options to pass to the ZAP start command, e.g. "-config api.key=12345"')
@click.pass_obj
def start_zap_daemon(zap_helper, start_options):
"""Helper to start the daemon using the current config."""
console.info('Starting ZAP daemon')
with helpers.zap_error_handler():
zap_helper.start(options=start_options)
@cli.command('shutdown')
@click.pass_obj
def shutdown_zap_daemon(zap_helper):
"""Shutdown the ZAP daemon."""
console.info('Shutting down ZAP daemon')
with helpers.zap_error_handler():
zap_helper.shutdown()
@cli.command('status', short_help='Check if ZAP is running.')
@click.option('--timeout', '-t', type=int,
help='Wait this number of seconds for ZAP to have started')
@click.pass_obj
def check_status(zap_helper, timeout):
"""
Check if ZAP is running and able to receive API calls.
You can provide a timeout option which is the amount of time in seconds
the command should wait for ZAP to start if it is not currently running.
This is useful to run before calling other commands if ZAP was started
outside of zap-cli. For example:
zap-cli status -t 60 && zap-cli open-url "http://127.0.0.1/"
    Exits with a non-zero status code if ZAP is either not running or the
    command timed out waiting for ZAP to start.
"""
with helpers.zap_error_handler():
if zap_helper.is_running():
console.info('ZAP is running')
elif timeout is not None:
zap_helper.wait_for_zap(timeout)
console.info('ZAP is running')
else:
console.error('ZAP is not running')
sys.exit(2)
@cli.command('open-url')
@click.argument('url')
@click.pass_obj
def open_url(zap_helper, url):
"""Open a URL using the ZAP proxy."""
console.info('Accessing URL {0}'.format(url))
zap_helper.open_url(url)
@cli.command('spider')
@click.argument('url')
@click.option('--context-name', '-c', type=str, help='Context to use if provided.')
@click.option('--user-name', '-u', type=str,
help='Run scan as this user if provided. If this option is used, the context parameter must also ' +
'be provided.')
@click.pass_obj
def spider_url(zap_helper, url, context_name, user_name):
"""Run the spider against a URL."""
console.info('Running spider...')
with helpers.zap_error_handler():
zap_helper.run_spider(url, context_name, user_name)
@cli.command('ajax-spider')
@click.argument('url')
@click.pass_obj
def ajax_spider_url(zap_helper, url):
"""Run the AJAX Spider against a URL."""
console.info('Running AJAX Spider...')
zap_helper.run_ajax_spider(url)
@cli.command('active-scan', short_help='Run an Active Scan.')
@click.argument('url')
@click.option('--scanners', '-s', type=str, callback=helpers.validate_scanner_list,
help='Comma separated list of scanner IDs and/or groups to use in the scan. Use the scanners ' +
'subcommand to get a list of IDs. Available groups are: {0}.'.format(
', '.join(['all'] + list(ZAPHelper.scanner_group_map.keys()))))
@click.option('--recursive', '-r', is_flag=True, default=False, help='Make scan recursive.')
@click.option('--context-name', '-c', type=str, help='Context to use if provided.')
@click.option('--user-name', '-u', type=str,
help='Run scan as this user if provided. If this option is used, the context parameter must also ' +
'be provided.')
@click.pass_obj
def active_scan(zap_helper, url, scanners, recursive, context_name, user_name):
"""
Run an Active Scan against a URL.
The URL to be scanned must be in ZAP's site tree, i.e. it should have already
been opened using the open-url command or found by running the spider command.
"""
console.info('Running an active scan...')
with helpers.zap_error_handler():
if scanners:
zap_helper.set_enabled_scanners(scanners)
zap_helper.run_active_scan(url, recursive, context_name, user_name)
@cli.command('alerts')
@click.option('--alert-level', '-l', default='High', type=click.Choice(ZAPHelper.alert_levels.keys()),
help='Minimum alert level to include in report (default: High).')
@click.option('--output-format', '-f', default='table', type=click.Choice(['table', 'json']),
help='Output format to print the alerts.')
@click.option('--exit-code', default=True, type=bool,
help='Whether to set a non-zero exit code when there are any alerts of the specified ' +
'level (default: True).')
@click.pass_obj
def show_alerts(zap_helper, alert_level, output_format, exit_code):
"""Show alerts at the given alert level."""
alerts = zap_helper.alerts(alert_level)
helpers.report_alerts(alerts, output_format)
if exit_code:
code = 1 if len(alerts) > 0 else 0
sys.exit(code)
@cli.command('quick-scan', short_help='Run a quick scan.')
@click.argument('url')
@click.option('--self-contained', '-sc', is_flag=True, default=False,
help='Make the scan self-contained, i.e. start the daemon, open the URL, scan it, ' +
'and shutdown the daemon when done.')
@click.option('--scanners', '-s', type=str, callback=helpers.validate_scanner_list,
help='Comma separated list of scanner IDs and/or groups to use in the scan. Use the scanners ' +
'subcommand to get a list of IDs. Available groups are: {0}.'.format(
', '.join(['all'] + list(ZAPHelper.scanner_group_map.keys()))))
@click.option('--spider', is_flag=True, default=False, help='If set, run the spider before running the scan.')
@click.option('--ajax-spider', is_flag=True, default=False, help='If set, run the AJAX Spider before running the scan.')
@click.option('--recursive', '-r', is_flag=True, default=False, help='Make scan recursive.')
@click.option('--alert-level', '-l', default='High', type=click.Choice(ZAPHelper.alert_levels.keys()),
help='Minimum alert level to include in report.')
@click.option('--exclude', '-e', type=str, callback=helpers.validate_regex,
help='Regex to exclude from all aspects of the scan')
@click.option('--start-options', '-o', type=str,
help='Extra options to pass to the ZAP start command when the --self-contained option is used, ' +
' e.g. "-config api.key=12345"')
@click.option('--output-format', '-f', default='table', type=click.Choice(['table', 'json']),
help='Output format to print the alerts.')
@click.option('--context-name', '-c', type=str, help='Context to use if provided.')
@click.option('--user-name', '-u', type=str,
help='Run scan as this user if provided. If this option is used, the context parameter must also ' +
'be provided.')
@click.pass_obj
def quick_scan(zap_helper, url, **options):
"""
Run a quick scan of a site by opening a URL, optionally spidering the URL,
running an Active Scan, and reporting any issues found.
This command contains most scan options as parameters, so you can do
everything in one go.
If any alerts are found for the given alert level, this command will exit
with a status code of 1.
"""
if options['self_contained']:
console.info('Starting ZAP daemon')
with helpers.zap_error_handler():
zap_helper.start(options['start_options'])
console.info('Running a quick scan for {0}'.format(url))
with helpers.zap_error_handler():
if options['scanners']:
zap_helper.set_enabled_scanners(options['scanners'])
if options['exclude']:
zap_helper.exclude_from_all(options['exclude'])
zap_helper.open_url(url)
if options['spider']:
zap_helper.run_spider(url, options['context_name'], options['user_name'])
if options['ajax_spider']:
zap_helper.run_ajax_spider(url)
zap_helper.run_active_scan(url, options['recursive'], options['context_name'], options['user_name'])
alerts = zap_helper.alerts(options['alert_level'])
helpers.report_alerts(alerts, options['output_format'])
if options['self_contained']:
console.info('Shutting down ZAP daemon')
with helpers.zap_error_handler():
zap_helper.shutdown()
exit_code = 1 if len(alerts) > 0 else 0
sys.exit(exit_code)
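# Example invocation (illustrative; the target URL is a placeholder): run a
# self-contained quick scan that spiders the target, scans recursively and
# prints any alerts as JSON:
#   zap-cli quick-scan --self-contained --spider -r -f json http://127.0.0.1/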
@cli.command('exclude', short_help='Exclude a pattern from all scanners.')
@click.argument('pattern', callback=helpers.validate_regex)
@click.pass_obj
def exclude_from_scanners(zap_helper, pattern):
"""Exclude a pattern from proxy, spider and active scanner."""
with helpers.zap_error_handler():
zap_helper.exclude_from_all(pattern)
@cli.command('report')
@click.option('--output', '-o', help='Output file for report.')
@click.option('--output-format', '-f', default='xml', type=click.Choice(['xml', 'html', 'md']),
help='Report format.')
@click.pass_obj
def report(zap_helper, output, output_format):
"""Generate XML, MD or HTML report."""
if output_format == 'html':
zap_helper.html_report(output)
elif output_format == 'md':
zap_helper.md_report(output)
else:
zap_helper.xml_report(output)
console.info('Report saved to "{0}"'.format(output))
# Add subcommand groups
cli.add_command(context_group)
cli.add_command(policies_group)
cli.add_command(scanner_group)
cli.add_command(scripts_group)
cli.add_command(session_group)
| mit | -8,610,406,788,646,297,000 | 40.788732 | 120 | 0.656387 | false |
MiniSEC/GRR_clone | lib/rdfvalues/structs.py | 1 | 55661 | #!/usr/bin/env python
"""Semantic Protobufs are serialization agnostic, rich data types."""
import copy
import cStringIO
import json
import logging
import struct
from google.protobuf import text_format
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import type_info
from grr.lib import utils
from grr.proto import semantic_pb2
# pylint: disable=super-init-not-called
# We copy these here to remove dependency on the protobuf library.
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
# The following are the varint encoding/decoding functions taken from the
# protobuf library. Placing them in this file allows us to remove dependency on
# the standard protobuf library.
ORD_MAP = dict((chr(x), x) for x in range(0, 256))
CHR_MAP = dict((x, chr(x)) for x in range(0, 256))
HIGH_CHR_MAP = dict((x, chr(0x80 | x)) for x in range(0, 256))
# This function is HOT.
def ReadTag(buf, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple."""
try:
start = pos
while ORD_MAP[buf[pos]] & 0x80:
pos += 1
pos += 1
return (buf[start:pos], pos)
except IndexError:
raise ValueError("Invalid tag")
# This function is HOT.
def VarintWriter(write, value):
"""Convert an integer to a varint and write it using the write function."""
if value < 0:
raise ValueError("Varint can not encode a negative number.")
bits = value & 0x7f
value >>= 7
while value:
write(HIGH_CHR_MAP[bits])
bits = value & 0x7f
value >>= 7
return write(CHR_MAP[bits])
def SignedVarintWriter(write, value):
"""Encode a signed integer as a zigzag encoded signed integer."""
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(HIGH_CHR_MAP[bits])
bits = value & 0x7f
value >>= 7
return write(CHR_MAP[bits])
# This function is HOT.
def VarintReader(buf, pos):
"""A 64 bit decoder from google.protobuf.internal.decoder."""
result = 0
shift = 0
while 1:
b = ORD_MAP[buf[pos]]
result |= ((b & 0x7f) << shift)
pos += 1
if not b & 0x80:
return (result, pos)
shift += 7
if shift >= 64:
raise rdfvalue.DecodeError("Too many bytes when decoding varint.")
def SignedVarintReader(buf, pos):
"""A Signed 64 bit decoder from google.protobuf.internal.decoder."""
result = 0
shift = 0
while 1:
b = ORD_MAP[buf[pos]]
result |= ((b & 0x7f) << shift)
pos += 1
if not b & 0x80:
if result > 0x7fffffffffffffff:
result -= (1 << 64)
return (result, pos)
shift += 7
if shift >= 64:
raise rdfvalue.DecodeError("Too many bytes when decoding varint.")
class ProtoType(type_info.TypeInfoObject):
"""A specific type descriptor for protobuf fields.
This is an abstract class - do not instantiate directly.
"""
# Must be overridden by implementations.
wire_type = None
# We cache the serialized version of the tag here so we just need to do a
# string comparison instead of decoding the tag each time.
tag_data = None
# The semantic type of the object described by this descriptor.
type = None
# The type name according to the .proto domain specific language.
proto_type_name = "string"
def __init__(self, field_number=None, required=False, **kwargs):
super(ProtoType, self).__init__(**kwargs)
self.field_number = field_number
self.required = required
if field_number is None:
raise type_info.TypeValueError("No valid field number specified.")
# In python Varint encoding is expensive so we want to move as much of the
# hard work from the Write() methods which are called frequently to the type
# descriptor constructor which is only called once (during protobuf
    # declaration time). Pre-calculating the tag makes for faster serialization.
self.tag = self.field_number << 3 | self.wire_type
tmp = cStringIO.StringIO()
VarintWriter(tmp.write, self.tag)
self.tag_data = tmp.getvalue()
def IsDirty(self, unused_python_format):
"""Return and clear the dirty state of the python object."""
return False
def Write(self, stream, value):
"""Encode the tag and value into the stream.
Note that value should already be in wire format.
This function is HOT.
Args:
stream: The stream to write on.
value: This is the value to write encoded according to the specific wire
format of this type.
"""
raise NotImplementedError()
def Read(self, buff, index):
"""Read a value from the buffer.
Note that reading into the wire format should be as fast as possible.
This function is HOT.
Args:
buff: A string to read from.
index: Where to start reading from.
Returns:
A value encoded in wire format specific to this type.
"""
raise NotImplementedError()
def ConvertFromWireFormat(self, value):
"""Convert value from the internal type to the real type.
When data is being parsed, it might be quicker to store it in a different
format internally. This is because we must parse all tags, but only decode
those fields which are being accessed.
This function is called when we retrieve a field on access, so we only pay
the penalty once, and cache the result.
This function is HOT.
Args:
value: A parameter stored in the wire format for this type.
Returns:
The parameter encoded in the python format representation.
"""
return value
def ConvertToWireFormat(self, value):
"""Convert the parameter into the internal storage format.
This function is the inverse of ConvertFromWireFormat().
This function is HOT.
Args:
value: A python format representation of the value as coerced by the
Validate() method. This is type specific, but always the same.
Returns:
The parameter encoded in the wire format representation.
"""
return value
def _FormatDescriptionComment(self):
result = "".join(["\n // %s\n"%x for x in self.description.splitlines()])
return result
def _FormatDefault(self):
return " [default = %s]" % self.GetDefault()
def _FormatField(self):
result = " optional %s %s = %s%s" % (
self.proto_type_name, self.name, self.field_number,
self._FormatDefault())
return result + ";\n"
def Definition(self):
"""Return a string with the definition of this field."""
return self._FormatDescriptionComment() + self._FormatField()
def Format(self, value):
"""A Generator for display lines representing value."""
yield str(value)
class ProtoUnknown(ProtoType):
"""A type descriptor for unknown fields.
  We keep unknown fields with this type descriptor so we can re-serialize
  them later. This way, if we parse a protobuf with fields we don't know, we
  maintain those upon serialization.
"""
def __init__(self, encoded_tag=None, **unused_kwargs):
self.encoded_tag = encoded_tag
def Write(self, stream, value):
stream.write(self.encoded_tag)
stream.write(value)
class ProtoString(ProtoType):
"""A string encoded in a protobuf."""
wire_type = WIRETYPE_LENGTH_DELIMITED
# This descriptor describes unicode strings.
type = rdfvalue.RDFString
def __init__(self, default=u"", **kwargs):
# Strings default to "" if not specified.
super(ProtoString, self).__init__(default=default, **kwargs)
def Validate(self, value, **_):
"""Validates a python format representation of the value."""
# We only accept a base string or unicode object here. (Should we also
# accept RDFString?)
if not (value.__class__ is str or value.__class__ is unicode):
raise type_info.TypeValueError("%s not a valid string" % value)
# A String means a unicode String. We must be dealing with unicode strings
# here and the input must be encodable as a unicode object.
try:
return unicode(value)
except UnicodeError:
raise type_info.TypeValueError("Not a valid unicode string")
def Write(self, stream, value):
stream.write(self.tag_data)
VarintWriter(stream.write, len(value))
stream.write(value)
def Read(self, buff, index):
length, index = VarintReader(buff, index)
return buff[index:index+length], index+length
def ConvertFromWireFormat(self, value):
"""Internally strings are utf8 encoded."""
try:
return unicode(value, "utf8")
except UnicodeError:
raise rdfvalue.DecodeError("Unicode decoding error")
def ConvertToWireFormat(self, value):
"""Internally strings are utf8 encoded."""
return value.encode("utf8")
def Definition(self):
"""Return a string with the definition of this field."""
return self._FormatDescriptionComment() + self._FormatField()
def _FormatDefault(self):
if self.GetDefault():
return " [default = %r]" % self.GetDefault()
else:
return ""
def Format(self, value):
yield repr(value)
class ProtoBinary(ProtoType):
"""A binary string encoded in a protobuf."""
wire_type = WIRETYPE_LENGTH_DELIMITED
# This descriptor describes strings.
type = rdfvalue.RDFString
proto_type_name = "bytes"
def __init__(self, default="", **kwargs):
# Byte strings default to "" if not specified.
super(ProtoBinary, self).__init__(default=default, **kwargs)
def Validate(self, value):
if value.__class__ is not str:
raise type_info.TypeValueError("%s not a valid string" % value)
return value
def Write(self, stream, value):
stream.write(self.tag_data)
VarintWriter(stream.write, len(value))
stream.write(value)
def Read(self, buff, index):
length, index = VarintReader(buff, index)
return buff[index:index+length], index+length
def Definition(self):
"""Return a string with the definition of this field."""
return self._FormatDescriptionComment() + self._FormatField()
def Format(self, value):
yield repr(value)
def _FormatDefault(self):
if self.GetDefault():
return " [default = %r]" % self.GetDefault()
else:
return ""
class ProtoUnsignedInteger(ProtoType):
"""An unsigned VarInt encoded in the protobuf."""
wire_type = WIRETYPE_VARINT
# This descriptor describes integers.
type = rdfvalue.RDFInteger
proto_type_name = "uint64"
def __init__(self, default=0, **kwargs):
# Integers default to 0 if not specified.
super(ProtoUnsignedInteger, self).__init__(default=default, **kwargs)
def Validate(self, value, **_):
if not isinstance(value, (int, long)):
raise type_info.TypeValueError("Invalid value %s for Integer" % value)
return value
def Write(self, stream, value):
stream.write(self.tag_data)
VarintWriter(stream.write, value)
def Read(self, buff, index):
return VarintReader(buff, index)
def _FormatDefault(self):
if self.GetDefault():
return " [default = %r]" % self.GetDefault()
else:
return ""
class ProtoSignedInteger(ProtoUnsignedInteger):
"""A signed VarInt encoded in the protobuf.
Note: signed VarInts are more expensive than unsigned VarInts.
"""
proto_type_name = "int64"
def Write(self, stream, value):
stream.write(self.tag_data)
SignedVarintWriter(stream.write, value)
def Read(self, buff, index):
return SignedVarintReader(buff, index)
class ProtoFixed32(ProtoUnsignedInteger):
"""A 32 bit fixed unsigned integer.
The wire format is a 4 byte string, while the python type is a long.
"""
_size = 4
proto_type_name = "sfixed32"
wire_type = WIRETYPE_FIXED32
def Write(self, stream, value):
stream.write(self.tag_data)
stream.write(value)
def Read(self, buff, index):
return buff[index:index+self._size], index+self._size
def ConvertToWireFormat(self, value):
return struct.pack("<L", long(value))
def ConvertFromWireFormat(self, value):
return struct.unpack("<L", value)[0]
class ProtoFixed64(ProtoFixed32):
_size = 8
proto_type_name = "sfixed64"
wire_type = WIRETYPE_FIXED64
def ConvertToWireFormat(self, value):
return struct.pack("<Q", long(value))
def ConvertFromWireFormat(self, value):
return struct.unpack("<Q", value)[0]
class ProtoFixedU32(ProtoFixed32):
"""A 32 bit fixed unsigned integer.
The wire format is a 4 byte string, while the python type is a long.
"""
proto_type_name = "fixed32"
def ConvertToWireFormat(self, value):
return struct.pack("<l", long(value))
def ConvertFromWireFormat(self, value):
return struct.unpack("<l", value)[0]
class ProtoFloat(ProtoFixed32):
"""A float.
The wire format is a 4 byte string, while the python type is a float.
"""
proto_type_name = "float"
def Validate(self, value, **_):
if not isinstance(value, (int, long, float)):
raise type_info.TypeValueError("Invalid value %s for Float" % value)
return value
def ConvertToWireFormat(self, value):
return struct.pack("<f", float(value))
def ConvertFromWireFormat(self, value):
return struct.unpack("<f", value)[0]
class ProtoDouble(ProtoFixed64):
"""A double.
The wire format is a 8 byte string, while the python type is a float.
"""
proto_type_name = "double"
def Validate(self, value, **_):
if not isinstance(value, (int, long, float)):
raise type_info.TypeValueError("Invalid value %s for Integer" % value)
return value
def ConvertToWireFormat(self, value):
return struct.pack("<d", float(value))
def ConvertFromWireFormat(self, value):
return struct.unpack("<d", value)[0]
class Enum(int):
"""A class that wraps enums.
Enums are just integers, except when printed they have a name.
"""
def __new__(cls, val, name=None):
instance = super(Enum, cls).__new__(cls, val)
instance.name = name or str(val)
return instance
def __str__(self):
return self.name
def __unicode__(self):
return unicode(self.name)
class ProtoEnum(ProtoSignedInteger):
"""An enum native proto type.
This is really encoded as an integer but only certain values are allowed.
"""
def __init__(self, enum_name=None, enum=None, **kwargs):
super(ProtoEnum, self).__init__(**kwargs)
if enum_name is None:
raise type_info.TypeValueError("Enum groups must be given a name.")
self.enum_name = enum_name
self.proto_type_name = enum_name
if isinstance(enum, EnumContainer):
enum = enum.enum_dict
self.enum = enum or {}
self.reverse_enum = {}
for k, v in enum.iteritems():
if not (v.__class__ is int or v.__class__ is long):
raise type_info.TypeValueError("Enum values must be integers.")
self.reverse_enum[v] = k
def Validate(self, value, **_):
"""Check that value is a valid enum."""
# None is a valid value - it means the field is not set.
if value is None:
return
# If the value is a string we need to try to convert it to an integer.
if value.__class__ is str:
value = self.enum.get(value)
if value is None:
raise type_info.TypeValueError(
"Value %s is not a valid enum value for field %s" % (
value, self.name))
return Enum(value, name=self.reverse_enum.get(value))
def Definition(self):
"""Return a string with the definition of this field."""
result = self._FormatDescriptionComment()
result += " enum %s {\n" % self.enum_name
for k, v in sorted(self.reverse_enum.items()):
result += " %s = %s;\n" % (v, k)
result += " }\n"
result += self._FormatField()
return result
def Format(self, value):
yield self.reverse_enum.get(value, str(value))
def ConvertToWireFormat(self, value):
return int(value)
def ConvertFromWireFormat(self, value):
return Enum(value, name=self.reverse_enum.get(value))
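# Illustrative note: on the wire an enum is just the signed varint of its
# integer value; ConvertFromWireFormat() wraps it in an Enum so that str() on
# the field prints the symbolic name from reverse_enum when one is known (and
# the raw number otherwise).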
class ProtoBoolean(ProtoEnum):
"""A Boolean."""
def __init__(self, **kwargs):
super(ProtoBoolean, self).__init__(
enum_name="Bool", enum=dict(True=1, False=0), **kwargs)
self.proto_type_name = "bool"
class ProtoNested(ProtoType):
"""A nested RDFProtoStruct inside the field."""
wire_type = WIRETYPE_START_GROUP
closing_tag_data = None
# We need to be able to perform late binding for nested protobufs in case they
# refer to a protobuf which is not yet defined.
_type = None
def __init__(self, nested=None, named_nested_type=None, **kwargs):
super(ProtoNested, self).__init__(**kwargs)
if nested and not issubclass(nested, RDFProtoStruct):
raise type_info.TypeValueError(
"Only RDFProtoStructs can be nested, not %s" % nested.__name__)
self._type = nested
self.named_nested_type = named_nested_type
if self._type:
self.proto_type_name = self.type.__name__
# Pre-calculate the closing tag data.
self.closing_tag = ((self.field_number << 3) | WIRETYPE_END_GROUP)
tmp = cStringIO.StringIO()
VarintWriter(tmp.write, self.closing_tag)
self.closing_tag_data = tmp.getvalue()
@property
def type(self):
"""If the nested type is not known at definition time, resolve it now."""
if self._type is None:
self._type = getattr(rdfvalue, self.named_nested_type)
self.proto_type_name = self._type.__name__
if self._type is None:
raise rdfvalue.DecodeError(
"Unable to resolve nested member %s" % self.named_nested_type)
return self._type
def IsDirty(self, proto):
"""Return and clear the dirty state of the python object."""
if proto.dirty:
return True
for python_format, _, type_descriptor in proto.GetRawData().itervalues():
if python_format is not None and type_descriptor.IsDirty(python_format):
proto.dirty = True
return True
return False
def GetDefault(self):
"""When a nested proto is accessed, default to an empty one."""
return self.type()
def Validate(self, value):
# We may coerce it to the correct type.
if value.__class__ is not self.type:
try:
value = self.type(value)
except rdfvalue.InitializeError:
raise type_info.TypeValueError(
"Field %s must be of type %s" % (self.name, self.type.__name__))
return value
def Write(self, stream, value):
"""Serialize the nested protobuf value into the stream."""
stream.write(self.tag_data)
raw_data = value.GetRawData()
for name in raw_data:
python_format, wire_format, type_descriptor = raw_data[name]
if wire_format is None or (python_format and
type_descriptor.IsDirty(python_format)):
wire_format = type_descriptor.ConvertToWireFormat(python_format)
# We do not bother to cache the wire format because usually a protobuf
# is only serialized once and then discarded, so keeping the wire
# formats around does not give a good cache hit rate.
type_descriptor.Write(stream, wire_format)
stream.write(self.closing_tag_data)
def Skip(self, encoded_tag, buff, index):
"""Skip the field at index."""
tag_type = ORD_MAP[encoded_tag[0]] & TAG_TYPE_MASK
    # We don't need to actually understand the data, we just need to figure
    # out where the end of the unknown field is so we can preserve the data.
    # When we write these fields back (with their encoded tag) they should
    # still be valid.
if tag_type == WIRETYPE_VARINT:
_, index = ReadTag(buff, index)
elif tag_type == WIRETYPE_FIXED64:
index += 8
elif tag_type == WIRETYPE_FIXED32:
index += 4
elif tag_type == WIRETYPE_LENGTH_DELIMITED:
length, start = VarintReader(buff, index)
index = start + length
# Skip an entire nested protobuf - This calls into Skip() recursively.
elif tag_type == WIRETYPE_START_GROUP:
start = index
while index < len(buff):
group_encoded_tag, index = ReadTag(buff, index)
if (ORD_MAP[group_encoded_tag[0]] & TAG_TYPE_MASK ==
WIRETYPE_END_GROUP):
break
# Recursive call to skip the next field.
index = self.Skip(group_encoded_tag, buff, index)
else:
raise rdfvalue.DecodeError("Unexpected Tag.")
# The data to be written includes the encoded_tag and the decoded data
# together.
return index
def ReadIntoObject(self, buff, index, value_obj, length=None):
"""Reads all tags until the next end group and store in the value_obj."""
raw_data = value_obj.GetRawData()
buffer_len = length or len(buff)
while index < buffer_len:
encoded_tag, index = ReadTag(buff, index)
# This represents the closing tag group for the enclosing protobuf.
if encoded_tag == self.closing_tag_data:
break
type_info_obj = value_obj.type_infos_by_encoded_tag.get(encoded_tag)
# If the tag is not found we need to skip it. Skipped fields are
# inaccessible to this actual object, because they have no type info
      # describing them; however, they are still stored in the raw data
      # representation because they will be re-serialized. This way
# programs which simply read protobufs and write them back do not need to
# know all the fields, some of which were defined in a later version of
# the application. In order to avoid having to worry about repeated fields
# here, we just insert them into the raw data dict with a key which should
# be unique.
if type_info_obj is None:
start = index
end = self.Skip(encoded_tag, buff, start)
# Record an unknown field as a generic ProtoType. The key is unique and
# ensures we do not collide the dict on repeated fields of the encoded
# tag. Note that this field is not really accessible using Get() and
# does not have a python format representation. It will be written back
# using the same wire format it was read with.
raw_data[index] = (None, buff[start:end],
ProtoUnknown(encoded_tag=encoded_tag))
index = end
continue
value, index = type_info_obj.Read(buff, index)
if type_info_obj.__class__ is ProtoList:
value_obj.Get(type_info_obj.name).Append(wire_format=value)
else:
raw_data[type_info_obj.name] = (None, value, type_info_obj)
return index
def Read(self, buff, index):
"""Parse a nested protobuf."""
# Make new instance and parse the data into it.
result = self.type()
index = self.ReadIntoObject(buff, index, result)
return result, index
def Definition(self):
"""Return a string with the definition of this field."""
return self._FormatDescriptionComment() + self._FormatField()
def _FormatField(self):
result = " optional %s %s = %s" % (self.proto_type_name,
self.name, self.field_number)
return result + ";\n"
def Format(self, value):
for line in value.Format():
yield " %s" % line
class ProtoEmbedded(ProtoNested):
"""A field may be embedded as a serialized protobuf.
  Embedding is more efficient than nesting since the embedded protobuf does not
  need to be parsed at all if the user does not access any elements in it.
Embedded protobufs are simply serialized as bytes using the wire format
WIRETYPE_LENGTH_DELIMITED. Hence the wire format is a simple python string,
but the python format representation is an RDFProtoStruct.
"""
wire_type = WIRETYPE_LENGTH_DELIMITED
def ConvertFromWireFormat(self, value):
"""The wire format is simply a string."""
result = self.type()
self.ReadIntoObject(value, 0, result)
return result
def ConvertToWireFormat(self, value):
"""Encode the nested protobuf into wire format."""
output = cStringIO.StringIO()
for entry in value.GetRawData().itervalues():
python_format, wire_format, type_descriptor = entry
if wire_format is None or (python_format and
type_descriptor.IsDirty(python_format)):
wire_format = type_descriptor.ConvertToWireFormat(python_format)
type_descriptor.Write(output, wire_format)
return output.getvalue()
def Write(self, stream, value):
"""Serialize this protobuf as an embedded protobuf."""
stream.write(self.tag_data)
VarintWriter(stream.write, len(value))
stream.write(value)
def Read(self, buff, index):
length, index = VarintReader(buff, index)
return buff[index:index+length], index+length
class RepeatedFieldHelper(object):
"""A helper for the RDFProto to handle repeated fields.
This helper is intended to only be constructed from the RDFProto class.
"""
__metaclass__ = registry.MetaclassRegistry
dirty = False
def __init__(self, wrapped_list=None, type_descriptor=None):
"""Constructor.
Args:
wrapped_list: The list within the protobuf which we wrap.
type_descriptor: A type descriptor describing the type of the list
        elements.
Raises:
AttributeError: If parameters are not valid.
"""
if wrapped_list is None:
self.wrapped_list = []
elif wrapped_list.__class__ is RepeatedFieldHelper:
self.wrapped_list = wrapped_list.wrapped_list
else:
self.wrapped_list = wrapped_list
if type_descriptor is None:
raise AttributeError("type_descriptor not specified.")
self.type_descriptor = type_descriptor
def IsDirty(self):
"""Is this repeated item dirty?
This is used to invalidate any caches that our owners have of us.
Returns:
True if this object is dirty.
"""
if self.dirty:
return True
# If any of the items is dirty we are also dirty.
for item in self.wrapped_list:
if self.type_descriptor.IsDirty(item[0]):
self.dirty = True
return True
return False
def Copy(self):
return RepeatedFieldHelper(wrapped_list=self.wrapped_list[:],
type_descriptor=self.type_descriptor)
def Append(self, rdf_value=None, wire_format=None, **kwargs):
"""Append the value to our internal list."""
if rdf_value is None and wire_format is None:
rdf_value = self.type_descriptor.type(**kwargs)
elif rdf_value is not None:
# Coerce the value to the required type.
try:
rdf_value = self.type_descriptor.Validate(rdf_value, **kwargs)
except (TypeError, ValueError):
raise type_info.TypeValueError(
"Assignment value must be %s, but %s can not "
"be coerced." % (self.type_descriptor.proto_type_name,
type(rdf_value)))
self.wrapped_list.append((rdf_value, wire_format))
return rdf_value
def Pop(self, item):
result = self[item]
self.wrapped_list.pop(item)
return result
def Extend(self, iterable):
for i in iterable:
self.Append(rdf_value=i)
append = utils.Proxy("Append")
remove = utils.Proxy("Remove")
def __getitem__(self, item):
# Ensure we handle slices as well.
if item.__class__ is slice:
result = []
for i in range(*item.indices(len(self))):
result.append(self.wrapped_list[i])
return self.__class__(
wrapped_list=result, type_descriptor=self.type_descriptor)
python_format, wire_format = self.wrapped_list[item]
if python_format is None:
python_format = self.type_descriptor.ConvertFromWireFormat(wire_format)
self.wrapped_list[item] = (python_format, wire_format)
return python_format
def __len__(self):
return len(self.wrapped_list)
def __ne__(self, other):
return not self == other # pylint: disable=g-comparison-negation
def __eq__(self, other):
if len(self) != len(other):
return False
for x, y in zip(self, other):
if x != y:
return False
return True
def __str__(self):
result = []
result.append("'%s': [" % self.type_descriptor.name)
for element in self:
for line in self.type_descriptor.Format(element):
result.append(" %s" % line)
result.append("]")
return "\n".join(result)
class ProtoList(ProtoType):
"""A repeated type."""
def __init__(self, delegate, **kwargs):
self.delegate = delegate
if not isinstance(delegate, ProtoType):
raise AttributeError(
"Delegate class must derive from ProtoType, not %s" %
delegate.__class__.__name__)
self.wire_type = delegate.wire_type
super(ProtoList, self).__init__(name=delegate.name,
description=delegate.description,
field_number=delegate.field_number)
def IsDirty(self, value):
return value.IsDirty()
def GetDefault(self):
# By default an empty RepeatedFieldHelper.
return RepeatedFieldHelper(type_descriptor=self.delegate)
def Validate(self, value):
"""Check that value is a list of the required type."""
# Assigning from same kind can allow us to skip verification since all
# elements in a RepeatedFieldHelper already are coerced to the delegate
# type. In that case we just make a copy. This only works when the value
# wraps the same type as us.
if (value.__class__ is RepeatedFieldHelper and
value.type_descriptor is self.delegate):
result = value.Copy()
# Make sure the base class finds the value valid.
else:
# The value may be a generator here, so we just iterate over it.
try:
result = RepeatedFieldHelper(type_descriptor=self.delegate)
result.Extend(value)
except ValueError:
raise type_info.TypeValueError("Field %s must be a list" % self.name)
return result
def Write(self, stream, value):
for python_format, wire_format in value.wrapped_list:
if wire_format is None or (python_format and
value.type_descriptor.IsDirty(python_format)):
wire_format = value.type_descriptor.ConvertToWireFormat(python_format)
value.type_descriptor.Write(stream, wire_format)
def Read(self, buff, index):
return self.delegate.Read(buff, index)
def Format(self, value):
yield "["
for element in value:
for line in self.delegate.Format(element):
yield " %s" % line
yield "]"
def _FormatField(self):
result = " repeated %s %s = %s" % (
self.delegate.proto_type_name, self.name, self.field_number)
return result + ";\n"
class ProtoRDFValue(ProtoBinary):
"""Serialize arbitrary rdfvalue members.
RDFValue members can be serialized in a number of different ways according to
their preferred data_store_type member. We map the descriptions in
data_store_type into a suitable protobuf serialization for optimal
serialization. We therefore use a delegate type descriptor to best convert
from the RDFValue to the wire type. For example, an RDFDatetime is best
represented as an integer (number of microseconds since the epoch). Hence
RDFDatetime.SerializeToDataStore() will return an integer, and the delegate
will be ProtoUnsignedInteger().
To convert from the RDFValue python type to the delegate's wire type we
therefore need to make two conversions:
1) Our python format is the RDFValue -> intermediate data store format using
RDFValue.SerializeToDataStore(). This will produce a python object which is
the correct python format for the delegate primitive type descriptor.
2) Use the delegate to obtain the wire format of its own python type
(i.e. self.delegate.ConvertToWireFormat())
"""
_type = None
_named_type = None
_PROTO_DATA_STORE_LOOKUP = dict(
bytes=ProtoBinary,
unsigned_integer=ProtoUnsignedInteger,
integer=ProtoUnsignedInteger,
signed_integer=ProtoSignedInteger,
string=ProtoString)
def __init__(self, rdf_type=None, **kwargs):
if isinstance(rdf_type, basestring):
# If this fails to be resolved at this time, we resolve it at runtime
# later.
self._type = getattr(rdfvalue, rdf_type, None)
self._named_type = rdf_type
elif rdf_type is not None:
self._type = rdf_type
else:
type_info.TypeValueError("An rdf_type must be specified.")
# Now decide how we pack the rdfvalue into the protobuf and create a
# delegate descriptor to control that.
delegate_cls = self._PROTO_DATA_STORE_LOOKUP[self.type.data_store_type]
self.delegate = delegate_cls(**kwargs)
# Our wiretype is the same as the delegate's.
self.wire_type = self.delegate.wire_type
self.proto_type_name = self.delegate.proto_type_name
super(ProtoRDFValue, self).__init__(**kwargs)
@property
def type(self):
"""If the rdfvalue type is not known at definition time, resolve it now."""
if self._type is None:
self._type = getattr(rdfvalue, self._named_type, None)
if self._type is None:
raise rdfvalue.DecodeError(
"Unable to resolve rdfvalue %s" % self._named_type)
return self._type
def IsDirty(self, python_format):
"""Return the dirty state of the python object."""
return python_format.dirty
def Definition(self):
return ("\n // Semantic Type: %s" %
self.type.__name__) + self.delegate.Definition()
def Read(self, buff, index):
return self.delegate.Read(buff, index)
def Write(self, buff, index):
return self.delegate.Write(buff, index)
def Validate(self, value):
# Try to coerce into the correct type:
if value.__class__ is not self.type:
try:
value = self.type(value)
except rdfvalue.DecodeError as e:
raise type_info.TypeValueError(e)
return value
def ConvertFromWireFormat(self, value):
# Wire format should be compatible with the data_store_type for the
# rdfvalue. We use the delegate type_info to perform the conversion.
value = self.delegate.ConvertFromWireFormat(value)
result = self.type()
result.ParseFromDataStore(value)
return result
def ConvertToWireFormat(self, value):
return self.delegate.ConvertToWireFormat(value.SerializeToDataStore())
def _FormatField(self):
result = " optional %s %s = %s" % (self.proto_type_name,
self.name, self.field_number)
return result + ";\n"
def Format(self, value):
yield "%s:" % self.type.__name__
for line in str(value).splitlines():
yield " %s" % line
class AbstractSerlializer(object):
"""A serializer which parses to/from the intermediate python objects."""
def SerializeToString(self, value):
"""Serialize the RDFStruct object into a string."""
def ParseFromString(self, value_obj, string):
"""Parse the string and set attributes in the value_obj."""
class JsonSerlializer(AbstractSerlializer):
"""A serializer based on Json."""
def _SerializedToIntermediateForm(self, data):
"""Convert to an intermediate form suitable for JSON encoding.
Since JSON is unable to encode arbitrary data, we need to convert the data
into something which is valid JSON.
Args:
      data: Arbitrary data from the RDFStruct's internal form.
Returns:
This function returns a valid JSON serializable object, which can, in turn
be reversed using the _ParseFromIntermediateForm() method.
Raises:
ValueError: If data can not be suitably encoded.
"""
# These types can be serialized by json.
if isinstance(data, (int, long, unicode)):
return data
# We encode an RDFStruct as a dict.
elif isinstance(data, rdfvalue.RDFStruct):
result = dict(__n=data.__class__.__name__)
for entry in data.GetRawData().itervalues():
python_format, wire_format, type_descriptor = entry
if wire_format is None or (python_format and
type_descriptor.IsDirty(python_format)):
wire_format = type_descriptor.ConvertToWireFormat(python_format)
result[type_descriptor.field_number] = (
self._SerializedToIntermediateForm(wire_format))
return result
# A RepeatedFieldHelper is serialized as a list of objects.
elif isinstance(data, RepeatedFieldHelper):
return [self._SerializedToIntermediateForm(x) for x in data]
# A byte string must be encoded for json since it can not encode arbitrary
# binary data.
elif isinstance(data, str):
return data.encode("base64")
# Should never get here.
raise ValueError("Unable to serialize internal type %s" % data)
def SerializeToString(self, data):
"""Convert the internal data structure to json compatible form."""
return json.dumps(self._SerializedToIntermediateForm(data))
def _ParseFromIntermediateForm(self, data):
result = {}
for k, v in data.iteritems():
if isinstance(v, (int, long, unicode)):
result[k] = v
elif isinstance(v, dict):
rdfvalue_class = self.classes.get(v["t"])
        # Just ignore RDFValues we don't understand.
if rdfvalue_class is not None:
tmp = result[k] = rdfvalue_class()
tmp.SetRawData(self._ParseFromIntermediateForm(v["d"]))
elif isinstance(v, str):
result[k] = v.decode("base64")
return result
def ParseFromString(self, value_obj, string):
value_obj.SetRawData(self._ParseFromIntermediateForm(json.loads(string)))
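# Shape of the intermediate form produced above (illustrative): an RDFStruct
# becomes a dict with "__n" holding the class name and field numbers mapping
# to their wire-format values, a RepeatedFieldHelper becomes a list, and byte
# strings are base64 encoded so that the result is JSON serializable.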
class RDFStructMetaclass(registry.MetaclassRegistry):
"""This is a metaclass which registers new RDFProtoStruct instances."""
def __init__(cls, name, bases, env_dict): # pylint: disable=no-self-argument
super(RDFStructMetaclass, cls).__init__(name, bases, env_dict)
cls.type_infos = type_info.TypeDescriptorSet()
cls.type_infos_by_field_number = {}
cls.type_infos_by_encoded_tag = {}
# Build the class by parsing an existing protobuf class.
if cls.protobuf is not None:
cls.DefineFromProtobuf(cls.protobuf)
# Pre-populate the class using the type_infos class member.
if cls.type_description is not None:
for field_desc in cls.type_description:
cls.AddDescriptor(field_desc)
cls._class_attributes = set(dir(cls))
class RDFStruct(rdfvalue.RDFValue):
"""An RDFValue object which contains fields like a struct.
Struct members contain values such as integers, strings etc. These are stored
in an internal data structure.
  A value can be in two states: the wire format is a serialized format closely
  resembling the state it appears on the wire, while the decoded format is the
  representation closely matching an internal python type. The idea is that
converting from a serialized wire encoding to the wire format is as cheap as
possible. Similarly converting from a python object to the python
representation is also very cheap.
Lazy evaluation occurs when we need to obtain the python representation of a
decoded field. This allows us to skip the evaluation of complex data.
For example, suppose we have a protobuf with several "string" fields
(i.e. unicode objects). The wire format for a "string" field is a UTF8 encoded
binary string, but the python object is a unicode object.
Normally when parsing the protobuf we can extract the wire format
representation very cheaply, but conversion to a unicode object is quite
  expensive. If the user never accesses the specific field, we can keep the
internal representation in wire format and not convert it to a unicode object.
"""
__metaclass__ = RDFStructMetaclass
# This can be populated with a type_info.TypeDescriptorSet() object to
# initialize the class.
type_description = None
# This class can be defined using the protobuf definition language (e.g. a
# .proto file). If defined here, we parse the .proto file for the message with
# the exact same class name and add the field descriptions from it.
definition = None
# This class can be defined in terms of an existing annotated regular
# protobuf. See RDFProtoStruct.DefineFromProtobuf().
protobuf = None
# This is where the type infos are constructed.
type_infos = None
_data = None
# This is the serializer which will be used by this class. It can be
# interchanged or overriden as required.
_serializer = JsonSerlializer()
def __init__(self, initializer=None, age=None, **kwargs):
# Maintain the order so that parsing and serializing a proto does not change
# the serialized form.
self._data = {}
self._age = age
for arg, value in kwargs.iteritems():
# self.Set(arg, value)
if not hasattr(self.__class__, arg):
raise AttributeError(
"Proto %s has no field %s" % (self.__class__.__name__, arg))
setattr(self, arg, value)
if initializer is None:
return
elif initializer.__class__ == self.__class__:
self.ParseFromString(initializer.SerializeToString())
elif initializer.__class__ is str:
try:
self.ParseFromString(initializer)
except rdfvalue.DecodeError:
logging.error("Unable to parse: %s.", initializer.encode("hex"))
raise
else:
raise ValueError("%s can not be initialized from %s" % (
self.__class__.__name__, type(initializer)))
def Clear(self):
"""Clear all the fields."""
self._data = {}
def HasField(self, field_name):
"""Checks if the field exists."""
return field_name in self._data
def Copy(self):
"""Make an efficient copy of this protobuf."""
return copy.deepcopy(self)
def __copy__(self):
result = self.__class__()
result.SetRawData(copy.copy(self._data))
return result
def __deepcopy__(self, memo):
result = self.__class__()
result.SetRawData(copy.deepcopy(self._data, memo))
return result
def GetRawData(self):
"""Retrieves the raw python representation of the object.
This is normally only used by serializers which are tightly coupled with the
raw data representation. External users should not make use of the internal
raw data structures.
Returns:
the raw python object representation (a dict).
"""
return self._data
def ListFields(self):
"""Iterates over the fields which are actually set.
Yields:
a tuple of (type_descriptor, value) for each field which is set.
"""
for type_descriptor in self.type_infos:
if type_descriptor.name in self._data:
yield type_descriptor, self.Get(type_descriptor.name)
def SetRawData(self, data):
self._data = data
self.dirty = True
def SerializeToString(self):
return self._serializer.SerializeToString(self)
def ParseFromString(self, string):
self._serializer.ParseFromString(self, string)
self.dirty = True
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if len(self._data) != len(other.GetRawData()):
return False
for field in self._data:
if self.Get(field) != other.Get(field):
return False
return True
def __ne__(self, other):
return not self == other # pylint: disable=g-comparison-negation
def Format(self):
"""Format a message in a human readable way."""
yield "message %s {" % self.__class__.__name__
for k, (python_format, wire_format,
type_descriptor) in sorted(self.GetRawData().items()):
if python_format is None:
python_format = type_descriptor.ConvertFromWireFormat(wire_format)
# Skip printing of unknown fields.
if isinstance(k, basestring):
prefix = k + " :"
for line in type_descriptor.Format(python_format):
yield " %s %s" % (prefix, line)
prefix = ""
yield "}"
def __str__(self):
return "\n".join(self.Format())
def __dir__(self):
"""Add the virtualized fields to the console's tab completion."""
return (dir(super(RDFStruct, self)) +
[x.name for x in self.type_infos])
def _Set(self, attr, value, type_descriptor):
# A value of None means we clear the field.
if value is None:
self._data.pop(attr, None)
return
# Validate the value and obtain the python format representation.
value = type_descriptor.Validate(value)
# Store the lazy value object.
self._data[attr] = (value, None, type_descriptor)
# Make sure to invalidate our parent's cache if needed.
self.dirty = True
return value
def Set(self, attr, value):
"""Sets the attribute in to the value."""
type_info_obj = self.type_infos.get(attr)
if type_info_obj is None:
raise AttributeError("Field %s is not known." % attr)
return self._Set(attr, value, type_info_obj)
def SetWireFormat(self, attr, value):
"""Sets the attribute providing the serialized representation."""
type_info_obj = self.type_infos.get(attr)
if type_info_obj is None:
raise AttributeError("Field %s is not known." % attr)
self._data[attr] = (None, value, type_info_obj)
# Make sure to invalidate our parent's cache if needed.
self.dirty = True
def Get(self, attr):
"""Retrieve the attribute specified."""
entry = self._data.get(attr)
    # We don't have this field, try the defaults.
if entry is None:
type_info_obj = self.type_infos.get(attr)
if type_info_obj is None:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
# Assign the default value now.
default = type_info_obj.GetDefault()
if default is None:
return
return self.Set(attr, default)
python_format, wire_format, type_descriptor = entry
# Decode on demand and cache for next time.
if python_format is None:
python_format = type_descriptor.ConvertFromWireFormat(wire_format)
self._data[attr] = (python_format, wire_format, type_descriptor)
return python_format
def GetWireFormat(self, attr):
"""Retrieve the attribute specified in serialized form."""
entry = self._data.get(attr)
    # We don't have this field, try the defaults.
if entry is None:
return ""
python_format, wire_format, type_descriptor = entry
if wire_format is None:
wire_format = python_format.SerializeToDataStore()
self._data[attr] = (python_format, wire_format, type_descriptor)
return wire_format
@classmethod
def AddDescriptor(cls, field_desc):
if not isinstance(field_desc, ProtoType):
raise type_info.TypeValueError(
"%s field '%s' should be of type ProtoType" % (
cls.__name__, field_desc.name))
cls.type_infos_by_field_number[field_desc.field_number] = field_desc
cls.type_infos.Append(field_desc)
class ProtobufType(ProtoNested):
"""A type descriptor for the top level protobuf."""
def __init__(self):
self.tag_data = ""
self.closing_tag_data = ""
class ProtocolBufferSerializer(AbstractSerlializer):
"""A serializer based on protocol buffers."""
def __init__(self):
self.protobuf = ProtobufType()
def SerializeToString(self, data):
"""Serialize the RDFProtoStruct object into a string."""
stream = cStringIO.StringIO()
self.protobuf.Write(stream, data)
return stream.getvalue()
def ParseFromString(self, value_obj, string):
self.protobuf.ReadIntoObject(string, 0, value_obj)
class EnumValue(int):
"""An integer with a name."""
def __new__(cls, val, name=None):
inst = super(EnumValue, cls).__new__(cls, val)
inst.name = name
return inst
def __str__(self):
return self.name
class EnumContainer(object):
"""A data class to hold enum objects."""
def __init__(self, name=None, **kwargs):
self.reverse_enum = {}
self.name = name
for k, v in kwargs.items():
v = EnumValue(v, name=k)
self.reverse_enum[v] = k
setattr(self, k, v)
self.enum_dict = kwargs
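# Illustrative sketch: an EnumContainer exposes named integer constants, so
# given colors = EnumContainer(name="Color", RED=1, BLUE=2), colors.RED == 1,
# str(colors.RED) == "RED" and colors.reverse_enum[1] == "RED".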
class RDFProtoStruct(RDFStruct):
"""An RDFStruct which uses protobufs for serialization.
This implementation is faster than the standard protobuf library.
"""
_serializer = ProtocolBufferSerializer()
shortest_encoded_tag = 0
longest_encoded_tag = 0
# If set to a standard proto2 generated class, we introspect it and extract
# type descriptors from it. This allows this implementation to use an
# annotated .proto file to define semantic types.
protobuf = None
# This mapping is used to provide concrete implementations for semantic types
# annotated in the .proto file. This is a dict with keys being the semantic
# names, and values being the concrete implementations for these types.
# By default include standard semantic objects. Additional objects can be
# added if needed.
dependencies = dict(RDFURN=rdfvalue.RDFURN,
RDFDatetime=rdfvalue.RDFDatetime)
def GetFields(self, field_names):
"""Get a field embedded deeply in this object.
Args:
field_names: A list of field names to search.
Returns:
A list of values which match the field names.
"""
value = self
for field_name in field_names:
value = getattr(value, field_name)
return [value]
@classmethod
def EmitProto(cls):
"""Emits .proto file definitions."""
result = "message %s {\n" % cls.__name__
for _, desc in sorted(cls.type_infos_by_field_number.items()):
result += desc.Definition()
result += "}\n"
return result
@classmethod
def DefineFromProtobuf(cls, protobuf):
"""Add type info definitions from an existing protobuf.
We support building this class by copying definitions from an annotated
protobuf using the semantic protobuf. This is ideal for interoperability
with other languages and non-semantic protobuf implementations. In that case
it might be easier to simply annotate the .proto file with the relevant
semantic information.
Args:
protobuf: A generated protocol buffer class as produced by the protobuf
compiler.
"""
# We search through all the field descriptors and build type info
# descriptors from them.
for field in protobuf.DESCRIPTOR.fields:
type_descriptor = None
# Does this field have semantic options?
options = field.GetOptions().Extensions[semantic_pb2.sem_type]
kwargs = dict(description=options.description, name=field.name,
field_number=field.number)
if field.has_default_value:
kwargs["default"] = field.default_value
# This field is a non-protobuf semantic value.
if options.type and field.type != 11:
type_descriptor = ProtoRDFValue(rdf_type=options.type, **kwargs)
# A nested semantic protobuf of this type.
elif options.type and field.type == 11:
# Locate the semantic protobuf which is embedded here.
# TODO(user): If we can not find it, should we just go ahead and
# define it here?
nested = cls.classes.get(options.type)
if nested:
type_descriptor = ProtoEmbedded(nested=nested, **kwargs)
# Try to figure out what this field actually is from the descriptor.
elif field.type == 1:
type_descriptor = ProtoDouble(**kwargs)
elif field.type == 2: # Float
type_descriptor = ProtoFloat(**kwargs)
elif field.type == 3: # int64
type_descriptor = ProtoSignedInteger(**kwargs)
elif field.type == 5: # int32 is the same as int64 on the wire.
type_descriptor = ProtoSignedInteger(**kwargs)
elif field.type == 8: # Boolean
type_descriptor = ProtoBoolean(**kwargs)
elif field.type == 9: # string
type_descriptor = ProtoString(**kwargs)
elif field.type == 12: # bytes
type_descriptor = ProtoBinary(**kwargs)
elif field.type == 13: # unsigned integer
type_descriptor = ProtoUnsignedInteger(**kwargs)
elif field.type == 11 and field.message_type: # Another protobuf.
# Nested proto refers to itself.
if field.message_type.name == cls.protobuf.__name__:
type_descriptor = ProtoEmbedded(nested=cls, **kwargs)
else:
# Make sure that the nested protobuf is already defined as a semantic
# proto.
nested_class = cls.classes.get(field.message_type.name)
# If we get here we do not have the message already defined. This can
# happen for example if the message refers to another message which is
# not yet defined. We therefore create a descriptor with a name only
# and allow it to resolve the name to a class later.
if nested_class is None:
type_descriptor = ProtoEmbedded(
named_nested_type=field.message_type.name, **kwargs)
else:
type_descriptor = ProtoEmbedded(nested=nested_class, **kwargs)
elif field.enum_type: # It is an enum.
enum_desc = field.enum_type
enum_dict = dict((x.name, x.number) for x in enum_desc.values)
type_descriptor = ProtoEnum(enum_name=enum_desc.name, enum=enum_dict,
**kwargs)
# Attach the enum container to the class for easy reference:
setattr(cls, enum_desc.name,
EnumContainer(name=enum_desc.name, **enum_dict))
elif field.type == 4: # a uint64
type_descriptor = ProtoUnsignedInteger(**kwargs)
# If we do not recognize the type descriptor we ignore this field.
if type_descriptor is not None:
# If the field is repeated, wrap it in a ProtoList.
if field.label == 3:
type_descriptor = ProtoList(type_descriptor)
try:
cls.AddDescriptor(type_descriptor)
except Exception:
logging.error("Failed to parse protobuf %s", cls)
raise
else:
logging.error("Unknown field type for %s - Ignoring.", field.name)
@classmethod
def FromTextFormat(cls, text):
"""Parse this object from a text representation."""
tmp = cls.protobuf() # pylint: disable=not-callable
text_format.Merge(text, tmp)
return cls(tmp.SerializeToString())
@classmethod
def AddDescriptor(cls, field_desc):
"""Register this descriptor with the Proto Struct."""
if not isinstance(field_desc, ProtoType):
raise type_info.TypeValueError(
"%s field '%s' should be of type ProtoType" % (
cls.__name__, field_desc.name))
# Ensure this field number is unique:
if field_desc.field_number in cls.type_infos_by_field_number:
raise type_info.TypeValueError(
"Field number %s for field %s is not unique in %s" % (
field_desc.field_number, field_desc.name, cls.__name__))
# We store an index of the type info by tag values to speed up parsing.
cls.type_infos_by_field_number[field_desc.field_number] = field_desc
cls.type_infos_by_encoded_tag[field_desc.tag_data] = field_desc
cls.type_infos.Append(field_desc)
# This lambda is a class method so pylint: disable=protected-access
# This is much faster than __setattr__/__getattr__
setattr(cls, field_desc.name, property(
lambda self: self.Get(field_desc.name),
lambda self, x: self._Set(field_desc.name, x, field_desc),
None, field_desc.description))
| apache-2.0 | 7,757,022,768,187,491,000 | 30.026198 | 80 | 0.665978 | false |
sdispater/pendulum | tests/tz/zoneinfo/test_posix_timezone.py | 1 | 1969 | from pendulum.tz.zoneinfo.posix_timezone import JPosixTransition
from pendulum.tz.zoneinfo.posix_timezone import MPosixTransition
from pendulum.tz.zoneinfo.posix_timezone import posix_spec
def test_posix_spec_m():
spec = "CET-1CEST,M3.5.0,M10.5.0/3"
tz = posix_spec(spec)
assert tz.std_abbr == "CET"
assert tz.std_offset == 3600
assert tz.dst_abbr == "CEST"
assert tz.dst_offset == 7200
assert isinstance(tz.dst_start, MPosixTransition)
assert tz.dst_start.month == 3
assert tz.dst_start.week == 5
assert tz.dst_start.weekday == 0
assert tz.dst_start.offset == 7200
assert isinstance(tz.dst_end, MPosixTransition)
assert tz.dst_end.month == 10
assert tz.dst_end.week == 5
assert tz.dst_end.weekday == 0
assert tz.dst_end.offset == 3 * 3600
def test_posix_spec_m_no_abbr():
spec = "<+12>-12<+13>,M11.1.0,M1.2.1/147"
tz = posix_spec(spec)
assert tz.std_abbr == "+12"
assert tz.std_offset == 12 * 3600
assert tz.dst_abbr == "+13"
assert tz.dst_offset == 13 * 3600
assert isinstance(tz.dst_start, MPosixTransition)
assert tz.dst_start.month == 11
assert tz.dst_start.week == 1
assert tz.dst_start.weekday == 0
assert tz.dst_start.offset == 7200
assert isinstance(tz.dst_end, MPosixTransition)
assert tz.dst_end.month == 1
assert tz.dst_end.week == 2
assert tz.dst_end.weekday == 1
assert tz.dst_end.offset == 147 * 3600
def test_posix_spec_j_no_abbr():
spec = "<+0330>-3:30<+0430>,J80/0,J264/0"
tz = posix_spec(spec)
assert tz.std_abbr == "+0330"
assert tz.std_offset == 3 * 3600 + 30 * 60
assert tz.dst_abbr == "+0430"
assert tz.dst_offset == 4 * 3600 + 30 * 60
assert isinstance(tz.dst_start, JPosixTransition)
assert tz.dst_start.day == 80
assert tz.dst_start.offset == 0
assert isinstance(tz.dst_end, JPosixTransition)
assert tz.dst_end.day == 264
assert tz.dst_end.offset == 0
| mit | -4,709,097,366,466,081,000 | 29.292308 | 64 | 0.651092 | false |
flynx/pli | doc/style.py | 1 | 12070 | #=======================================================================
__version__ = '''0.1.00'''
__sub_version__ = '''20080920022138'''
__copyright__ = '''(c) Alex A. Naanou 2003-2007'''
#-----------------------------------------------------------------------
__doc__ = '''
this module is to document the recommended coding style in this project.
NOTE: this is partly derived from python style PEP8 <http://python.org/peps/pep-0008.html>
thus this is to be considered as a more specific extension to the PEP.
NOTE: in cases not defined by this text fall back to the PEP8.
NOTE: the rest of this module illustrates the styling and can be used as
an example.
General format:
- code should if possible be no more than 72 chars wide.
- all comments must be no more than 71 chars wide and if possible
justified, unless positional commenting of very long lines of code.
- all section separators must be exactly 72 chars wide.
Commenting culture:
- keep your comments current. the aim of comments is to help the
user, not to confuse him (never make anyone debug your comments
when what is needed is to understand your code!).
- keep your comments short. write just what is relevant and do not
make anyone search for what you are trying to say.
- be relevant. comment on the code/task at hand and not something
that is not in the direct vicinity, module or universe.
- be descriptive. describe why you are doing something rather than
what you are doing. the latter is appropriate only if it will take
more time to get it from the code than from your comment (by the
average user and not by you!).
making the user read the same code twice in two languages is not
very polite.
- warn and instruct if needed. it is a good idea to write out all
the issues that are relevant to a particular piece of code.
if you have a known bug ALWAYS write it out, with all the info
and references to the affected and if known the affecting code.
Commenting style:
- comments must always precede the commented code, and not follow it.
- there should be no blank lines separating the comment and the
commented code.
- use a '#' for general commenting of code.
- use '##' for temporary and commented-out code.
- use '# TODO ' to indicate normal priority todo tasks/reminders.
- use '# XXX ' to indicate issues of a certain priority that need
to be dealt with.
- High Priority Commenting (HP):
- use '##!!!' for general todo markers (may not be followed by text).
- use '##!!' as a start of HP remarks and notes
(may be used inside comments).
- use '##!! REWRITE !!##' style markers to indicate ASAP tasks/notes.
use the case of the message to indicate importance.
Ex:
##!! REWRITE !!##
##!! TEST !!##
##!! OPTIMIZE !!##
##!! revise !!##
...
- it is recommended to avoid comments on the same line as the code.
Ex:
foo = 1+2 # assign to foo.
- all comments must be no more than 71 chars wide and if possible justified,
unless positional commenting of very long lines of code.
Sections (outlining):
- each module may be composed of different levels of sections.
- a section in general may contain only one element (e.g. class, function ... etc.)
- if necessary separate parts of very long class definitions into
sub sections using blank lines.
- group all related sections into one module or section if possible.
- blank lines and sections:
- each main section should end with exactly 3 blank lines.
- each sub section should end with two blank lines.
- the code must follow section separator directly on the next
line with the exception of the declarative sections (e.g. imports,
constants, data, ...etc) which should be headed by a blank line.
- all definition sections (e.g. classes, functions, ...etc)
should have no blanks between the code and the section header.
- if any comments, remarks or warnings need to be present for a
section, they should directly follow the section separator and precede
the section code.
- module sections:
- the first section in a file is the version and copyright info.
this is obligatory, unless the module contains temporary, demo
or test code.
- the second section is the documentation section (this must
exist in every release module).
- the third section is the imports (optional).
- the optional data section.
NOTE: a data section should start with a blank line.
- then the module contents section.
- optional test (if __name__ == '__main__') section.
- the last section in the module should define editor related
configuration (e.g. vim modelines ... etc.)
- the section header should contain the name of the element defined within
Ex:
#---------------------------------------------------------ElementName---
or
#=======================================================================
#-----------------------------------------------------------ClassName---
- Section Separators:
- Main sections:
#-----------------------------------------------------------------------
#---------------------------------------------------------ElementName---
#-----------------------------------------------------------------------
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - -ElementName- -
#=======================================================================
- Sub sections:
#---------------------------------------------------------ElementName---
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - -ElementName- -
- section separators:
#-----------------------------------------------------------------------
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#.......................................................................
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
Element Naming Style:
this is the general naming style used; note that some code-dependent rules may apply.
Naming by Function/Pattern:
it is recommended that all elements that represent a given pattern
contain the pattern name in their names
Ex:
class MySuperClassFramework(object):
...
or
class MethodProxy(moo):
...
classes:
- utility
a utility class is an element that is used as a function (factory) in the code.
these may be all lowercase (like classmethod or staticmethod).
- base/structure
these are named as follows: first capital and every new word starts capital.
(Ex: MyClassName)
public/user methods and functions:
all lowercase without word separators, preferably no longer than 8 chars.
if the name must be longer or is not intended to be used very often then it
is acceptable to use underscores as separators.
avoid long public names for often used methods/functions at all cost!
(fall back to PEP8 when writing libs..)
variables:
all lowercase names with underscore as word separator.
- global constants
all capital with '_' as word separator (Ex: MY_GLOBAL_CONST_VAR)
- private (method)
use a leading underscore '_'
- private (class)
use a leading underscore '_' (avoid '__' if possible due to problems with inheritance)
- private (instance)
use a leading underscore '_' (avoid '__' if possible due to problems with inheritance)
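a compact example pulling the above naming rules together
(all names are illustrative placeholders only):
Ex:
def dowork(self):
...
def _loadcache(self):
...
item_count = 0
MAX_RETRY_COUNT = 3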
General Coding Style:
Doc Strings:
Packages:
Modules:
Classes:
Methods:
Library code specifics:
Release Process:
Testing:
'''
#-----------------------------------------------------------------------
# NOTE: the ordering of the sections shown here is not strict, though it
# is recommended to follow these guidelines:
# - data section, first private then public.
# - functions, again first private then public.
# - classes, as before private and then public.
#
# a general exception is semantically organized code. then each
# separate section should follow the above guidelines.
#
# but if such sections exist in a module, it is a good idea to
# ask yourself a question if it would be more logical to split
# this module into several self-contained modules or a package.
#
# P.S. it is not a good idea to keep data/state as a module
# variable. constants are OK but avoid storing state in the
# library modules.
#-----------------------------------------------------------------------
# some constants defined...
CONSTANT_A = 'a'
CONSTANT_B = 'B'
#-----------------------------------------------------------------------
#-------------------------------------------_module_internal_function---
def _module_internal_function(args):
'''
'''
pass
#-----------------------------------------------------------------------
#-----------------------------------------------------public_function---
def public_function():
'''
'''
pass
#--------------------------------------------------------------dowork---
def dowork():
'''
'''
pass
#-----------------------------------------------------------------------
#-------------------------------------------------------------MyClass---
# here is an example of a class definition...
class MyClass(object):
'''
my simple example class.
'''
# class private data...
# some private state used here...
_private_state = None
# class public data...
# NOTE: all class level data should be documented, and avoid
# comments that simply read the next line...
# ...try to always use meaningful names.
# this is something that this class will sometime use...
public_something = None
# private methods...
def _privatemethod(self):
'''
this method is intended for internal use or possibly for use
by library extensions.
'''
pass
def _another_nonpublic_method(self):
'''
here is another acceptable non-public method name.
'''
pass
# public methods...
def publicmethod(self, arg1, arg2):
'''
and here is a good public method.
'''
pass
#--------------------------------------------------------MyOtherClass---
class MyOtherClass(MyClass):
'''
'''
# MyClass interface extensions...
def publicmethod(self, arg1, arg2):
'''
'''
pass
# specific methods...
def yetanothermethods(self):
'''
this name is not so good...
'''
pass
def another_method(self):
'''
this is a better name version for the above, unless this method
is to be used often, then think of something 8-12 chars long.
'''
pass
#-----------------------------------------------------------------------
if __name__ == '__main__':
print __doc__
#=======================================================================
# vim:set ts=4 sw=4 nowrap expandtab :
| bsd-3-clause | -6,924,110,043,845,298,000 | 34.575758 | 98 | 0.510025 | false |
brunobell/elasticsearch-py | example/queries.py | 1 | 2797 | #!/usr/bin/env python
from __future__ import print_function
import logging
from dateutil.parser import parse as parse_date
from elasticsearch import Elasticsearch
def print_search_stats(results):
print('=' * 80)
print('Total %d found in %dms' % (results['hits']['total'], results['took']))
print('-' * 80)
def print_hits(results):
" Simple utility function to print results of a search query. "
print_search_stats(results)
for hit in results['hits']['hits']:
# get created date for a repo and fallback to authored_date for a commit
created_at = parse_date(hit['_source'].get('created_at', hit['_source']['authored_date']))
print('/%s/%s/%s (%s): %s' % (
hit['_index'], hit['_type'], hit['_id'],
created_at.strftime('%Y-%m-%d'),
hit['_source']['description'].replace('\n', ' ')))
print('=' * 80)
print()
# get trace logger and set level
tracer = logging.getLogger('elasticsearch.trace')
tracer.setLevel(logging.INFO)
tracer.addHandler(logging.FileHandler('/tmp/es_trace.log'))
# instantiate es client, connects to localhost:9200 by default
es = Elasticsearch()
print('Empty search:')
print_hits(es.search(index='git'))
print('Find commits that says "fix" without touching tests:')
result = es.search(
index='git',
doc_type='commits',
body={
'query': {
'bool': {
'must': {
'match': {'description': 'fix'}
},
'must_not': {
'term': {'files': 'test_elasticsearch'}
}
}
}
}
)
print_hits(result)
print('Last 8 Commits for elasticsearch-py:')
result = es.search(
index='git',
doc_type='commits',
body={
'query': {
'parent_id': {
'type': 'commits', 'id': 'elasticsearch-py'
}
},
'sort': [
{'committed_date': {'order': 'desc'}}
],
'size': 8
}
)
print_hits(result)
print('Stats for top 10 python committers:')
result = es.search(
index='git',
doc_type='commits',
body={
'size': 0,
'query': {
'has_parent': {
'parent_type': 'repos',
'query': {
'term': {
'tags': 'python'
}
}
}
},
'aggs': {
'committers': {
'terms': {
'field': 'committer.name.raw',
},
'aggs': {
'line_stats': {
'stats': {'field': 'stats.lines'}
}
}
}
}
}
)
print_search_stats(result)
for committer in result['aggregations']['committers']['buckets']:
print('%15s: %3d commits changing %6d lines' % (
committer['key'], committer['doc_count'], committer['line_stats']['sum']))
print('=' * 80)
| apache-2.0 | 283,234,526,146,128,860 | 24.427273 | 98 | 0.527351 | false |
felipenaselva/felipe.repository | script.module.placenta/lib/resources/lib/sources/en/icouchtuner.py | 1 | 4264 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re,traceback,urllib,urlparse,base64
import requests
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import log_utils
class source:
def __init__(self):
self.priority = 0
self.language = ['en']
self.domains = ['icouchtuner.to']
self.base_link = 'https://icouchtuner.to/'
self.search_link = '?s=%s'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
scrape = cleantitle.geturl(tvshowtitle).replace('-','+')
start_url = urlparse.urljoin(self.base_link, self.search_link %(scrape))
html = client.request(start_url)
results = client.parseDOM(html, 'div', attrs={'class':'post'})
for content in results:
show_url, url_text = re.compile('href="(.+?)" rel="bookmark" title="(.+?)"',re.DOTALL).findall(content)[0]
if cleantitle.get(tvshowtitle.translate(None, ':*?"\'\.<>|&!,')) in cleantitle.get(show_url):
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'url': show_url}
url = urllib.urlencode(url)
return url
return
except:
failure = traceback.format_exc()
log_utils.log('ICouchTuner - Exception: \n' + str(failure))
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
html = client.request(str(url['url']))
results = client.parseDOM(html, 'strong')
for content in results:
try:
show_url, url_text = re.compile('href="(.+?)">(.+?)</a>',re.DOTALL).findall(content)[0]
# older links have "nofollow" after href, but not showing hosts on items I tested, so doesn't matter if those are "broken" for scraping.
except:
continue
chkstr = 'Season %s Episode %s' % (season, episode)
chkstr2 = 'S%s Episode %s' % (season, episode)
if (chkstr.lower() in url_text.lower()) or (chkstr2.lower() in url_text.lower()):
return show_url
return
except:
failure = traceback.format_exc()
log_utils.log('ICouchTuner - Exception: \n' + str(failure))
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
html = client.request(url)
watchlink = client.parseDOM(html, 'div', attrs={'class':'entry'})[0]
watchlink = client.parseDOM(watchlink, 'a', ret='href')[0]
html = client.request(watchlink)
posttabs = client.parseDOM(html, 'div', attrs={'class':'postTabs_divs'})
for content in posttabs:
host = re.compile('<b>(.+?)</b>',re.DOTALL).findall(content)[0]
vid_url = client.parseDOM(content, 'iframe', ret='src')[0]
sources.append({'source':host,'quality':'SD','language': 'en','url':vid_url,'direct':False,'debridonly':False})
return sources
except:
failure = traceback.format_exc()
log_utils.log('ICouchTuner - Exception: \n' + str(failure))
return sources
def resolve(self, url):
return url | gpl-2.0 | -104,094,057,217,636,030 | 42.969072 | 156 | 0.52228 | false |
MRCIEU/melodi | browser/views.py | 1 | 51244 | import sys
import gzip
import time
import numpy
import json
import time
import logging
import operator
import config
import csv
import StringIO
import subprocess
import HTMLParser
#from py2neo import Graph, Path, Node, Relationship,authenticate
from scipy import stats
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404, render
from browser.forms import (CreateSemSet,ComSearchSets,CreateSearchSet,CreatePubSet)
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
#from browser.medline_parser import *
from browser.tasks import *
from browser.models import SearchSet,Compare,Overlap,Filters
from django.template.defaulttags import register
#from django.template.context_processors import csrf
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
from math import exp
from django_datatables_view.base_datatable_view import BaseDatatableView
from collections import defaultdict
from django.views.decorators.csrf import csrf_exempt
from django.core.cache import cache
from django.views.decorators.cache import cache_page
from django.core.serializers.json import DjangoJSONEncoder
from django.core import serializers
from sets import Set
from settings import DATA_FOLDER
#neo4j
from neo4j.v1 import GraphDatabase,basic_auth
auth_token = basic_auth(config.user, config.password)
driver = GraphDatabase.driver("bolt://"+config.server+":"+config.port,auth=auth_token)
#logger.debug(config.server)
#===============GoogleAuth Start
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.views.generic.base import View
from social_auth.backends import AuthFailed
from social_auth.views import complete
from django.contrib.auth.decorators import login_required
from django.conf import settings
#rest API
from rest_framework import viewsets
from browser.serializers import SearchSetSerializer
#logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',level=logging.WARNING)
#logger = logging.getLogger(__name__)
#logging.basicConfig(filename='run.log',level=logging.DEBUG)
class SearchSetViewSet(viewsets.ModelViewSet):
#queryset = SearchSet.objects.all()
serializer_class = SearchSetSerializer
def get_queryset(self):
user = self.request.user
logger.debug('user.id = '+str(user.id))
return SearchSet.objects.filter(user_id=str(user.id))
#return SearchSet.objects.all()
class AuthComplete(View):
def get(self, request, *args, **kwargs):
logging.warning('error')
backend = kwargs.pop('backend')
try:
return complete(request, backend, *args, **kwargs)
except AuthFailed:
logging.warning('error')
messages.error(request, "Your Google Apps domain isn't authorized for this app")
return HttpResponseRedirect(reverse('gauth_login'))
class LoginError(View):
def get(self, request, *args, **kwargs):
return HttpResponse(status=401)
#===============GoogleAuth End
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.filter
def mysplit(value, sep = "."):
parts = value.split(sep)
return (parts)
tmpDir=settings.MEDIA_ROOT
def people(request):
#example query
sem="match (s:SearchSet)--(p:Pubmed)--(st:SDB_triple)--(si:SDB_item) where s.name = 'tom gaunt_2' return count(distinct(p)) as c,si order by c;"
mesh="match (s:SearchSet)--(p:Pubmed)--(m:Mesh) where s.name = 'tom gaunt_2' return count(distinct(p)) as c,m order by c;"
def ajax_graph_metrics(request):
session = driver.session()
logger.debug('getting graph metrics')
uAll=SearchSet.objects.order_by().values_list('user_id',flat=True).exclude(user_id='2').distinct().count()
if request.is_ajax():
# get data for graph
# only want search sets that I (user 2) haven't created
#gCom = "match (s:SearchSet) where not s.name =~ '.*_2' return count(s) as s union match (s:Pubmed) return count(s) as s union match (s:Mesh) return count(s) as s union match (s:SDB_triple) return count(s) as s union match (s:SDB_item) return count(s) as s;"
gCom = "match (s:Pubmed) return count(s) as s union match (s:Mesh) return count(s) as s union match (s:SDB_triple) return count(s) as s union match (s:SDB_item) return count(s) as s;"
logger.debug(gCom)
#data = [int(uAll)]
data = []
for res in session.run(gCom):
data.append(res[0])
metrics = data
logger.debug(data)
# get user and article set over time
# select user_id,job_start from browser_searchset where user_id != 2 and job_status = 'Complete';
logger.debug("getting time data...")
s = SearchSet.objects.filter(job_status='Complete').exclude(user_id='2')
tData = []
aDic = {}
for i in s:
u = i.user_id
t = i.job_start.split(" ")[0].split("-")[0:2]
t = "-".join(t)
if t in aDic:
aDic[t].append(u)
else:
aDic[t] = [u]
c = Compare.objects.filter(job_status='View results').exclude(user_id='2')
cDic = {}
for i in c:
#id = i.id
id = i.job_name
t = i.job_start.split(" ")[0].split("-")[0:2]
t = "-".join(t)
if t in cDic:
cDic[t].append(id)
else:
cDic[t] = [id]
cats = []
uCounts = []
aCounts = []
cCounts = []
cCountOld = []
oldCount = []
for a in sorted(aDic):
cats.append(a)
#logger.debug(a)
if a in aDic:
uCount = len(list(set(aDic[a] + oldCount)))
uCounts.append(uCount)
aCount = len(aDic[a] + oldCount)
aCounts.append(aCount)
oldCount = aDic[a] + oldCount
else:
uCounts.append(0)
aCounts.append(0)
if a in cDic:
cCount = len(list(set(cDic[a] + cCountOld)))
cCounts.append(cCount)
cCountOld = cDic[a] + cCountOld
else:
cCounts.append(0)
lastTop=24
uCounts = uCounts[-lastTop:len(uCounts)]
aCounts = aCounts[-lastTop:len(aCounts)]
cCounts = cCounts[-lastTop:len(cCounts)]
cats = cats[-lastTop:len(cats)]
#logger.debug(uCounts)
else:
data = 'fail'
logger.debug('not ajax request')
mimetype = 'application/json'
session.close()
return HttpResponse(json.dumps({'metrics':metrics,'uCounts':uCounts,'aCounts':aCounts,'cCounts':cCounts,'cats':cats}), mimetype)
def ajax_test(request):
object = ''
f = tmpDir+'/new.txt'
logger.debug('looking for file ' + f)
if os.path.isfile(f):
object = "file exists"
logger.debug('object exists')
return HttpResponse(object)
def issn_to_name(iList):
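# Maps a list of ISSNs to journal names via the NCBI esearch endpoint.
# Illustrative call (the returned name strings depend on the live NCBI
# response):
#   issn_to_name(['0140-6736'])  ->  {'0140-6736': '<journal name>'}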
logger.debug('Running issn_to_name'+str(iList))
#check for null entries
if 'null' in iList:
iList.remove('null')
iString = ",".join(iList)
start=time.time()
print "\n### Getting ids ###"
url="http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
#params = {'term': '0140-6736+OR+0022-2275'}
params = {'term': iString}
r = requests.post(url)
# GET with params in URL
r = requests.get(url, params=params)
#create random file name
n = 10
ran=''.join(["%s" % randint(0, 9) for num in range(0, n)])
rSplit = r.text.split("<")
iDic = {}
iName = []
for i in rSplit:
l = re.match(r'To>(.*?)$', i)
if l:
m = l.group(1).replace('[Journal]','').replace('"','').strip().encode("ascii")
iName.append(m)
for i in range(0,len(iList)):
iDic[iList[i]]=iName[i]
logger.debug(iDic)
return iDic
def pubmed_id_details(pList):
logger.debug('Getting pubmed info')
def pmid_to_info(pList):
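# Returns two dicts keyed by PubMed ID - {pmid: title} and {pmid: journal} -
# built from the NCBI esummary response. Illustrative call (values depend on
# the live NCBI response):
#   ptDic, pjDic = pmid_to_info(['12345678'])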
#logger.debug('Running pmid_to_info'+str(pList))
iString = ",".join(pList)
start=time.time()
print "\n### Getting ids ###"
url="http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?"
#params = {'term': '0140-6736+OR+0022-2275'}
params = {'db':'pubmed','id': iString}
# GET with params in URL
r = requests.get(url, params=params)
#print r.text
rSplit = r.text.split("<")
ptDic = {}
pjDic = {}
t = jt = 'n/a'
for i in rSplit:
#print "i",i
#check pubmed id
pmid_match = re.match(r'Item Name="pubmed" Type="String">(.*?)$', i)
if pmid_match:
pmid = pmid_match.group(1)
#print pmid
#get title
t_match = re.match(r'Item Name="Title" Type="String">(.*?)$', i)
if t_match:
t = t_match.group(1)
#print t
#get journal name
jt_match = re.match(r'Item Name="FullJournalName" Type="String">(.*?)$', i)
if jt_match:
jt = jt_match.group(1)
#print jt
entry_match = re.match(r'/DocSum>', i)
if entry_match:
#print "\n"
ptDic[pmid]=t
pjDic[pmid]=jt
jt='n/a'
t='n/a'
#print pDic
return [ptDic,pjDic]
def about(request):
context = {'nbar': 'about'}
return render_to_response('about.html', context, context_instance=RequestContext(request))
def citation(request):
context = {'nbar': 'citation'}
return render_to_response('citation.html', context, context_instance=RequestContext(request))
def help(request):
context = {'nbar': 'help'}
return render_to_response('help.html', context, context_instance=RequestContext(request))
def dt_test_page(request):
return render_to_response('dt_test_page.html')
def contact(request):
context = {'nbar': 'contact'}
return render_to_response('contact.html', context, context_instance=RequestContext(request))
def get_semmed_items(request):
session = driver.session()
if request.is_ajax():
q = request.GET.get('term', '').split(',')[-1].strip()
logger.debug('q = '+q)
#get data for autocomplete
gCom = "match (sem:SDB_item) where sem.name =~ '(?i)"+q+".*' return sem.name;"
logger.debug(gCom)
sList = []
for res in session.run(gCom):
v=res[0].encode("ascii")
json_data = {}
json_data['id']=v
json_data['label']=v
json_data['value']=v
sList.append(json_data)
sList_json = json.dumps(sList)
logger.debug(len(sList))
else:
data = 'fail'
logger.debug('not ajax request')
mimetype = 'application/json'
session.close()
return HttpResponse(sList_json, mimetype)
def index(request):
userInfo = "UserID:"+str(request.user.id)+" - "
logger.debug(userInfo+"In index")
form1 = CreateSearchSet()
form_sem = CreateSemSet()
form2 = ComSearchSets()
form_pub = CreatePubSet()
if request.method == 'POST':
# create a form instance and populate it with data from the request:
if request.POST['formType'] == "ss":
if request.user.is_authenticated():
form1 = CreateSearchSet(request.POST, request.FILES)
#print "f = ",request.FILES
# check whether it's valid:
if form1.is_valid():
# process the data in form.cleaned_data as required
ss_file = request.FILES['ss_file']
#save to file
fileStore = tmpDir+'abstracts/'+str(ss_file)
id=form1.cleaned_data['job_name'].strip()
#remove special characters
id = re.sub('[^A-Za-z0-9 _-]+', '', id)
desc=form1.cleaned_data['ss_desc'].strip()
searchParams=[id,str(request.user.id)]
#add job and user data to sqlite db
q = SearchSet(user_id=str(request.user.id), job_name=id, job_start=time.strftime("%Y-%m-%d %H:%M:%S"),job_status='Pending',ss_desc=desc,pTotal=0,ss_file=ss_file,job_progress=0)
q.save()
#run job in background
#j = db_citations.delay(searchParams,fileStore)
j = pmid_process.delay(searchParams,fileStore)
SearchSet.objects.filter(user_id=str(request.user.id),job_name=id).update(job_id=j)
# redirect to a new URL:
return HttpResponseRedirect('jobs/')
else:
logger.debug(userInfo+"User authentication problem")
return HttpResponseRedirect('/')
if request.POST['formType'] == "ss_sem":
if request.user.is_authenticated():
form_sem = CreateSemSet(request.POST)
#print "f = ",request.FILES
# check whether it's valid:
if form_sem.is_valid():
# process the data in form.cleaned_data as required
#add to graph db
id=form_sem.cleaned_data['job_name'].strip()
#remove special characters
id = re.sub('[^A-Za-z0-9 _-]+', '', id)
desc=form_sem.cleaned_data['ss_desc'].strip()
sem_location = request.POST["ss_sem"]
logger.debug('job_name = '+id)
logger.debug('desc = '+desc)
logger.debug('sem location = '+sem_location)
searchParams=[id,str(request.user.id), sem_location,desc]
#add job and user data to sqlite db
descWithSem = sem_location+": "+desc
q = SearchSet(user_id=str(request.user.id), job_name=id, job_start=time.strftime("%Y-%m-%d %H:%M:%S"),job_status='Pending',ss_desc=descWithSem,pTotal=0,ss_file='',job_progress=0)
q.save()
#run job in background
logger.debug(userInfo+"Running db_sem")
j = db_sem.delay(searchParams)
SearchSet.objects.filter(user_id=str(request.user.id),job_name=id).update(job_id=j)
# redirect to a new URL:
return HttpResponseRedirect('jobs/')
else:
logger.debug(userInfo+"User authentication problem")
return HttpResponseRedirect('/')
if request.POST['formType'] == "ss_pub":
if request.user.is_authenticated():
form_pub = CreatePubSet(request.POST)
#print "f = ",request.FILES
# check whether it's valid:
if form_pub.is_valid():
# process the data in form.cleaned_data as required
#add to graph db
id=form_pub.cleaned_data['job_name'].strip()
#remove special characters
id = re.sub('[^A-Za-z0-9 _-]+', '', id)
desc=form_pub.cleaned_data['ss_desc'].strip()
logger.debug('job_name = '+id)
logger.debug('desc = '+desc)
searchParams=[id,str(request.user.id),desc]
q = SearchSet(user_id=str(request.user.id), job_name=id, job_start=time.strftime("%Y-%m-%d %H:%M:%S"),job_status='Pending',ss_desc=desc,pTotal=0,ss_file='',job_progress=0)
q.save()
#run job in background
logger.debug(userInfo+"Running pub_sem")
j = pub_sem.delay(searchParams)
SearchSet.objects.filter(user_id=str(request.user.id),job_name=id).update(job_id=j)
# redirect to a new URL:
return HttpResponseRedirect('jobs/')
else:
logger.debug(userInfo+"User authentication problem")
return HttpResponseRedirect('/')
if request.POST['formType'] == "com":
logger.debug(userInfo+"Comparing search sets")
form2 = ComSearchSets(request.POST)
if form2.is_valid():
ass=form2.cleaned_data['a']
bss=form2.cleaned_data['b']
comType = form2.cleaned_data['comType']
#get year range data
yearRange = request.POST["yearRange"]
logger.debug('yearRange:'+yearRange)
#add one year to upper bound to make it inclusive
year2 = int(yearRange.split("-")[1].strip())
yearRange = yearRange.split("-")[0].strip()+" - "+str(year2)
logger.debug('yearRange corrected:'+yearRange)
#check if analysing one or two search sets
if len(ass)>1 and len(bss)==0:
logger.debug(userInfo+"analysing single search set")
logger.debug("ss1 - "+str(ass))
s1=SearchSet.objects.get(job_name=ass,user_id=str(request.user.id))
jobName = str(s1.id)
for c in comType:
print "c = ",c
try:
jCheck = Compare.objects.get(job_name=jobName,year_range=yearRange,user_id=str(request.user.id),job_type=c)
logger.debug(userInfo+"job_status = "+str(jCheck.job_status))
#delete entry if not complete and resubmitted
if jCheck.job_progress != 100:
logger.debug(userInfo+"Deleting job: "+str(jCheck.job_name))
jCheck.delete()
jCheck = False
except ObjectDoesNotExist:
jCheck = False
if jCheck==False:
jobDesc = str(s1.job_name)
q = Compare(user_id=str(request.user.id), year_range=yearRange, job_desc=jobDesc, job_name=jobName, job_start=time.strftime("%Y-%m-%d %H:%M:%S"), job_status='Pending',job_type=c,job_progress=0)
q.save()
j=single_ss_Wrapper.delay(str(request.user.id),s1.id,c,yearRange)
else:
logger.debug(userInfo+"Search set comparison already run")
elif len(ass)>1 and len(bss)>1:
logger.debug(userInfo+"analysing two search sets")
logger.debug("ss1 - "+str(ass))
logger.debug("ss2 - "+str(bss))
#get ids for search sets
s1=SearchSet.objects.get(job_name=ass,user_id=str(request.user.id))
s2=SearchSet.objects.get(job_name=bss,user_id=str(request.user.id))
#include year2 to deal with year filtering option
jobName = str(s1.id)+"_"+str(s2.id)+"_"+str(year2)
for jobType in comType:
logger.debug("jobType = "+jobType)
try:
jCheck = Compare.objects.get(job_name=jobName,year_range=yearRange,user_id=str(request.user.id),job_type=jobType)
logger.debug(userInfo+"job_status = "+str(jCheck.job_status))
#removed this section as allows same job to run if already running.
#delete entry if not complete and resubmitted
#if jCheck.job_progress != 100:
# logger.debug(userInfo+"Deleting job: "+str(jCheck.job_name))
# jCheck.delete()
# jCheck = False
except ObjectDoesNotExist:
jCheck = False
if jCheck==False:
jobDesc = str(s1.job_name)+" : "+str(s2.job_name)
q = Compare(user_id=str(request.user.id), job_desc=jobDesc, year_range=yearRange, job_name=jobName, job_start=time.strftime("%Y-%m-%d %H:%M:%S"), job_status='Pending',job_type=jobType,job_progress=0)
q.save()
#j=comWrapper.delay(str(request.user.id),s1.id,s2.id,jobType,yearRange)
j=comWrapper.delay(q.id)
else:
logger.debug(userInfo+"Search set comparison already run")
return HttpResponseRedirect('jobs/')
else:
form1 = CreateSearchSet()
form_sem = CreateSemSet()
form2 = ComSearchSets()
form_pub = CreatePubSet()
#get search set data for table
j=SearchSet.objects.filter(user_id=str(request.user.id),job_status='Complete')
#get example data for table
exampleData=[]
eCheck={}
e=Compare.objects.filter(user_id='None',job_status='View results')
for i in e:
eName = i.job_name
c1,c2=i.job_desc.split(':')
if eName in eCheck:
eCheck[eName].append(i.job_type+':'+str(i.id))
else:
eCheck[eName]=[c1,c2,i.job_start,i.job_type+':'+str(i.id)]
#sort the methods
for e in eCheck:
eCheck[e][3:6] = sorted(eCheck[e][3:6])
#exampleData[i.job_desc]=[c1,c2,i.job_start]
logger.debug(eCheck)
context = {'s': j, 'exampleData':eCheck, 'form1': form1, 'form2': form2, 'form_sem':form_sem, 'form_pub':form_pub, 'nbar': 'home'}
return render_to_response('index.html', context, context_instance=RequestContext(request))
@cache_page(None)
def articleDetails(request,num):
session = driver.session()
userInfo = "UserID:"+str(request.user.id)+" - "
logger.debug(userInfo+"In article details")
resID=num
logger.debug(userInfo+str(resID))
q = SearchSet.objects.get(id=resID)
gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed) where s.name = '"+q.job_name+"_"+q.user_id+"' return p.dp,p.issn;"
logger.debug(userInfo+"gCom:"+gCom)
years = set()
yearCounts = defaultdict(dict)
for res in session.run(gCom):
if type(res[0]) != type(None):
#logger.debug(res)
y = res[0].split(" ")[0]
j = res[1]
if type(y) != type(None) and type(j) != type(None) and y != '':
y = int(y)
j = j.encode("ascii")
years.add(y)
if y in yearCounts:
if j in yearCounts[y]:
yearCounts[y][j]+=1
else:
yearCounts[y][j]=1
else:
yearCounts[y][j]=1
#logger.debug(years)
article_data=[]
if len(years)>0:
years = range(min(years),max(years)+1)
logger.debug(years)
#logger.debug(len(yearCounts))
#'1995': {'1040-872X': 1, '0090-3493': 2
jTotals = {}
for i in yearCounts:
#logger.debug('i = '+str(i))
for j in yearCounts[i]:
if j in jTotals:
jTotals[j] = jTotals[j]+1
else:
jTotals[j]=1
#logger.debug(str(j)+":"+str(yearCounts[i][j]))
numTopJ = 10
topJs = dict(sorted(jTotals.items(), key=operator.itemgetter(1),reverse=True)[0:numTopJ])
#logger.debug(topJs)
#create top counts
topCounts = defaultdict(dict)
for i in years:
topCounts[i]['Other']=0
for j in topJs:
if i in yearCounts:
if j in yearCounts[i]:
topCounts[i][j] = yearCounts[i][j]
else:
topCounts[i][j] = 0
else:
topCounts[i][j] = 0
#logger.debug(topCounts)
#add counts not in the top set as 'Other'
for i in yearCounts:
for j in yearCounts[i]:
if j not in topCounts[i]:
topCounts[int(i)]['Other'] += yearCounts[i][j]
#logger.debug(topCounts)
#convert ISSN to name
iList = []
for i in topJs:
iList.append(i)
iName = issn_to_name(iList)
topJs['Other']=0
for t in topJs:
if t in iName:
a = {'name':iName[t],'data':[]}
else:
a = {'name':t,'data':[]}
for i in topCounts:
a['data'].append(topCounts[i][t])
article_data.append(a)
#logger.debug(article_data)
context = {'years':years,'aData':json.dumps(article_data),'ss':q.job_name,'nbar': 'results'}
session.close()
return render_to_response('articles.html', context, context_instance=RequestContext(request))
#@login_required
def jobs(request):
userInfo = "UserID:"+str(request.user.id)+" - "
logger.debug(userInfo+"In jobs")
context = {'nbar': 'results'}
return render_to_response('jobs.html', context, context_instance=RequestContext(request))
#@login_required
@cache_page(None)
def results(request,num):
userInfo = "UserID:"+str(request.user.id)+" - "
logger.debug(userInfo+"In results")
resID=num
logger.debug(userInfo+str(resID))
#find out if it's a shared result
uuid_regex = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
r = re.match(uuid_regex,num)
userStatus = 'user'
if r:
logger.debug('Results page URL is a UUID')
q = Compare.objects.get(hash_id=resID)
#set resID back to ID
resID=q.id
if str(q.user_id) != str(request.user.id) and q.share==False:
logger.debug('wrong user access - user id = '+str(request.user.id)+' data id = '+str(q.user_id))
return HttpResponseRedirect('/')
elif str(q.user_id) != str(request.user.id) and q.share==True:
userStatus = 'guest'
else:
q = Compare.objects.get(id=resID)
if str(q.user_id) != str(request.user.id):
logger.debug('wrong user access - user id = '+str(request.user.id)+' data id = '+str(q.user_id))
return HttpResponseRedirect('/')
shareStatus = q.share
jobDir = ""
d1 = {}
d2 = {}
#cor_pval=1e-5
jobDir = q.job_name
d = str(q.job_name)
#get hash_id
hash_id = q.hash_id
#get year end
year2 = int(q.year_range.split('-')[1].strip())-1
#check for single search set jobs
if '_' in d:
#get search set names
s1_name = q.job_desc.split(":")[0]
s2_name = q.job_desc.split(":")[1]
logger.debug(userInfo+"two ss results")
if q.job_type == 'Temmpo':
#o = Overlap.objects.filter(mc_id=resID)
#o_json = []
#for i in o:
# dic = {}
# dic['uniq_a']=i.uniq_a
# dic['uniq_b']=i.uniq_b
# o_json.append(dic)
#o_json = json.dumps(o_json)
#o_json = serializers.serialize('json', o, fields=('name','uniq_a','uniq_b'))
context={'hash_id':hash_id, 'res':resID,'resA':d1,'resB':d2, 'nbar': 'results', 's1_name':s1_name, 's2_name':s2_name, 'year2':year2,'userStatus':userStatus,'shareStatus':shareStatus}
else:
o = Overlap.objects.filter(mc_id=resID).count()
#get semmed concepts
cFile=DATA_FOLDER+'SRDEF.txt'
infile = open(cFile, 'r')
cDic = {}
for line in infile:
if not line.startswith("#"):
cDic[(line.split("|")[0])]=line.split("|")[1]
#convert to JSON
cDic_json = json.dumps(cDic)
#check if files exist
f=tmpDir + 'saved_data/fet/' + str(d.split("_")[0]) + '_'+str(year2+1) + '.' + q.job_type + '.fet.gz'
logger.debug('Reading data from '+f)
if os.path.isfile(f):
with gzip.open(f, 'rb') as f:
next(f)
for line in f:
l = line.rstrip('\n').encode("ascii").split("\t")
#if float(l[7]) <= cor_pval:
d1[l[0]] = ["{:,}".format(int(l[1])) + "/" + "{:,}".format(int(l[2])),
"{:,}".format(int(float(l[3]))) + "/" + "{:,}".format(int(float(l[4]))), ("%4.2f" % float(l[5])),
("%03.02e" % float(l[6])), ("%03.02e" % float(l[7]))]
f=tmpDir + 'saved_data/fet/' + str(d.split("_")[1]) + '_'+str(year2+1) + '.' + q.job_type + '.fet.gz'
if os.path.isfile(f):
with gzip.open(f, 'rb') as f:
next(f)
for line in f:
l = line.rstrip('\n').encode("ascii").split("\t")
#if float(l[7]) <= cor_pval:
d2[l[0]] = ["{:,}".format(int(l[1])) + "/" + "{:,}".format(int(l[2])),
"{:,}".format(int(float(l[3]))) + "/" + "{:,}".format(int(float(l[4]))), ("%4.2f" % float(l[5])),
("%03.02e" % float(l[6])), ("%03.02e" % float(l[7]))]
# d['pTotal']="{:,}".format(int(r[3]))
context={'hash_id':hash_id, 'res':resID,'resA':d1,'resB':d2, 'nbar': 'results', 's1_name':s1_name, 's2_name':s2_name, 'overlap':o,'year2':year2,'cDic':cDic_json,'userStatus':userStatus,'shareStatus':shareStatus}
if q.job_type == 'meshMain':
return render_to_response('mesh.html', context, context_instance=RequestContext(request))
elif q.job_type == 'semmed_t' or q.job_type == 'semmed':
return render_to_response('semmed.html', context, context_instance=RequestContext(request))
elif q.job_type == 'semmed_c':
return render_to_response('semmed_c.html', context, context_instance=RequestContext(request))
elif q.job_type == 'semmed_c':
return render_to_response('semmed_c.html', context, context_instance=RequestContext(request))
elif q.job_type == 'Temmpo':
return render_to_response('temmpo_res.html', context, context_instance=RequestContext(request))
else:
logger.debug(userInfo+"single ss results")
f=tmpDir + 'saved_data/fet/' + str(d) + '_'+str(year2+1)+ '.' + q.job_type + '.fet.gz'
if os.path.isfile(f):
with gzip.open(f, 'rb') as f:
next(f)
for line in f:
l = line.rstrip('\n').encode("ascii").split("\t")
#if float(l[7]) <= cor_pval:
d1[l[0]] = ["{:,}".format(int(l[1])) + "/" + "{:,}".format(int(l[2])),
"{:,}".format(float(l[3])) + "/" + "{:,}".format(float(l[4])), ("%4.2f" % float(l[5])),
("%03.02e" % float(l[6])), ("%03.02e" % float(l[7]))]
context={'hash_id':hash_id, 'res':resID,'resA':d1, 'nbar': 'results','s1_name':q.job_desc,'year2':year2,'userStatus':userStatus,'shareStatus':shareStatus}
if q.job_type == 'meshMain':
return render_to_response('mesh_single.html', context, context_instance=RequestContext(request))
elif q.job_type == 'semmed_t' or q.job_type == 'semmed_c':
return render_to_response('semmed_single.html', context, context_instance=RequestContext(request))
class OrderListJson(BaseDatatableView):
# The model we're going to show
model=Compare
columns = ['user_id', 'job_name', 'job_desc']
order_columns = ['user_id']
max_display_length = 500
def get_initial_queryset(self):
return Compare.objects
def prepare_results(self, qs):
# prepare list with output column data
# queryset is already paginated here
json_data = []
for item in qs:
json_data.append([
'fish',
item.user_id,
item.job_name,
item.job_desc,
])
return json_data
class ajax_searchset(BaseDatatableView):
#get the user id
#user_id = 'None'
#def __init__(self, *args, **kwargs):
# self.request = kwargs.pop('request', None)
# super(ajax_searchset, self).__init__(*args, **kwargs)
# The model we're going to show
#model=SearchSet
#model=SearchSet.objects.filter(user_id=str(2))
columns = ['job_name', 'ss_desc', 'job_start', 'job_status','job_progress','id']
order_columns = ['job_name', 'ss_desc', 'job_start', 'job_status','job_progress','id']
max_display_length = 500
def get_initial_queryset(self):
user_id = self.request.user.id
return SearchSet.objects.filter(user_id=str(user_id))
def prepare_results(self, qs):
# prepare list with output column data
# queryset is already paginated here
json_data = []
for item in qs:
json_data.append([
item.job_name,
item.ss_desc,
item.job_start,
item.job_status,
item.job_progress,
item.id
])
return json_data
class ajax_compare(BaseDatatableView):
# The model we're going to show
model=Compare
#model=SearchSet.objects.filter(user_id=str(request.user.id))
columns = ['job_desc','job_type','job_start', 'job_status','job_progress','id']
order_columns = ['job_desc','job_type','job_start', 'job_status','job_progress','']
max_display_length = 500
def get_initial_queryset(self):
user_id = self.request.user.id
return Compare.objects.filter(user_id=str(user_id))
def prepare_results(self, qs):
# prepare list with output column data
# queryset is already paginated here
json_data = []
for item in qs:
job_desc = item.job_desc
if item.year_range != '1950 - 2019':
year1 = item.year_range.split('-')[0].strip()
year2 = int(item.year_range.split('-')[1].strip())-1
#logger.debug('y1:'+year1+' y2:'+str(year2))
#year2 = int(item.year_range.split('-')[1].strip())+1
job_desc = job_desc+' ('+year1+'-'+str(year2)+')'
json_data.append([
job_desc,
item.job_type,
item.job_start,
item.job_status,
item.job_progress,
item.id
])
return json_data
class ajax_overlap(BaseDatatableView):
# The model we're going to show
model=Overlap
#model=SearchSet.objects.filter(user_id=str(request.user.id))
columns = ['name', 'uniq_a','uniq_b','shared','score','mean_cp','mean_odds','treeLevel','id']
order_columns = ['name', 'uniq_a','uniq_b','shared','score','mean_cp','mean_odds','treeLevel','id']
max_display_length = 500
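# This endpoint is driven entirely by GET parameters; the parameter names
# below come from the lookups in get_initial_queryset/filter_queryset, but
# the URL path and values are made-up illustrations:
#   .../ajax_overlap/?resID=123&t=semmed&pval=1e-5&odds=2&pfr=3
#     &p={"s1":"..."}&n={"s2":"..."}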
def get_initial_queryset(self):
#resID = 926
#user_id = self.request.user.id
resID = self.request.GET.get('resID',None)
logger.debug('resID: '+resID)
return Overlap.objects.filter(mc_id=resID)
def filter_queryset(self, qs):
logger.debug('filter_queryset')
# use request parameters to filter queryset
# using standard filter
search = self.request.GET.get(u'search[value]', None)
if search:
search = search
logger.debug('Searching with filter '+search)
qs = qs.filter(name__icontains=search)
#get analysis type
aType = self.request.GET.get('t', None)
logger.debug('Filter query on '+aType)
#filter using negative search terms
negVals = self.request.GET.get('n',None)
if negVals:
negVals = json.loads(negVals)
#deal with html
negVals = HTMLParser.HTMLParser().unescape(negVals)
#logger.debug('nVals = '+str(negVals))
if aType == 'semmed':
for i in negVals:
if len(negVals[i])>0:
#neg = negVals[i]
#negList = negVals[i].replace('(','\(').replace(')','\)').split('||')
negList = negVals[i].split('||')
logger.debug(i+":"+str(negList))
if i == 's1':
qs = qs.exclude(name1__in=negList)
elif i == 's2':
qs = qs.exclude(name2__in=negList)
elif i == 's3':
qs = qs.exclude(name3__in=negList)
elif i == 's4':
qs = qs.exclude(name4__in=negList)
elif i == 's5':
qs = qs.exclude(name5__in=negList)
else:
if len(negVals)>0:
negVals = negVals.replace('(','\(').replace(')','\)')
logger.debug('filtering on negVals '+negVals)
qs = qs.exclude(name__iregex=r''+negVals+'')
negList = negVals.split('||')
#filter using positive search terms
posVals = self.request.GET.get('p',None)
if posVals:
posVals = json.loads(posVals)
posVals = HTMLParser.HTMLParser().unescape(posVals)
#logger.debug('pVals = '+str(posVals))
if aType == 'semmed':
for i in posVals:
if len(posVals[i])>0:
#p = posVals[i]
#posList = posVals[i].replace('(','\(').replace(')','\)').split('||')
posList = posVals[i].split('||')
#logger.debug(i+":"+p)
if i == 's1':
qs = qs.filter(name1__in=posList)
elif i == 's2':
qs = qs.filter(name2__in=posList)
elif i == 's3':
qs = qs.filter(name3__in=posList)
elif i == 's4':
qs = qs.filter(name4__in=posList)
elif i == 's5':
qs = qs.filter(name5__in=posList)
#reg = r'^'+r1+'\|\|'+r2+'\|\|'+r3+'\|\|'+r4+'\|\|'+r5
#logger.debug(reg)
#qs = qs.filter(name__iregex=r''+reg+'')
else:
if len(posVals)>0:
posVals = posVals.replace('(','\(').replace(')','\)')
#posList = posVals.split('||')
logger.debug('filtering on posVals ' +posVals)
qs = qs.filter(name__iregex=r''+posVals+'')
#qs = qs.filter(name__in=posList)
#filter using sliders
pval = self.request.GET.get('pval',None)
odds = self.request.GET.get('odds',None)
pfr = self.request.GET.get('pfr',None)
#logger.debug('pval:'+pval+' odds:'+odds+' pfr:'+pfr)
if pval and pval != 'NaN':
qs = qs.filter(mean_cp__lte=pval)
if odds and odds != 'NaN':
qs = qs.filter(mean_odds__gte=odds)
if pfr and pfr != 'NaN':
qs = qs.filter(treeLevel__gte=pfr)
logger.debug('len(qs)='+str(len(qs)))
return qs
def prepare_results(self, qs):
# prepare list with output column data
# queryset is already paginated here
json_data = []
#top = self.request.GET.get('top',None)
#logger.debug('top:'+top)
#tCount=0
#get SemMedDB concept terms
aType = self.request.GET.get('t', None)
if aType == 'semmed':
#Milk||PART_OF||Breast||261943:Breast||LOCATION_OF||Diphosphonates||10722541
# termDic = {}
# termSet = Set()
#
# for item in qs:
# #s = item.name.split("||")
# termSet.add(item.name1)
# termSet.add(item.name3)
# termSet.add(item.name5)
# termString = ', '.join('"' + item + '"' for item in termSet)
# logger.debug('termString = '+str(termString))
# session = driver.session()
#
# gCom = "match (s:SDB_item) where s.name in ["+termString+"] return s.name,s.type";
# logger.debug("gCom:"+gCom)
# for res in session.run(gCom):
# if res["s.name"] in termDic:
# a=termDic[res["s.name"]]
# termDic[res["s.name"]] = a+","+res["s.type"]
# else:
# termDic[res["s.name"]]=res["s.type"]
# logger.debug(termDic)
for item in qs:
#create type string
#s = item.name.split("||")
#ts = termDic[s[0]]+"||"+termDic[s[2]]+"||"+termDic[s[5]]
#if tCount<int(top):
json_data.append([
item.name,
item.uniq_a,
item.uniq_b,
item.shared,
item.score,
item.mean_cp,
item.mean_odds,
item.treeLevel,
item.id,
#ts
])
#tCount+=1
elif aType == 'semmed_c':
# termDic = {}
# termSet = Set()
#
# for item in qs:
# #s = item.name.split("||")
# termSet.add(item.name.split(":")[0])
# termString = ', '.join('"' + item + '"' for item in termSet)
# logger.debug('termString = '+str(termString))
# session = driver.session()
#
# gCom = "match (s:SDB_item) where s.name in ["+termString+"] return s.name,s.type";
# logger.debug("gCom:"+gCom)
# for res in session.run(gCom):
# name = res["s.name"].split(":")[0]
# if name in termDic:
# a=termDic[name]
# termDic[name] = a+","+res["s.type"]
# else:
# termDic[name]=res["s.type"]
# logger.debug(termDic)
for item in qs:
#create type string
s = item.name.split(":")[0]
#ts = termDic[s]
#if tCount<int(top):
json_data.append([
s,
item.uniq_a,
item.uniq_b,
item.shared,
item.score,
item.mean_cp,
item.mean_odds,
item.treeLevel,
item.id,
#ts
])
else:
for item in qs:
#if tCount<int(top):
json_data.append([
item.name,
item.uniq_a,
item.uniq_b,
item.shared,
item.score,
item.mean_cp,
item.mean_odds,
item.treeLevel,
item.id
])
return json_data
@cache_page(None)
def pubSingle(request,num):
session = driver.session()
userInfo = "UserID:"+str(request.user.id)+" - "
logger.debug(userInfo+"In pubSingle")
[p_id,c_id,s_id] = num.split("_")
logger.debug(p_id+' : '+c_id+' : '+s_id)
#o = Overlap.objects.get(pk=p_id)
#m = o.mc_id
#logger.debug(m)
c = Compare.objects.get(pk=c_id)
#get year range data
year1 = c.year_range.split("-")[0].strip()
year2 = c.year_range.split("-")[1].strip()
logger.debug('year1 = '+year1+' year2 = '+year2)
yearString = ''
if year1 != '1960' or year2 != '2019':
yearString = "p.dcom >= '"+year1+"' and p.dcom <= '"+year2+"' and"
#check user ids match
if str(c.user_id) != str(request.user.id):
logger.debug('wrong user access - user id = '+str(request.user.id)+' data id = '+c.user_id)
return HttpResponseRedirect('/')
ss = ''
if s_id == '1':
ss = c.job_desc.split(":")[0].strip()+"_"+c.user_id
else:
ss=c.job_desc.split(":")[1].strip()+"_"+c.user_id
jobType = c.job_type
if jobType == "meshMain":
gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:HAS_MESH{mesh_type:'main'}]->(m:Mesh) where "+yearString+" s.name = '"+ss+"' and m.mesh_id = '"+p_id.replace(':','/')+"' return s.name,p.pmid,p.dcom,m.mesh_name as sname;"
elif jobType == "semmed_t" or jobType == 'semmed':
gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:SEM]->(sdb:SDB_triple) where "+yearString+" s.name = '"+ss+"' and sdb.pid = "+p_id+" return s.name,p.pmid,p.dp,sdb.s_name as sname;"
elif jobType == "semmed_c":
gCom = "match (s:SearchSet)-[r:INCLUDES]-(p:Pubmed)-[:SEM]-(st:SDB_triple)-[:SEMS|:SEMO]-(si:SDB_item) where "+yearString+" s.name = '"+ss+"' and si.name = '"+p_id+"' return s.name,p.pmid,p.dcom,si.name as sname;"
logger.debug(userInfo+"gCom:"+gCom)
pAllDic = {}
pDic = {}
pmidList = []
for res in session.run(gCom):
ss=res[0].encode("ascii")
pm=str(res[1])
pd=res[2].encode("ascii")
pmidList.append(pm)
sName = res['sname']
pAllDic[pm] = pd
if ss in pDic:
a = pDic[ss]
if pm not in a:
a.append(pm)
else:
pDic[ss] = [pm]
#get titles
ptDic,pjDic = pmid_to_info(pmidList)
for i in pAllDic:
a = pAllDic[i]
t = 'n/a'
j = 'n/a'
if i in ptDic:
t = ptDic[i]
if i in pjDic:
j = pjDic[i]
b = (t,j,a)
pAllDic[i] = b
#print pDic
#logger.debug(userInfo+"pDic:"+str(pDic))
sDic = {}
sList = list()
for i in pDic[ss]:
e = {'pmid':i}
sList.append(e)
ss_name = ss.rsplit("_",1)[0]
#logger.debug(sList)
context = {'sList':sList,'ss_name':ss_name, 'tab':'single','mName':sName, 'pAllDic':pAllDic, 'nbar': 'results'}
session.close()
return render_to_response('pubs_single.html', context, context_instance=RequestContext(request))
@cache_page(None)
def pubDetails(request,num):
session = driver.session()
userInfo = "UserID:"+str(request.user.id)+" - "
logger.debug(userInfo+"In pubDetails")
p_id = num.split("_")[0]
tab = num.split("_")[1]
if tab == '0':
tab = 's1'
elif tab == '1':
tab = 's2'
elif tab == '2':
tab = 'shared'
o = Overlap.objects.get(pk=p_id)
m = o.mc_id
logger.debug(m)
c = Compare.objects.get(pk=m.id)
#get year range data
year1 = c.year_range.split("-")[0].strip()
year2 = c.year_range.split("-")[1].strip()
logger.debug('year1 = '+year1+' year2 = '+year2)
yearString = ''
if year1 != '1960' or year2 != '2019':
yearString = "p.dcom >= '"+year1+"' and p.dcom <= '"+year2+"' and"
#check user ids match
if str(c.user_id) != str(request.user.id):
logger.debug('wrong user access - user id = '+str(request.user.id)+' data id = '+c.user_id)
return HttpResponseRedirect('/')
ss1=c.job_desc.split(":")[0].strip()+"_"+c.user_id
ss2=c.job_desc.split(":")[1].strip()+"_"+c.user_id
mName = o.name.split(":")[0].split('(',1)[0].strip()
jobType = m.job_type
if jobType == "meshMain":
gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:HAS_MESH{mesh_type:'main'}]->(m:Mesh) where "+yearString+" s.name in ['"+ss1+"','"+ss2+"'] and m.mesh_name = '"+mName+"' return s.name,p.pmid,p.dcom;"
elif jobType == "notMeshMain":
gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:HAS_MESH]->(m:Mesh) where "+yearString+" s.name in ['"+ss1+"','"+ss2+"'] and m.mesh_name = '"+mName+"' return s.name,p.pmid,p.dcom;"
elif jobType == "semmed_t" or jobType == 'semmed':
sem_1_ID = o.name.split(":")[0].split("||")[3]
sem_2_ID = o.name.split(":")[1].split("||")[3]
t1 = o.name.split(":")[0].split("||")[0]
t2 = o.name.split(":")[0].split("||")[1]
t3 = o.name.split(":")[0].split("||")[2]
t4 = o.name.split(":")[1].split("||")[0]
t5 = o.name.split(":")[1].split("||")[1]
t6 = o.name.split(":")[1].split("||")[2]
logger.debug(t1+"|"+t6)
#if t1 == t6:
# mName = "(REVERSE) "+t4+" || "+t5+" || "+t6+" || "+t2+" || "+t3
#else:
mName = t1+" || "+t2+" || "+t3+" || "+t5+" || "+t6
gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:SEM]->(sdb:SDB_triple) where "+yearString+" s.name = '"+ss1+"' and sdb.pid = "+sem_1_ID+" return s.name,p.pmid,p.dp " \
"UNION match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:SEM]->(sdb:SDB_triple) where "+yearString+" s.name = '"+ss2+"' and sdb.pid = "+sem_2_ID+" return s.name,p.pmid,p.dp;"
elif jobType == "semmed_c":
gCom = "match (s:SearchSet)-[r:INCLUDES]-(p:Pubmed)-[:SEM]-(st:SDB_triple)-[:SEMS|:SEMO]-(si:SDB_item) where "+yearString+" s.name in ['"+ss1+"','"+ss2+"'] and si.name = '"+mName+"' return s.name,p.pmid,p.dcom;"
logger.debug(userInfo+"gCom:"+gCom)
pAllDic = {}
pDic = {}
pmidList = []
for res in session.run(gCom):
ss=res[0].encode("ascii")
pm=str(res[1])
pd=res[2].encode("ascii")
pmidList.append(pm)
pAllDic[pm] = pd
if ss in pDic:
a = pDic[ss]
if pm not in a:
a.append(pm)
else:
pDic[ss] = [pm]
#get titles
ptDic,pjDic = pmid_to_info(pmidList)
for i in pAllDic:
a = pAllDic[i]
t = 'n/a'
j = 'n/a'
if i in ptDic:
t = ptDic[i]
if i in pjDic:
j = pjDic[i]
b = (t,j,a)
pAllDic[i] = b
#print pDic
#logger.debug(userInfo+"pDic:"+str(pDic))
sDic = {}
s1List = list()
s2List = list()
shareList = list()
for i in pDic:
j1 = pDic[ss1]
j2 = pDic[ss2]
sDic['o'] = list(set(j1).intersection(j2))
sDic[ss1] = list(set(j1) - set(j2))
sDic[ss2] = list(set(j2) - set(j1))
if 'o' in sDic:
for i in sDic['o']:
e = {'pmid':i}
shareList.append(e)
if ss1 in sDic:
for i in sDic[ss1]:
e = {'pmid':i}
s1List.append(e)
if ss2 in sDic:
for i in sDic[ss2]:
e = {'pmid':i}
s2List.append(e)
ss1_name = ss1.rsplit("_",1)[0]
ss2_name = ss2.rsplit("_",1)[0]
context = {'s1':s1List,'s2':s2List,'share':shareList,'ss1':ss1_name, 'ss2':ss2_name, 'tab':tab,'mName':mName, 'pAllDic':pAllDic, 'nbar': 'results'}
session.close()
return render_to_response('pubs.html', context, context_instance=RequestContext(request))
def get_task_status(task_id):
# If you have a task_id, this is how you query that task
#print "in get_task_status"
#print task_id
task = db_citations.AsyncResult(task_id)
status = task.status
progress = 0
stage=""
if status == 'SUCCESS':
progress = 100
stage = 'Complete'
elif status == 'FAILURE':
#progress = 0
stage = "Failed"
elif status == 'PROGRESS':
progress = task.info['progress']
stage = task.info['stage']
return {'status': status, 'progress': progress, 'stage':stage}
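# Illustrative sketch only (not part of the original views): a hypothetical AJAX
# endpoint could expose get_task_status to the browser for progress polling.
# The view name and the 'task_id' GET parameter are assumptions for this example,
# mirroring the HttpResponse/json.dumps pattern used elsewhere in this module.
#def ajax_task_status(request):
#    status = get_task_status(request.GET['task_id'])
#    return HttpResponse(json.dumps(status), 'application/json')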
def ajax_share(request):
resID = request.GET['resID']
status = request.GET['status']
if status == 'True':
logger.debug('Sharing results - '+resID)
Compare.objects.filter(hash_id=resID).update(share=True)
else:
logger.debug('Unsharing results - '+resID)
Compare.objects.filter(hash_id=resID).update(share=False)
#SearchSet.objects.filter(job_name=job_name,user_id=user_id).update(job_status='Adding pubmed data',job_progress=10)
mimetype = 'application/json'
context={}
return HttpResponse(json.dumps(context), mimetype)
def export_to_csv(request, queryset, fields, resID):
fName = 'melodi_result_'+str(resID)+'.csv'
output = StringIO.StringIO() ## temp output file
response = HttpResponse(content_type='application/zip')
response['Content-Disposition'] = 'attachment;filename='+fName+'.zip'
writer = csv.writer(output, dialect='excel')
writer.writerow(fields)
for obj in queryset:
writer.writerow([getattr(obj, f) for f in fields])
z = zipfile.ZipFile(response,'w') ## write zip to response
z.writestr(fName, output.getvalue()) ## write csv file to zip
return response
def download_result(request):
resID = request.POST.get('resID')
type = request.POST.get('type')
res_type = request.POST.get('download_res')
logger.debug('Downloading - '+str(resID)+' : '+type+ ' : '+res_type)
resID = request.POST.get('resID')
type = request.POST.get('type')
qs = Overlap.objects.filter(mc_id_id=resID)
# using standard filter
#search = request.GET.get(u'search[value]', None)
#if search:
# logger.debug('Searching with filter '+search)
# qs = qs.filter(name__icontains=search)
#get analysis type
#aType = request.GET.get('t', None)
#logger.debug('Filter query on '+aType)
if res_type == 'filt':
logger.debug('Downloading filtered - '+str(resID)+' : '+type)
#filter using negative search terms
negVals = request.POST.get('filt_results_n',None)
logger.debug(negVals)
if negVals:
negVals = json.loads(negVals)
#deal with thml
negVals = HTMLParser.HTMLParser().unescape(negVals)
#logger.debug('nVals = '+str(negVals))
if type == 'st':
for i in negVals:
if len(negVals[i])>0:
#neg = negVals[i]
negList = negVals[i].split('||')
#logger.debug(i+":"+str(negList))
if i == 's1':
qs = qs.exclude(name1__in=negList)
elif i == 's2':
qs = qs.exclude(name2__in=negList)
elif i == 's3':
qs = qs.exclude(name3__in=negList)
elif i == 's4':
qs = qs.exclude(name4__in=negList)
elif i == 's5':
qs = qs.exclude(name5__in=negList)
else:
if len(negVals)>0:
logger.debug('filtering on negVals '+negVals)
qs = qs.exclude(name__iregex=r''+negVals+'')
negList = negVals.split('||')
#filter using positive search terms
posVals = request.POST.get('filt_results_p', None)
if posVals:
posVals = json.loads(posVals)
posVals = HTMLParser.HTMLParser().unescape(posVals)
# logger.debug('pVals = '+str(posVals))
if type == 'st':
for i in posVals:
if len(posVals[i]) > 0:
# p = posVals[i]
posList = posVals[i].split('||')
# logger.debug(i+":"+p)
if i == 's1':
qs = qs.filter(name1__in=posList)
elif i == 's2':
qs = qs.filter(name2__in=posList)
elif i == 's3':
qs = qs.filter(name3__in=posList)
elif i == 's4':
qs = qs.filter(name4__in=posList)
elif i == 's5':
qs = qs.filter(name5__in=posList)
# reg = r'^'+r1+'\|\|'+r2+'\|\|'+r3+'\|\|'+r4+'\|\|'+r5
# logger.debug(reg)
# qs = qs.filter(name__iregex=r''+reg+'')
else:
if len(posVals) > 0:
posList = posVals.split('||')
logger.debug('filtering on posVals')
# qs = qs.filter(name__iregex=r''+posVals+'')
qs = qs.filter(name__in=posList)
# filter using sliders
pval = request.POST.get('filt_results_pval', None)
odds = request.POST.get('filt_results_odds', None)
pfr = request.POST.get('filt_results_pfr', None)
logger.debug('pval:'+pval+' odds:'+odds+' pfr:'+pfr)
if pval and pval != 'NaN':
qs = qs.filter(mean_cp__lte=pval)
if odds and odds != 'NaN':
qs = qs.filter(mean_odds__gte=odds)
if pfr and pfr != 'NaN':
qs = qs.filter(treeLevel__gte=pfr)
logger.debug('len(qs)=' + str(len(qs)))
#remove ids names
if type == 'st':
return export_to_csv(request, qs, fields = ('name1', 'name2', 'name3', 'name4', 'name5', 'mean_cp', 'mean_odds', 'uniq_a', 'uniq_b', 'shared', 'score', 'treeLevel'), resID=resID)
elif type == 'mesh':
for c in qs:
c.name = c.name.rsplit(":",1)[0]
return export_to_csv(request, qs, fields = ('name', 'mean_cp', 'mean_odds', 'uniq_a', 'uniq_b', 'shared', 'score', 'treeLevel'), resID=resID)
elif type == 'sc':
for c in qs:
c.name = c.name.rsplit(":",1)[0]
return export_to_csv(request, qs, fields = ('name', 'mean_cp', 'mean_odds', 'uniq_a', 'uniq_b', 'shared', 'score'), resID=resID)
def download_filter(request):
fList = request.POST.get('fList')
resID = request.POST.get('resID')
fType = request.POST.get('fType')
    if fList is not None and len(fList) > 0:
response = HttpResponse(fList, content_type='application/force-download')
response['Content-Disposition'] = 'attachment; filename="%s"' % resID+fType+'-filter.txt'
return response
def upload_filter(request):
logger.debug('uploading filter file')
context={}
return HttpResponse(json.dumps(context), 'application/json')
def save_filter(request):
logger.debug('saving filters')
resID = request.GET.get('resID')
com = Compare.objects.get(pk=resID)
type = request.GET.get('type')
negVals = json.loads(request.GET.get('nsTerm'))
posVals = json.loads(request.GET.get('psTerm'))
logger.debug('resID : ' +resID+" type : "+type)
logger.debug('nsTerm ' +str(negVals))
logger.debug('psTerm ' +str(posVals))
fCount=0
if type == 'st':
for i in negVals:
if len(negVals[i])>0:
neg = negVals[i]
logger.debug(i+":"+neg)
loc = int(i[1])
f=Filters(com_id=com.id,version=1,type=type,num=fCount,value=neg,location=loc,ftype='neg')
f.save()
fCount+=1
context={}
return HttpResponse()
def ajax_delete(request):
logger.debug('user_id = '+str(request.user.id))
if str(request.user.id) == 'None':
logger.debug('Someone is trying to delete the demo data!')
else:
session = driver.session()
id = request.GET['id']
type = request.GET['type']
logger.debug('Deleting id '+id+' for type '+type)
if type == 'AS':
s = SearchSet.objects.get(pk=id)
user_id = s.user_id
name = s.job_name
#check user ids match
if str(user_id) != str(request.user.id):
logger.debug('wrong user access - user id = '+str(request.user.id)+' data id = '+user_id)
#return HttpResponseRedirect('/')
else:
#delete from mysql
s.delete()
Compare.objects.filter(job_name__contains=id+'_').delete()
Compare.objects.filter(job_name=id).delete()
#delete from neo4j
com="match (s:SearchSet)-[r]-(p:Pubmed) where s.name = '"+name+"_"+user_id+"' delete s,r;"
logger.debug(com)
session.run(com)
session.close()
#delete FET data
com = 'rm -r '+tmpDir+'saved_data/fet/'+id+'_*'
logger.debug(com)
subprocess.call(com, shell=True)
return HttpResponse()
def temmpo(request):
if str(request.user.id) == 'None':
return HttpResponseRedirect('/')
else:
s=SearchSet.objects.filter(user_id=str(request.user.id),job_status='Complete')
context = {'s': s}
return render_to_response('temmpo.html', context, context_instance=RequestContext(request))
def temmpo_res(request):
if str(request.user.id) == 'None':
return HttpResponseRedirect('/')
else:
user = str(request.user.id)
logger.debug('user = '+user)
as1 = request.POST.get('as1')
as2 = request.POST.get('as2')
s1=SearchSet.objects.get(job_name=as1,user_id=str(request.user.id))
s2=SearchSet.objects.get(job_name=as2,user_id=str(request.user.id))
int_file = request.FILES['intFile']
intData = int_file.read().replace("\n","','")[:-2]
#save to file
#fileStore = '/tmp/'+str(int_file)
logger.debug("Running temmpo style analysis on "+str(as1)+" and "+str(as2))
jobDesc = as1+" : "+as2
jobName = str(s1.id)+"_"+str(s2.id)+"_2019"
try:
jCheck = Compare.objects.get(job_name=jobName, job_desc=jobDesc+" : "+str(int_file), user_id=str(request.user.id),job_type='Temmpo')
# delete entry if not complete and resubmitted
if jCheck.job_progress != 100:
jCheck.delete()
jCheck = False
except ObjectDoesNotExist:
jCheck = False
if jCheck == False:
q = Compare(user_id=str(request.user.id), job_desc=jobDesc+" : "+str(int_file), year_range='1950 - 2019', job_name=jobName, job_start=time.strftime("%Y-%m-%d %H:%M:%S"), job_status='Pending',job_type='Temmpo',job_progress=0)
q.save()
j=temmpo_task.delay(q.id,intData)
return HttpResponseRedirect(reverse('jobs'))
| mit | 3,754,276,648,151,506,000 | 31.660293 | 260 | 0.636153 | false |
catmaid/catpy | setup.py | 1 | 1825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import runpy
from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.rst")) as readme_file:
readme = readme_file.read()
with open(os.path.join(here, "HISTORY.rst")) as history_file:
history = history_file.read()
version_dict = runpy.run_path(os.path.join(here, "catpy", "version.py"))
author_dict = runpy.run_path(os.path.join(here, "catpy", "author.py"))
requirements = [
"networkx>=2.0",
"numpy>=1.12",
"Pillow>=5.0",
"requests>=2.14",
"requests-futures>=0.9",
]
setup_requirements = ["pytest-runner>=2.11"]
test_requirements = ["pytest>=3"]
setup(
name="catpy",
version=version_dict["__version__"],
description="Python client for the CATMAID API",
long_description=readme + "\n\n" + history,
author=author_dict["__author__"],
author_email=author_dict["__email__"],
url="https://github.com/catmaid/catpy",
packages=["catpy", "catpy.applications"],
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="catpy catmaid neuron",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
    python_requires=">=3.6",
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
)
| mit | -2,733,525,583,101,903,000 | 28.918033 | 72 | 0.631233 | false |
nicopresto/nicopresto.com.OLD | models/menu.py | 1 | 2368 | # -*- coding: utf-8 -*-
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.title = request.application
response.subtitle = T('customize me!')
response.meta.author = 'Nicholas (Nico) Preston'
response.meta.description = "Nico Preston is a global environmental health researcher at UW-Madison."
response.meta.keywords = "science, global, ecology, computing, health, limnology, open source, GIS, web2py, html5, CSS3, mapreduce, nlp, sahana, noaa, nosql, Nico Preston, Jonathan Patz, Mutlu Ozdogan, Nicholas Preston, UW-Madison"
##########################################
## this is the main application menu
## add/remove items as required
##########################################
response.menu = [
(T('Index'), False, URL(request.application,'default','index'), [])
]
##########################################
## this is here to provide shortcuts
## during development. remove in production
##
## mind that plugins may also affect menu
##########################################
response.menu+=[
(T('Edit'), False, URL('admin', 'default', 'design/%s' % request.application),
[
(T('Controller'), False,
URL('admin', 'default', 'edit/%s/controllers/%s.py' \
% (request.application,request.controller=='appadmin' and
'default' or request.controller))),
(T('View'), False,
URL('admin', 'default', 'edit/%s/views/%s' \
% (request.application,response.view))),
(T('Layout'), False,
URL('admin', 'default', 'edit/%s/views/layout.html' \
% request.application)),
(T('Stylesheet'), False,
URL('admin', 'default', 'edit/%s/static/base.css' \
% request.application)),
(T('DB Model'), False,
URL('admin', 'default', 'edit/%s/models/db.py' \
% request.application)),
(T('Menu Model'), False,
URL('admin', 'default', 'edit/%s/models/menu.py' \
% request.application)),
(T('Database'), False,
URL(request.application, 'appadmin', 'index')),
]
),
]
| mit | 6,870,631,474,979,764,000 | 42.054545 | 231 | 0.492821 | false |
raffienficiaud/django_mturk_minimalistic | boto1/management/commands/retrieve_results.py | 1 | 7604 | from django.core.management.base import BaseCommand, CommandError
from django.core.urlresolvers import reverse
from boto1.models import Image, Hit, Result
from optparse import make_option
import boto
import boto.mturk
import boto.mturk.connection
from .create_hits import get_connection
import json
import time, datetime
_time = time
from datetime import timedelta, tzinfo
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
ZERO = timedelta(0)
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
Local = LocalTimezone()
def from_w3c_to_datetime(str_date):
"""This is an utility function for converting the datetime returned by AMT to a proper
datetime in Python. The datetime in AMT contains the offset of the timezone."""
import re
    result = re.findall(r"\d{2}(Z+)", str_date)
# remove all Z
replaced = str_date.replace('Z', '')
nb_z = len(result[0]) if len(result) > 0 else 0
class NumberofZOffset(datetime.tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = datetime.timedelta(hours = offset*5)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
my_time = datetime.datetime.strptime(replaced, "%Y-%m-%dT%H:%M:%S")
    my_time = my_time.replace(tzinfo=NumberofZOffset(nb_z, 'w3c' + 'z'*nb_z))
return my_time
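# Illustrative example (the timestamp below is made up, not a real AMT value):
# from_w3c_to_datetime("2016-05-01T12:30:00ZZZ") strips the trailing "Z" markers,
# parses the remaining "%Y-%m-%dT%H:%M:%S" part, and the number of "Z" characters
# is meant to drive the fixed offset attached to the result -- yielding a datetime
# for 2016-05-01 12:30.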
def get_all_hits():
"""Retrieves all hits.
"""
hits = [ i for i in get_connection().get_all_hits()]
pn = 1
total_pages = 1
while pn < total_pages:
pn = pn + 1
print "Request hits page %i" % pn
temp_hits = get_connection().get_all_hits(page_number=pn)
hits.extend(temp_hits)
return hits
def get_all_reviewable_hits():
"""Retrieves all hits that are in review state"""
page_size = 50
hits = get_connection().get_reviewable_hits(page_size=page_size)
#print "Total results to fetch %s " % hits.TotalNumResults
#print "Request hits page %i" % 1
total_pages = float(hits.TotalNumResults)/page_size
int_total= int(total_pages)
if(total_pages-int_total>0):
total_pages = int_total+1
else:
total_pages = int_total
pn = 1
while pn < total_pages:
pn = pn + 1
#print "Request hits page %i" % pn
temp_hits = get_connection().get_reviewable_hits(page_size=page_size,page_number=pn)
hits.extend(temp_hits)
return hits
def get_all_responses(reviewable_hits, all_hits, assignments_to_skip = None):
"""Retrieves the content of the responses.
:param set assignments_to_skip: a set of assignments for which the results can be skipped.
:returns: a dictionary containing the collected responses. The dictionary is organized as
    follows:
- The first index of the dict is the hit id.
- The second key of the dict is the assignment id
    - Three entries are then filled:
      - 'response_time' is the time elapsed between the submission of the response and the hit creation date
- 'worker_id' is the id of the worker which submits the current response
- 'fields' is a dictionary containing the fields of the response, where the keys are the fields id
and the values are the content of the responses.
:rtype: dict
"""
hit_ids = [i.HITId for i in all_hits]
responses = {}
for hit in reviewable_hits:
assignments = get_connection().get_assignments(hit.HITId)
if not assignments_to_skip is None:
assignments = (a for a in assignments if not a.AssignmentId in assignments_to_skip)
find_hit = hit_ids.index(hit.HITId)
hit_creation_time = from_w3c_to_datetime(all_hits[find_hit].CreationTime)
current_response = {}
responses[hit.HITId] = current_response
for assignment in assignments:
current_assignment = {}
current_response[assignment.AssignmentId] = current_assignment
response_submission_time = from_w3c_to_datetime(assignment.SubmitTime)
response_time = response_submission_time - hit_creation_time
current_assignment['response_time'] = response_time
current_assignment['worker_id'] = assignment.WorkerId
fields = {}
current_assignment['fields'] = fields
for question_form_answer in assignment.answers[0]:
id, value = question_form_answer.qid, question_form_answer.fields[0]
fields[id] = value
return responses
class Command(BaseCommand):
help = 'Retrieves the results from AMT'
def print_hit_status(self, hits):
"""Prints the status of hits
:param list hits: list of hits of interest
"""
for hit in hits:
self.stdout.write("HIT id=%s status=%s created=%s UTC" % (hit.HITId, hit.HITStatus, from_w3c_to_datetime(hit.CreationTime)))
def handle(self, *args, **options):
nb_new_result_stored = 0
self.stdout.write('Get all hits from Amazon')
all_hits = get_all_hits()
all_hits_set = set((h.HITId.upper().strip() for h in all_hits))
self.stdout.write('Get all hits in review state')
review_hits = get_all_reviewable_hits()
review_set = set((h.HITId.upper().strip() for h in review_hits))
# intersect with the set of hits for this application
self.stdout.write('Intersecting with our content')
my_application_hits = set((c.hit_id.upper().strip() for c in Hit.objects.all()))
all_hits_set.intersection_update(my_application_hits)
review_set.intersection_update(my_application_hits)
all_hits = [a for a in all_hits if a.HITId in all_hits_set]
review_hits = [a for a in review_hits if a.HITId in review_set]
# already filled assignments
assignments_id_already_set = set((c.assignment_id for c in Result.objects.all()))
# retrieving the responses
responses = get_all_responses(review_hits, all_hits, assignments_id_already_set)
for hit_id, dic_assignment in responses.iteritems():
current_hit = Hit.objects.get(hit_id=hit_id)
image_object = current_hit.image
for assignment_id, values in dic_assignment.iteritems():
try:
current_response = Result.objects.create(image = image_object,
hit = current_hit,
assignment_id = assignment_id)
current_response.content = values['fields']
current_response.save()
nb_new_result_stored += 1
except Exception, e:
self.stderr.write(' responses not added to the database for image %s (assignment %s)' % (image_object.name, assignment_id))
continue
self.stdout.write('Successfully retrieved results')
self.stdout.write('- New results created %d' % nb_new_result_stored)
self.stdout.write('- Current number of results %d' % Result.objects.count())
return
| mit | 2,301,703,823,339,876,600 | 29.789474 | 134 | 0.647028 | false |
KingOfBanana/SocialNetworkAI | db/basic_db.py | 1 | 1468 | # -*-coding:utf-8 -*-
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config.conf import get_db_args
# for proxy database
from config.conf import get_proxy_db_args
# end
def get_engine():
args = get_db_args()
connect_str = "{}+pymysql://{}:{}@{}:{}/{}?charset=utf8".format(args['db_type'], args['user'], args['password'],
args['host'], args['port'], args['db_name'])
engine = create_engine(connect_str, encoding='utf-8')
return engine
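# For illustration only: with hypothetical settings such as db_type='mysql', user='root',
# host='127.0.0.1', port=3306 and db_name='weibo', the connect string built above becomes
# "mysql+pymysql://root:<password>@127.0.0.1:3306/weibo?charset=utf8".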
# for proxydatabse
def get_proxydb_engine():
args = get_proxy_db_args()
connect_str = "{}+pymysql://{}:{}@{}:{}/{}?charset=utf8".format(args['db_type'], args['user'], args['password'],
args['host'], args['port'], args['db_name'])
engine = create_engine(connect_str, encoding='utf-8')
return engine
eng = get_engine()
Base = declarative_base()
Session = sessionmaker(bind=eng)
db_session = Session()
metadata = MetaData(get_engine())
proxy_db_eng = get_proxydb_engine()
proxy_session = sessionmaker(bind=proxy_db_eng)
proxy_db_session = proxy_session()
proxy_db_metadata = MetaData(get_proxydb_engine())
# end
# __all__ = ['eng', 'Base', 'db_session', 'metadata']
__all__ = ['eng', 'Base', 'db_session', 'metadata', 'proxy_db_eng', 'proxy_db_session', 'proxy_db_metadata'] | mit | -2,402,291,857,328,042,500 | 34.829268 | 116 | 0.613079 | false |
sirk390/coinpy | coinpy-lib/src/coinpy/lib/vm/opcode_impl/arithmetic.py | 1 | 5830 | from coinpy.lib.vm.stack_valtype import cast_to_number, valtype_from_number
from coinpy.lib.vm.opcode_impl.flow import op_verify
import functools
def arithmetic_op(vm, func, arity):
if len(vm.stack) < arity:
raise Exception("Not enought arguments")
args = [cast_to_number(vm.stack.pop()) for _ in range(arity)]
result = func(*reversed(args))
vm.stack.append(valtype_from_number(result))
arithmetic_unary_op = functools.partial(arithmetic_op, arity=1)
arithmetic_binary_op = functools.partial(arithmetic_op, arity=2)
arithmetic_ternary_op = functools.partial(arithmetic_op, arity=3)
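# Illustrative sketch of the stack convention used by these helpers (values are examples):
# with the encodings of 2 and 3 on top of vm.stack, op_add pops both operands, applies the
# lambda to them in pushed order (2, 3) and pushes the encoded result back:
# vm.stack: [..., valtype_from_number(2), valtype_from_number(3)]  -> op_add ->
# vm.stack: [..., valtype_from_number(5)]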
"""
OP_1ADD: a -> a+1
1 is added to a.
"""
def op_1add(vm, instr):
arithmetic_unary_op(vm, lambda a: a + 1)
"""
OP_1SUB: a -> a - 1
1 is substracted from a.
"""
def op_1sub(vm, instr):
arithmetic_unary_op(vm, lambda a: a - 1)
"""
OP_2MUL: a -> a * 2
a is multiplied by 2.
"""
def op_2mul(vm, instr):
arithmetic_unary_op(vm, lambda a: a * 2)
"""
OP_2DIV: a -> a / 2
a is divided by 2.
"""
def op_2div(vm, instr):
arithmetic_unary_op(vm, lambda a: a / 2)
"""
OP_0NOTEQUAL: a -> a != 0 ? 1 : 0
if a is not equal to 0, return 1, otherwise return 0.
"""
def op_0notequal(vm, instr):
arithmetic_unary_op(vm, lambda x: 1 if (x != 0) else 0)
"""
OP_NEGATE: a -> -a
return the opposite of a.
"""
def op_negate(vm, instr):
arithmetic_unary_op(vm, lambda a: -a)
"""
OP_ABS: a -> (a>0) ? a : -a
Return the absolute value of a.
"""
def op_abs(vm, instr):
arithmetic_unary_op(vm, lambda a: abs(a))
"""
OP_NOT: a -> (a==0) ? 1 : 0
if a equals 0 return 1, otherwise return 0.
"""
def op_not(vm, instr):
arithmetic_unary_op(vm, lambda a: 1 if a == 0 else 0)
"""
OP_0NOTEQUAL: a -> (a!=0) ? 1 : 0
if a is different from 0 return 1, otherwise return 0.
"""
def op_0noteequal(vm, instr):
arithmetic_unary_op(vm, lambda a: 0 if a == 0 else 1)
"""
OP_ADD: a b -> a+b
a is added to b.
"""
def op_add(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: x1 + x2)
"""
OP_SUB: a b -> a-b
b is subtracted from a.
"""
def op_sub(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a - b)
"""
OP_MUL: a b -> a*b
a is multiplied by b.
"""
def op_mul(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a * b)
"""
OP_DIV: a b -> a/b
a is divided by b.
"""
def op_div(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a / b)
"""
OP_MOD: a b -> a%b
Returns the remainder after dividing a by b.
"""
def op_mod(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a % b)
"""
OP_LSHIFT: a b -> a<<b
Shifts a left b bits, preserving sign.
"""
def op_lshift(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a << b)
"""
OP_RSHIFT: a b -> a >> b
Shifts a right b bits, preserving sign.
"""
def op_rshift(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a >> b)
"""
OP_BOOLAND: a b -> a&b
If both a and b are not 0, the output is 1. Otherwise 0.
"""
def op_booland(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 != 0 and x2 != 0) and 1 or 0)
"""
OP_BOOLOR: a b -> a|b
If a or b is not 0, the output is 1. Otherwise 0.
"""
def op_boolor(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 != 0 or x2 != 0) and 1 or 0)
"""
OP_NUMEQUAL : a b -> (a==b) ? 1 : 0
Returns 1 if the numbers are equal, 0 otherwise.
"""
def op_numequal(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 == x2) and 1 or 0)
"""
OP_NUMEQUALVERIFY: a b -> (a==b) ? 1 : 0
Same as OP_NUMEQUAL, but runs OP_VERIFY afterward.
"""
def op_numequalverify(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 == x2) and 1 or 0)
op_verify(vm, instr)
"""
OP_NUMNOTEQUAL: a b -> (a!=b) ? 1 : 0
Returns 1 if the numbers are not equal, 0 otherwise.
"""
def op_numnotequal(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 != x2) and 1 or 0)
"""
OP_LESSTHAN : a b -> (a<b) ? 1 : 0
Returns 1 if a is less than b, 0 otherwise.
"""
def op_lessthan(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 < x2) and 1 or 0)
"""
OP_GREATERTHAN : a b -> (a>b) ? 1 : 0
Returns 1 if a is greater than b, 0 otherwise.
"""
def op_greaterthan(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 > x2) and 1 or 0)
"""
OP_LESSTHANOREQUAL : a b -> (a<=b) ? 1 : 0
Returns 1 if a is less than or equal to b, 0 otherwise.
"""
def op_lessthanorequal(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 <= x2) and 1 or 0)
"""
OP_GREATERTHANOREQUAL: a b -> (a>=b) ? 1 : 0
Returns 1 if a is greater than or equal to b, 0 otherwise.
"""
def op_greaterthanorequal(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 >= x2) and 1 or 0)
"""
OP_MIN: a b -> min(a, b)
Returns the smaller of a and b.
"""
def op_min(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: min(x1, x2))
"""
OP_MAX: a b -> max(a, b)
Returns the larger of a and b.
"""
def op_max(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: max(x1, x2))
"""
OP_WITHIN: x min max -> (min <= x < max) ? 1 : 0
Returns 1 if x is within the specified range (left-inclusive), 0 otherwise.
"""
def op_within(vm, instr):
arithmetic_ternary_op(vm, lambda x, min, max: 1 if (min <= x < max) else 0)
| lgpl-3.0 | 7,772,921,792,784,396,000 | 25.743119 | 80 | 0.545798 | false |
LighthouseHPC/lighthouse | src/LAPACK341/computational_inverse/database_check.py | 1 | 1628 | import csv
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
lighthousedir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0,parentdir)
f_database = csv.reader(open(lighthousedir+'/Dlighthouse/Computational/dataBase/le_invert.csv'))
databaseRoutines = []
for line in f_database:
databaseRoutines.append(line[1]+line[2])
f_inverse_341 = open('./routines/inverse_341.txt')
inverse341Routines = []
inverse341Routines_tri = []
keys = []
for line in f_inverse_341:
line = line.rstrip('\r\n')
routine = line.split('.')[0]
inverse341Routines.append(routine)
last3 = routine[-3:]
keys.append(last3)
if last3 == 'tri':
inverse341Routines_tri.append(routine)
else:
pass
inverse341Routines = set(inverse341Routines)
#print inverse341Routines
missingRoutines = list(set(inverse341Routines_tri) - set(databaseRoutines))
f_missing = open('./routines/missingRoutines.txt', 'w')
for routine in missingRoutines:
f_missing.write(routine+'\n')
print "%s computational_inverse routines may need to be updated in the database." % len(missingRoutines)
### sort the routines in ./routines/inverse_341.txt
f_inverse_341_sort = open('./routines/inverse_341_sort.txt', 'w')
keys = set(keys)
print keys
for element in keys:
i = 0
for routine in inverse341Routines:
if element == routine[-3:]:
i += 1
f_inverse_341_sort.write(routine+'\n')
f_inverse_341_sort.write('-------------------------%s\r\n\n' % i)
f_inverse_341_sort.close()
f_inverse_341.close()
f_missing.close() | mit | -835,230,158,127,480,400 | 25.274194 | 104 | 0.672604 | false |
ros2/demos | quality_of_service_demo/rclpy/quality_of_service_demo_py/message_lost_listener.py | 1 | 2648 | # Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.qos_event import SubscriptionEventCallbacks
from rclpy.time import Time
from sensor_msgs.msg import Image
class MessageLostListener(Node):
"""Listener node to demonstrate how to get a notification on lost messages."""
def __init__(self):
"""Create a MessageLostListener."""
super().__init__('message_lost_listener')
# Create an object describing the event handlers that will
# be registered in the subscription.
# In this case, only a handler for a message lost event is registered.
event_callbacks = SubscriptionEventCallbacks(
message_lost=self._message_lost_event_callback)
# Create a subscription, passing the previously created event handlers.
self.subscription = self.create_subscription(
Image,
'message_lost_chatter',
self._message_callback,
1,
event_callbacks=event_callbacks)
def _message_callback(self, message):
"""Log when a message is received."""
now = self.get_clock().now()
diff = now - Time.from_msg(message.header.stamp)
self.get_logger().info(
f'I heard an Image. Message single trip latency: [{diff.nanoseconds}]\n---')
def _message_lost_event_callback(self, message_lost_status):
"""Log the number of lost messages when the event is triggered."""
self.get_logger().info(
'Some messages were lost:\n>\tNumber of new lost messages: '
f'{message_lost_status.total_count_change}'
f' \n>\tTotal number of messages lost: {message_lost_status.total_count}',
)
def main():
rclpy.init(args=None)
listener = MessageLostListener()
executor = SingleThreadedExecutor()
executor.add_node(listener)
try:
executor.spin()
except KeyboardInterrupt:
pass
finally:
rclpy.shutdown()
if __name__ == '__main__':
main()
| apache-2.0 | 8,270,700,819,787,666,000 | 33.842105 | 88 | 0.67145 | false |
tomvansteijn/xsb | xsboringen/geffiles.py | 1 | 13932 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Tom van Steijn, Royal HaskoningDHV
from xsboringen.borehole import Borehole, Segment, Vertical
from xsboringen.cpt import CPT
from xsboringen import utils
from collections import defaultdict, namedtuple
from pathlib import Path
import textwrap
import logging
import os
log = logging.getLogger(os.path.basename(__file__))
def boreholes_from_gef(folder, classifier=None, fieldnames=None):
geffiles = utils.careful_glob(folder, '*.gef')
for geffile in geffiles:
gef = GefBoreholeFile(geffile, classifier, fieldnames)
borehole = gef.to_borehole()
if borehole is not None:
yield borehole
def cpts_from_gef(folder, datacolumns=None, classifier=None, fieldnames=None):
geffiles = utils.careful_glob(folder, '*.gef')
for geffile in geffiles:
gef = GefCPTFile(geffile, classifier, fieldnames)
cpt = gef.to_cpt(datacolumns)
if cpt is not None:
yield cpt
class GefFile(object):
# GEF field names
FieldNames = namedtuple('FieldNames',
['columnsep','recordsep', 'code', 'measurementcode', 'xy', 'z'])
# GEF measurement vars codes
MeasurementVars = namedtuple('MeasurementVars',
['depth', ])
# GEF default field names
_defaultfieldnames = {
'columnsep': 'COLUMNSEPARATOR',
'recordsep': 'RECORDSEPARATOR',
'code': 'TESTID',
'measurementcode': 'MEASUREMENTCODE',
'xy': 'XYID',
'z': 'ZID',
}
# format field
_format = None
# GEF header sections
ColumnInfo = namedtuple('ColumnInfo',
['number', 'unit', 'name', 'quantity_number'])
ColumnVoid = namedtuple('ColumnVoid',
['number', 'value'])
MeasurementText = namedtuple('MeasurementText',
['number', 'value', 'name']
)
MeasurementVar = namedtuple('MeasurementVar',
['number', 'value', 'unit', 'name']
)
SpecimenText = namedtuple('SpecimenText',
['number', 'value', 'name']
)
SpecimenVar = namedtuple('SpecimenVar',
['number', 'value', 'unit', 'name']
)
# GEF measurementvar codes
_defaultmeasurementvars = {
'depth': 16,
}
def __init__(self, geffile,
classifier=None,
fieldnames=None,
measurementvars=None,
):
self.file = Path(geffile).resolve()
self.attrs = {
'source': self.file.name,
'format': self._format,
}
self.classifier = classifier
if fieldnames is not None:
self.fieldnames = self.FieldNames(**fieldnames)
else:
self.fieldnames = self.FieldNames(**self._defaultfieldnames)
if measurementvars is not None:
self.measurementvars = self.MeasurementVars(**measurementvars)
else:
self.measurementvars = self.MeasurementVars(
**self._defaultmeasurementvars
)
@staticmethod
def safe_int(s):
try:
return int(s)
except ValueError:
return None
@staticmethod
def safe_float(s):
try:
return float(s)
except:
return None
@staticmethod
def read_headerline(lines):
line = next(lines)
var, values = line.split('=', maxsplit=1)
return (
var.lstrip('#').strip(),
values.strip(),
)
@classmethod
def read_header(cls, lines):
header = {}
var, values = cls.read_headerline(lines)
while var != 'EOH':
if var == 'COLUMNINFO':
if var not in header:
header[var] = {}
number, unit, name, quantity_number = values.split(',', 3)
columninfo = cls.ColumnInfo(
cls.safe_int(number),
unit.strip(),
name.strip(),
cls.safe_int(quantity_number),
)
header[var][columninfo.number] = columninfo
elif var == 'COLUMNVOID':
if var not in header:
header[var] = {}
number, na_value = values.split(',', 1)
columnvoid = cls.ColumnVoid(
cls.safe_int(number),
cls.safe_float(na_value),
)
header[var][columnvoid.number] = columnvoid
elif var == 'MEASUREMENTTEXT':
if var not in header:
header[var] = {}
number, value, name = values.split(',', 2)
measurementtext = cls.MeasurementText(
cls.safe_int(number),
value.strip(),
name.strip(),
)
header[var][measurementtext.number] = measurementtext
elif var == 'MEASUREMENTVAR':
if var not in header:
header[var] = {}
number, value, unit, *name = values.split(',', 3)
if not name:
name = ''
else:
name = name[0]
measurementvar = cls.MeasurementVar(
cls.safe_int(number),
cls.safe_float(value),
unit.strip(),
name.strip(),
)
header[var][measurementvar.number] = measurementvar
elif var == 'SPECIMENTEXT':
if var not in header:
header[var] = {}
number, value, name = values.split(',', 2)
specimentext = cls.SpecimenText(
cls.safe_int(number),
value.strip(),
name.strip(),
)
header[var][specimentext.number] = specimentext
elif var == 'SPECIMENVAR':
if var not in header:
header[var] = {}
number, value, unit, name = values.split(',', 3)
specimenvar = cls.SpecimenVar(
cls.safe_int(number),
cls.safe_float(value),
unit.strip(),
name.strip(),
)
header[var][specimenvar.number] = specimenvar
else:
header[var] = values.split(',')
var, values = cls.read_headerline(lines)
return header
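    # Example (hypothetical GEF header line, shown only to illustrate the parsing above):
    # "#COLUMNINFO= 1, m, gecorrigeerde diepte, 11" would be stored as
    # header['COLUMNINFO'][1] == ColumnInfo(number=1, unit='m',
    #                                       name='gecorrigeerde diepte', quantity_number=11)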
class GefBoreholeFile(GefFile):
_format = 'GEF Borehole'
@classmethod
def read_segments(cls, lines, columnsep, recordsep):
for line in lines:
line = line.rstrip(recordsep)
attrs = {}
top, base, *remainder = line.split(columnsep)
try:
lithologycolor = remainder[0].replace('\'', '').strip() or None
except IndexError:
lithologycolor = None
try:
sandmedianclass = remainder[1].replace('\'', '').strip() or None
except IndexError:
sandmedianclass = None
try:
comment = remainder[2].replace('\'', '').strip() or None
except IndexError:
comment = None
top = cls.safe_float(top)
base = cls.safe_float(base)
if lithologycolor is not None:
lithology, *color = lithologycolor.split(maxsplit=1)
attrs['color'] = color
else:
lithology = None
if sandmedianclass is not None:
sandmedianclass, *_ = sandmedianclass.split(maxsplit=1)
if comment is not None:
attrs['comment'] = comment
yield Segment(top, base, lithology, sandmedianclass, **attrs)
@staticmethod
def depth_from_segments(segments):
log.debug('calculating depth from segments')
return max(s.base for s in segments)
def to_borehole(self):
log.debug('reading {file:}'.format(file=os.path.basename(self.file)))
with open(self.file) as f:
lines = (l.rstrip('\n') for l in f if len(l.strip()) > 0)
header = self.read_header(lines)
# column separator
if self.fieldnames.columnsep in header:
columnsep, *_ = header[self.fieldnames.columnsep]
else:
columnsep = None
# record separator
if self.fieldnames.recordsep in header:
recordsep, *_ = header[self.fieldnames.recordsep]
else:
recordsep = None
# segments
segments = [
s for s in self.read_segments(lines, columnsep, recordsep)
]
# classify lithology and admix
if self.classifier is not None:
for segment in segments:
segment.update(self.classifier.classify(segment.lithology))
# code
try:
code = header[self.fieldnames.code][0].strip()
except KeyError:
log.warning(
(
'no value for \'{s.fieldnames.code:}\' in {s.file.name:},\n'
'skipping this file'
).format(s=self))
return
# depth
try:
depth = header['MEASUREMENTVAR'][self.measurementvars.depth].value
except KeyError:
depth = self.depth_from_segments(segments)
# x, y
_, x, y, *_ = header[self.fieldnames.xy]
x = self.safe_float(x)
y = self.safe_float(y)
# z
if self.fieldnames.z in header:
_, z, *_ = header[self.fieldnames.z]
z = self.safe_float(z)
else:
z = None
return Borehole(code, depth,
x=x, y=y, z=z,
segments=segments,
**self.attrs,
)
class GefCPTFile(GefFile):
_format = 'GEF CPT'
Columns = namedtuple('GefCPTColumns',
['depth', 'cone_resistance', 'friction_ratio'])
_defaultdatacolumns = {
'depth': 'gecorrigeerde diepte',
'cone_resistance': 'conusweerstand',
'friction_ratio': 'wrijvingsgetal',
}
@classmethod
def read_verticals(cls, lines, selected_columns, na_values, columnsep, recordsep):
items = defaultdict(list)
for line in lines:
line = line.rstrip(recordsep)
if columnsep is None:
valuestrs = [v for v in line.split() if v.strip()]
else:
valuestrs = line.split(columnsep)
for i, valuestr in enumerate(valuestrs):
column = selected_columns.get(i + 1)
na_value = na_values.get(i + 1)
if column is not None:
value = cls.safe_float(valuestr)
if value == na_value:
value = None
items[column].append(value)
try:
depth = items.pop('depth')
except KeyError:
depth = None
verticals = {}
for key, values in items.items():
verticals[key] = Vertical(name=key, depth=depth, values=values)
return verticals
@staticmethod
def depth_from_verticals(verticals, field='friction_ratio'):
log.debug('calculating depth from verticals')
try:
return verticals[field].depth[-1]
except KeyError:
return None
except IndexError:
return None
def to_cpt(self, datacolumns=None):
log.debug('reading {file:}'.format(file=os.path.basename(self.file)))
datacolumns = datacolumns or self._defaultdatacolumns
with open(self.file) as f:
lines = (l.rstrip('\n') for l in f if len(l.strip()) > 0)
header = self.read_header(lines)
# selected columns
column_mapping = {v: k for k, v in datacolumns.items()}
selected_columns = {
i: column_mapping.get(ci.name)
for i, ci in header['COLUMNINFO'].items()
}
# column na values
na_values = {
i: cv.value
for i, cv in header['COLUMNVOID'].items()
}
# column separator
if self.fieldnames.columnsep in header:
columnsep, *_ = header[self.fieldnames.columnsep]
else:
columnsep = None
# record separator
if self.fieldnames.recordsep in header:
recordsep, *_ = header[self.fieldnames.recordsep]
else:
recordsep = None
# verticals
verticals = self.read_verticals(lines,
selected_columns,
na_values,
columnsep,
recordsep,
)
# code
try:
code = header[self.fieldnames.code][0]
except KeyError:
log.warning(
(
'no value for \'{s.fieldnames.code:}\' in {s.file.name:},\n'
'skipping this file'
).format(s=self))
return
# depth
try:
depth = header['MEASUREMENTVAR'][self.measurementvars.depth].value
except KeyError:
depth = self.depth_from_verticals(verticals)
# x, y
_, x, y, *_ = header[self.fieldnames.xy]
x = self.safe_float(x)
y = self.safe_float(y)
# z
if self.fieldnames.z in header:
_, z, *_ = header[self.fieldnames.z]
z = self.safe_float(z)
else:
z = None
return CPT(code, depth,
x=x, y=y, z=z,
verticals=verticals,
**self.attrs,
)
| gpl-3.0 | 3,781,907,062,404,658,700 | 31.027586 | 86 | 0.50567 | false |
surik00/ESP8266 | Bedroom (2 chips, 4 strips)/alarm.py | 1 | 2909 | import time
import logging
import datetime
import paho.mqtt.client as mqtt
from apscheduler.schedulers.background import BlockingScheduler
logging.basicConfig(format=u'%(filename)s [LINE:%(lineno)d]#%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.WARNING, filename='./alarm.log')
BROKER_ADDRESS = "localhost"
WAKE_HOUR = 8
WAKE_MINUTE = 30
TIMEOUT_STEP = 21
MINUTES_BEFORE = 35
def mkthree(color):
line = str(color)
while len(line) < 3:
line = "0" + line
return line
class AlarmClock(object):
def __init__(self):
super(AlarmClock, self).__init__()
self.is_running = True
logging.info('Inited AlarmClock')
def lightit(self):
logging.info('Task started')
self.is_running = True
client = mqtt.Client("AlarmClock")
client.on_message = self._on_message
client.connect(BROKER_ADDRESS)
client.subscribe([("/home/RGBLight1", 0), ("/home/RGBLight2", 0)])
client.loop_start()
client.publish("/home/RGBLight1", "pl1S")
client.publish("/home/RGBLight1", "pr1S")
client.publish("/home/RGBLight2", "pl1S")
client.publish("/home/RGBLight2", "pr1S")
time.sleep(.1)
client.publish("/home/RGBLight1", "cl001,001,001S")
client.publish("/home/RGBLight1", "cr001,001,001S")
client.publish("/home/RGBLight2", "cl001,001,001S")
client.publish("/home/RGBLight2", "cr001,001,001S")
time.sleep(.1)
for i in range(1, 101):
if self.is_running:
color = mkthree(round(255 * (i / 100)))
client.publish("/home/RGBLight1", "cl{},{},{}S".format(color, color, color))
client.publish("/home/RGBLight1", "cr{},{},{}S".format(color, color, color))
client.publish("/home/RGBLight2", "cl{},{},{}S".format(color, color, color))
client.publish("/home/RGBLight2", "cr{},{},{}S".format(color, color, color))
time.sleep(TIMEOUT_STEP)
logging.info('Task finished')
def _on_message(self, client, userdata, message):
logging.debug('Message: {}'.format(message.payload))
msg = message.payload.decode()
if not msg.endswith('S'):
client.loop_stop()
self.is_running = False
logging.info('Stopped execution in case of new internal updates')
if __name__ == '__main__':
logging.debug('Started main')
scheduler = BlockingScheduler()
alarm_start_time = datetime.datetime(1970, 1, 1, WAKE_HOUR, WAKE_MINUTE) - datetime.timedelta(seconds=(TIMEOUT_STEP * 100) + (MINUTES_BEFORE * 60))
logging.info('Counted datetime: {}'.format(str(alarm_start_time)))
alarm = AlarmClock()
scheduler.add_job(alarm.lightit, 'cron', day_of_week='mon-fri', hour=alarm_start_time.hour, minute=alarm_start_time.minute)
scheduler.start()
| gpl-3.0 | -5,868,073,514,299,785,000 | 34.048193 | 151 | 0.610519 | false |
tensorflow/model-optimization | tensorflow_model_optimization/python/core/internal/tensor_encoding/utils/__init__.py | 1 | 2108 | # Copyright 2019, The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the `tensor_encoding` package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import assert_compatible
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import merge_dicts
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import OrderedEnum
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import split_dict_py_tf
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import static_or_dynamic_shape
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import fast_walsh_hadamard_transform
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import pack_into_int
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import random_floats
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import random_floats_cmwc
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import random_signs
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import random_signs_cmwc
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import unpack_from_int
| apache-2.0 | 7,420,145,395,783,279,000 | 64.875 | 123 | 0.821157 | false |
alexpotter1/Neutron_msm8974_d802 | build/buildDaemon.py | 1 | 22175 | #!/usr/bin/env python3
'''
Build Daemon for the Neutron compilation process.
Designed to run on Debian (Linux) environments; this will fail on other OSes.
Copyright Alex Potter 2015.
'''
import sys
import subprocess
import time
import datetime
import os
import sqlite3
import threading
import re
import itertools
import glob
class bcolours:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class InitStub:
def __init__(self):
self.conn = None
self.cursor = None
self.CPUcores = None
self.CPUversion = None
self.localversion = "-Neutron-"
self.dt = None
self.dtbSucceeded = None
self.lines = None
self.spinnerShutdown = None
self.data = None
self.checkEnv()
def spinner(self):
spinner = itertools.cycle(['-','/','|','\\'])
import time
while self.spinnerShutdown == 0:
sys.stdout.write(next(spinner))
sys.stdout.flush()
sys.stdout.write('\b')
time.sleep(0.15)
return
def checkEnv(self):
# Checking CPU info
p = subprocess.Popen("grep -c ^processor /proc/cpuinfo", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
self.CPUcores = int(line.rstrip())
p = subprocess.Popen('cat /proc/cpuinfo | grep "model name"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
self.CPUversion = line.rstrip()
# Check for build tools
print("Checking build environment...")
time.sleep(0.8)
p = subprocess.Popen("apt --installed list", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
outString = str(output)
InstallList = ["bison", "build-essential", "curl", "flex", "git", "gnupg", "gperf",
"libesd0-dev", "liblz4-tool", "libncurses5-dev", "libsdl1.2-dev", "libwxgtk2.8-dev",
"libxml2", "libxml2-utils", "lzop", "openjdk-7-jdk", "openjdk-7-jre", "pngcrush",
"schedtool", "squashfs-tools", "xsltproc", "zip", "g++-multilib",
"gcc-multilib", "lib32ncurses5-dev", "lib32readline-gplv2-dev", "lib32z1-dev", "pv", "openjdk-7-jre-headless", "bc"]
for program in InstallList:
if not program in outString:
subprocess.call("sudo apt-get install %s" % program, shell=True)
#subprocess.call("sudo apt-get install openjdk-7-jre-headless", shell=True)
print(bcolours.OKGREEN + "OK: Build Environment" + bcolours.ENDC)
print("Checking Java version...")
if os.path.isfile("/usr/bin/java"):
time.sleep(0.5)
p = subprocess.Popen("java -version", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
outString = str(err)[2:]
outString = outString[:-1]
if '"1.7.0' in outString and "OpenJDK" in outString:
print(bcolours.OKGREEN + "OK: Java Runtime Environment version: OpenJDK 1.7.0" + bcolours.ENDC)
else:
print(outString)
print(bcolours.WARNING + "WARNING: Check Java version before continuing" + bcolours.ENDC)
raise SystemExit
else:
print(bcolours.FAIL + "FAIL: Java not installed" + bcolours.ENDC)
print("Installing OpenJDK 7...")
subprocess.call("sudo apt-get install openjdk-7-jre openjdk-7-jdk openjdk-7-jre-headless", shell=True)
self.checkEnvVariables()
def checkEnvVariables(self):
CROSS_COMPILE = os.environ.get('CROSS_COMPILE')
print("Checking toolchain path...")
time.sleep(0.5)
if CROSS_COMPILE == None:
print(bcolours.FAIL + "FAIL: Toolchain path not set. Compilation will fail." + bcolours.ENDC)
else:
print(bcolours.OKGREEN + "OK: Toolchain path" + bcolours.ENDC)
print(bcolours.OKBLUE + "Using toolchain path %s" % CROSS_COMPILE + bcolours.ENDC)
self.conn = sqlite3.connect("build/neutronBuild.db")
self.cursor = self.conn.cursor()
self.cursor.execute("CREATE TABLE IF NOT EXISTS UserDefaults (VariableKey TEXT, StringChoice TEXT);")
self.cursor.execute('SELECT * FROM {tn} WHERE {cn}="SaberMod"'.format(tn="UserDefaults", cn="VariableKey"))
data = self.cursor.fetchall()
if len(data) == 0:
sabermod_choice = input("Are you using a SaberMod GCC toolchain? (y/n): ")
self.cursor.execute("INSERT INTO UserDefaults VALUES (?, ?);", ("SaberMod", sabermod_choice.upper()))
self.conn.commit()
self.cursor.execute('SELECT * FROM {tn} WHERE {cn}="SaberMod"'.format(tn="UserDefaults", cn="VariableKey"))
data = self.cursor.fetchall()
SaberMod_persistent_choice = data[0][1]
if SaberMod_persistent_choice == "Y":
if not os.path.isdir("/usr/include/cloog") or not os.path.isfile("/usr/lib/libisl.a"):
print(bcolours.FAIL + "FAIL: Extra SaberMod prebuilts are not installed correctly." + bcolours.ENDC)
else:
print(bcolours.OKGREEN + "OK: SaberMod libraries detected" + bcolours.ENDC)
self.spinnerShutdown = 0
spinningThread = threading.Thread(target=self.spinner)
spinningThread.start()
time.sleep(3.5)
self.spinnerShutdown = 1
subprocess.call("clear", shell=True)
self.setupBuildPrelim()
def setupBuild(self, error=None):
time.sleep(1)
if os.path.isfile("arch/arm/boot/zImage-dtb"):
choice = input("Previous zImage detected. Do you want to make clean? (y/n): ")
if choice.upper() == "N":
error = 1
self.buildInit(zImageExists=True)
if error == None:
print("Last build was successful. Running make clean...")
self.spinnerShutdown = 0
spinnerThread = threading.Thread(target=self.spinner)
spinnerThread.start()
time.sleep(0.5)
p = subprocess.Popen("make clean && make mrproper", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
rc = p.returncode
if rc == 0:
if os.path.isfile("/arch/arm/boot/compressed/piggy.lz4"):
subprocess.call("rm /arch/arm/boot/compressed/piggy.lz4")
if os.path.isfile("arch/arm/boot/zImage"):
subprocess.call("rm arch/arm/boot/zImage && rm arch/arm/boot/zImage-dtb", shell=True)
if os.path.isfile("zip/boot.img"):
subprocess.call("rm zip/Neutron*", shell=True)
subprocess.call("rm zip/boot.img", shell=True)
subprocess.call("rm boot.img", shell=True)
print(bcolours.OKGREEN + "OK: Cleaned build directories" + bcolours.ENDC)
else:
print(bcolours.WARNING + "WARNING: make clean failed" + bcolours.ENDC)
self.spinnerShutdown = 1
ramdisk_choice = input("Do you want to generate a new ramdisk? (y/n): ")
if ramdisk_choice.upper() == "Y":
print("Please download a boot.img file for your device (maybe in CM zips?)")
print("If you've already downloaded one, just press Enter at the next prompt.")
input("Press Enter when you have extracted that boot.img into the executables/stockBoot folder: ")
if os.path.isfile("executables/stockBoot/boot.img"):
print("Processing ramdisk...")
time.sleep(1)
subprocess.call("executables/split_boot executables/stockBoot/boot.img", shell=True)
if os.path.isfile("executables/ramdisk.gz"):
subprocess.call("rm executables/ramdisk.gz", shell=True)
p = subprocess.Popen("executables/mkbootfs boot/ramdisk | gzip > executables/ramdisk.gz", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
rc = p.returncode
if rc == 0:
print(bcolours.OKGREEN + "OK: Ramdisk generated" + bcolours.ENDC)
else:
print(bcolours.FAIL + "FAIL: Ramdisk generation error" + bcolours.ENDC)
subprocess.call("rm -r boot", shell=True)
else:
print("Please download the boot.img file for your device, and make sure it is in the correct folder.")
raise SystemExit
time.sleep(0.5)
print("Generating dt.img from sources...")
subprocess.call("rm executables/dt.img", shell=True)
p = subprocess.Popen("executables/dtbTool -s 2048 -o executables/dt.img -p scripts/dtc/ arch/arm/boot/", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
rc = p.returncode
if "Found 3 unique" in str(output):
print(bcolours.OKGREEN + "OK: DT image generated" + bcolours.ENDC)
else:
print(bcolours.WARNING + "WARNING: DT image generation failed" + bcolours.ENDC)
print(bcolours.WARNING + "Attempting manual generation..." + bcolours.ENDC)
time.sleep(0.2)
dtcFile = ['msm8974-g2-open_com', 'msm8974-v2-2-g2-open_com', 'msm8974-v2-g2-open_com']
pos = 0
dt = ""
self.dtbSucceeded = 0
while pos <= 2:
p = subprocess.Popen("scripts/dtc/dtc -I dts -O dtb -o arch/arm/boot/%s.dtb arch/arm/boot/dts/lge/msm8974-g2/msm8974-g2-open_com/%s.dts" % (dtcFile[pos], dtcFile[pos]),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
dt = line.rstrip()
if dt == "":
self.dtbSucceeded += 1
                pos += 1
if self.dtbSucceeded == 3:
p = subprocess.Popen("executables/dtbTool -s 2048 -o executables/dt.img -p scripts/dtc/ arch/arm/boot/", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
if "Found 3 unique" in str(output):
print(bcolours.OKGREEN + "OK: device tree image generated successfully" + bcolours.ENDC)
else:
print(bcolours.WARNING + "WARNING: Not all device tree binaries exist but device tree image generated successfully" + bcolours.ENDC)
else:
print(bcolours.FAIL + "FAIL: device tree image generation failed" + bcolours.ENDC)
self.localversion += str(input("Enter new version string: "))
self.buildInit(localversionarg=1) # use the localversion that the user entered
else:
self.buildInit(localversionarg=0) # use the localversion that is stored in SQLite db
def setupBuildPrelim(self):
print("---------------------------------------------------------------------------------")
print(bcolours.HEADER + "Neutron Build preparation" + bcolours.ENDC)
print("---------------------------------------------------------------------------------")
time.sleep(1)
self.cursor.execute("CREATE TABLE IF NOT EXISTS BuildFailure (FailureReason TEXT, FileName TEXT, KernelVersion TEXT, DateTime TEXT)")
self.cursor.execute('SELECT * FROM {tn} WHERE {cn}="Compile Error"'.format(tn="BuildFailure", cn="FailureReason"))
self.data = self.cursor.fetchall()
if len(self.data) == 0:
self.cursor.execute('SELECT * FROM {tn} WHERE {cn}="Linker Error"'.format(tn="BuildFailure", cn="FailureReason"))
dataLinker = self.cursor.fetchall()
if len(dataLinker) == 0:
self.setupBuild()
else:
print(bcolours.FAIL + "An incomplete build was detected." + bcolours.ENDC)
print("Error Reason: %s" % data[0][0])
print("File Name: %s" % data[0][1])
print("Kernel Version: %s" % data[0][2])
print("Date: %s" % data[0][3])
print("---------------------------------------------------------------------------------")
self.cursor.execute('DELETE FROM {tn} WHERE {cn}="Linker Error"'.format(tn='BuildFailure', cn="FailureReason"))
self.conn.commit()
self.setupBuild(error=1)
else:
print(bcolours.FAIL + "An incomplete build was detected." + bcolours.ENDC)
print("Error Reason: %s" % self.data[0][0])
print("File Name: %s" % self.data[0][1][:60])
print("Kernel Version: %s" % self.data[0][2])
print("Date: %s" % self.data[0][3])
print("---------------------------------------------------------------------------------")
self.cursor.execute('DELETE FROM {tn} WHERE {cn}="Compile Error"'.format(tn='BuildFailure', cn='FailureReason'))
self.conn.commit()
clean = input("Do you want to discard this build? (y/n): ")
print(clean)
if clean.upper() == "N":
self.setupBuild(error=1)
elif clean.upper() == "Y":
self.setupBuild()
else:
raise SystemExit
def buildInit(self, localversionarg=0, zImageExists=False):
        if zImageExists:
self.createFlashableZip()
else:
if localversionarg == 0:
localversion = str(self.data[0][2])
self.localversion += localversion
makeThread = threading.Thread(target=self.buildMake)
makeThread.start()
else:
localversion = self.localversion
subprocess.call("clear", shell=True)
print("---------------------------------------------------------------------------------")
print(bcolours.HEADER + "Neutron Build Process" + bcolours.ENDC)
print("---------------------------------------------------------------------------------")
print(bcolours.BOLD + "BUILD VARIABLES" + bcolours.ENDC)
self.cursor.execute('SELECT * FROM {tn} WHERE {cn}="SaberMod"'.format(tn="UserDefaults", cn="VariableKey"))
data = self.cursor.fetchall()
path = str(os.environ.get('CROSS_COMPILE'))
version = re.search('el/(.*)/b', path)
if len(data) == 0:
print(bcolours.OKBLUE + "Toolchain version: %s" % str(version.group(1)) + bcolours.ENDC)
else:
print(bcolours.OKBLUE + "Toolchain version: %s" % str(version.group(1)) + " " + "SaberMod GCC" + bcolours.ENDC)
print(bcolours.OKBLUE + "Toolchain path: %s" % path + bcolours.ENDC)
print(bcolours.OKBLUE + "Kernel version: %s" % localversion + bcolours.ENDC)
p = subprocess.Popen("uname -o -n -i -v -r", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
self.lines = line.rstrip()
print(bcolours.OKBLUE + "Host: %s" % str(self.lines.decode('utf-8')) + bcolours.ENDC)
print(bcolours.OKBLUE + "CPU: %s with %i core(s)" % (self.CPUversion.decode("utf-8"), self.CPUcores) + bcolours.ENDC)
print(" ")
OK = input("If this is okay, press Enter to continue or Q to quit...")
if OK.upper() == "Q":
raise SystemExit
else:
self.conn.close()
buildThread = threading.Thread(target=self.build)
buildThread.start()
def createFlashableZip(self):
self.spinnerShutdown = 1
subprocess.call("cp executables/dt.img zip/setup/dt.img", shell=True)
print(bcolours.OKBLUE + "Moving kernel modules..." + bcolours.ENDC)
time.sleep(0.5)
subprocess.call('find . -name "*.ko" -type f -exec cp {} zip/modules \;', shell=True)
print(bcolours.OKBLUE + "Packing into flashable zip..." + bcolours.ENDC)
time.sleep(0.5)
subprocess.call("rm zip/Neutron*.zip", shell=True)
try:
subprocess.call("cp arch/arm/boot/zImage-dtb zip/setup/zImage-dtb", shell=True)
cmd = 'cd zip && zip -r -9 "' + (str(self.localversion)[1:] + '.zip') + '" *'
os.system(cmd)
except TypeError:
cmd = 'cd zip && zip -r -9 "Neutron-undefined.zip" *'
os.system(cmd)
print(bcolours.OKBLUE + "Signing zip file..." + bcolours.ENDC)
if not os.path.isdir('build/openssl'):
subprocess.call("mkdir -p build/openssl", shell=True)
if os.listdir('build/openssl') == []:
print("Generating OpenSSL certificates...")
time.sleep(0.3)
print("Follow the prompts on screen.")
time.sleep(2)
subprocess.call("cd build/openssl && openssl genrsa -out sign.key 8192; openssl req -new -key sign.key -out request.pem; openssl x509 -req -days 9999 -in request.pem -signkey sign.key -out certificate.pem; openssl pkcs8 -topk8 -outform DER -in sign.key -inform PEM -out key.pk8 -nocrypt", shell=True)
path = glob.glob("zip/*.zip")[0]
signed_name = str(self.localversion)[1:] + "-signed" + ".zip"
subprocess.call("java -jar build/signapk.jar build/openssl/certificate.pem build/openssl/key.pk8 %s zip/%s" % (path, signed_name), shell=True)
#subprocess.call("build/zipadjust zip/Neutron-signed.zip zip/Neutron-fixed.zip; rm zip/Neutron-signed.zip", shell=True)
#subprocess.call("java -jar build/minsignapk.jar build/openssl/certificate.pem build/openssl/key.pk8 zip/Neutron-fixed.zip zip/Neutron-%s; rm zip/Neutron-fixed.zip" % signed_name, shell=True)
print(bcolours.OKGREEN + "Done! Closing processes..." + bcolours.ENDC)
subprocess.call("cd zip; find . -type f -not -name '*-signed.zip' | xargs rm", shell=True)
subprocess.call("rm include/generated/compile.h", shell=True)
time.sleep(2)
raise SystemExit
def build(self):
import time
print("---------------------------------------------------------------------------------")
time.sleep(0.6)
os.environ['LOCALVERSION'] = self.localversion
print("Preparing defconfig...")
subprocess.call(["make", "cyanogenmod_d802_defconfig"])
print("Preparing menuconfig...")
subprocess.call(["make", "menuconfig"])
print("Preparing kernelrelease...")
p = subprocess.Popen(["make", "kernelrelease"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
if self.localversion in output.decode('utf-8'):
print(bcolours.OKGREEN + "OK: Kernel Version set correctly" + bcolours.ENDC)
else:
print(bcolours.WARNING + "WARNING: Kernel Version not set correctly" + bcolours.ENDC)
makeThread = threading.Thread(target=self.buildMake)
makeThread.start()
def buildMake(self):
import time
import sqlite3
import datetime
import subprocess
time.sleep(0.5)
print("---------------------------------------------------------------------------------")
print(bcolours.BOLD + "Building..." + bcolours.ENDC)
time.sleep(0.5)
coreArg = "-j%i" % self.CPUcores
spinnerThread = threading.Thread(target=self.spinner)
self.spinnerShutdown = 0
spinnerThread.start()
time.sleep(3)
p = subprocess.Popen(['make', coreArg], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outLog = []
for line in iter(p.stdout.readline, b''):
print(str(line.rstrip().decode('utf-8')))
outLog.append(" " + str(line.rstrip()))
lastFile = None
succeed = None
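        # Scan the captured make output: remember the line preceding the first error message
        # as a hint for the failing file, and note whether the zImage-dtb success marker appeared.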
for i, s in enumerate(outLog):
if "Error" in s or "error" in s or "ERROR" in s:
lastFile = outLog[i-1]
if "arch/arm/boot/zImage-dtb is ready" in s:
succeed = outLog[i]
        if lastFile is None or succeed is not None:
            print(bcolours.OKGREEN + "OK: Build succeeded" + bcolours.ENDC)
        else:
            # Record the failure (and the last file mentioned before the error) so that
            # setupBuildPrelim() can offer to resume this build on the next run.
            failure_time = datetime.datetime.now().strftime("%a %d %b %H:%M")
            print(bcolours.FAIL + "FAIL: Build error" + bcolours.ENDC)
            self.spinnerShutdown = 1
            self.conn = sqlite3.connect("build/quantumBuild.db")
            self.cursor = self.conn.cursor()
            self.cursor.execute("INSERT INTO BuildFailure VALUES (?, ?, ?, ?);", ("Compile Error", lastFile, self.localversion, failure_time))
self.conn.commit()
self.conn.close()
raise SystemExit
self.createFlashableZip()
# Initial display of program in terminal
subprocess.call("clear", shell=True)
print("---------------------------------------------------------------------------------")
print(bcolours.HEADER + "Neutron d802 Debian/Linux kernel build tool by Alex Potter (alexpotter1)" + bcolours.ENDC)
print(bcolours.HEADER + "Please only run on Linux (Debian, Ubuntu, etc)." + bcolours.ENDC)
print(bcolours.HEADER + "Version v3.0" + bcolours.ENDC)
print("---------------------------------------------------------------------------------")
app = InitStub()
| gpl-2.0 | 260,694,615,312,053,120 | 48.277778 | 312 | 0.554453 | false |
andela-uawili/django-bucketlist-application | bucketlist/dashboard/urls.py | 1 | 1550 | from django.conf.urls import url
from views import BucketListsView, \
BucketListCreateView, BucketListUpdateView, \
BucketListDeleteView, BucketListDetailView, \
BucketListItemCreateView, BucketListItemUpdateView, \
BucketListItemDoneView, BucketListItemDeleteView
urlpatterns = [
# bucket list routes:
url(r'^bucketlists/$',
BucketListsView.as_view(),
name='bucketlists'),
url(r'^bucketlists/create/$',
BucketListCreateView.as_view(),
name='bucketlist_create'),
url(r'^bucketlists/(?P<pk>[0-9]+)/update/$',
BucketListUpdateView.as_view(),
name='bucketlist_update'),
url(r'^bucketlists/(?P<pk>[0-9]+)/delete/$',
BucketListDeleteView.as_view(),
name='bucketlist_delete'),
url(r'^bucketlists/(?P<pk>[0-9]+)/$',
BucketListDetailView.as_view(),
name='bucketlist_details'),
# bucket list item routes:
url(r'^bucketlists/(?P<pk>[0-9]+)/items/create$',
BucketListItemCreateView.as_view(),
name='bucketlist_item_create'),
url(r'^bucketlists/(?P<pk>[0-9]+)/items/(?P<item_pk>[0-9]+)/update/$',
BucketListItemUpdateView.as_view(),
name='bucketlist_item_update'),
url(r'^bucketlists/(?P<pk>[0-9]+)/items/(?P<item_pk>[0-9]+)/done/$',
BucketListItemDoneView.as_view(),
name='bucketlist_item_done'),
url(r'^bucketlists/(?P<pk>[0-9]+)/items/(?P<item_pk>[0-9]+)/delete/$',
BucketListItemDeleteView.as_view(),
name='bucketlist_item_delete'),
]
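# Illustrative only (not from the original module): assuming this urlconf is included at the
# project root without a namespace, the named patterns above can be reversed, e.g.
#   reverse('bucketlist_details', kwargs={'pk': 1})                   -> /bucketlists/1/
#   reverse('bucketlist_item_update', kwargs={'pk': 1, 'item_pk': 2}) -> /bucketlists/1/items/2/update/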
| mit | -6,131,289,241,656,271,000 | 28.807692 | 74 | 0.624516 | false |
keithasaurus/django_fun_views | tests/patterns/test_redirect.py | 1 | 1473 | from fun_views.patterns.redirect import redirect_view_pattern
from tests.utils.defaults_for_tests import (default_query_string,
default_response, default_url)
def this_get_url():
return default_url
def this_get_query_string():
return default_query_string
def test_functions_are_called_as_expected():
for this_use_query_string in (True, False):
for this_permanent in (True, False):
this_url = "{}?{}".format(default_url, default_query_string) \
if this_use_query_string \
else default_url
def this_get_use_query_string(query_string):
assert query_string == default_query_string
return this_use_query_string
def this_get_permanent(url):
assert url == this_url
return this_permanent
def this_do_redirect(url, permanent):
assert url == this_url
assert permanent == this_permanent
return default_response
assert default_response == redirect_view_pattern(this_get_url,
this_get_query_string,
this_get_use_query_string,
this_get_permanent,
this_do_redirect)
| mit | 5,001,551,975,581,874,000 | 37.763158 | 87 | 0.501697 | false |
hmenager/pyacd | pyacd/qaparser.py | 1 | 5485 | """
parser module for EMBOSS QA files
"""
from pyparsing import Optional, Suppress, Word, OneOrMore, ZeroOrMore, \
printables, Group, alphanums, alphas, restOfLine, oneOf, nums
from .qa import ApplicationRef, FilePattern, FileGroup, Qa, CommandLine, \
InputLine
TEST_ID = Suppress("ID") + Word(alphanums + '-' + '_')('id')
APPLICATION_REF = oneOf(['AP', 'AA', 'AQ']) + Word(alphas)('appname') + \
Optional(
Suppress('AB') + Word(alphas)('embassypack'))
def _get_application_ref(token):
return ApplicationRef(token['appname'], token.get('embassypack',None))
APPLICATION_REF.setParseAction(_get_application_ref)
CL_LINE = Suppress("CL ") + restOfLine('line')
def _get_cl_line(token):
return CommandLine(token['line'])
CL_LINE.setParseAction(_get_cl_line)
CL_LINES = Group(ZeroOrMore(CL_LINE))('cl_lines')
def _get_cl_lines(token):
return token['cl_lines']
CL_LINES.setParseAction(_get_cl_lines)
IN_LINE = Suppress("IN ") + restOfLine('line')
def _get_in_line(token):
return InputLine(token['line'])
IN_LINE.setParseAction(_get_in_line)
IN_LINES = Group(ZeroOrMore(IN_LINE))('in_lines')
def _get_in_lines(token):
return token['in_lines']
IN_LINES.setParseAction(_get_in_lines)
FILE_PATTERN = Suppress("FP") \
+ Optional(Word(nums))('count') \
+ Word(printables)('pattern')
def _get_file_pattern(token):
return FilePattern(token['pattern'], int(token.get('count')) if
token.get('count') else None)
FILE_PATTERN.setParseAction(_get_file_pattern)
FILE_GROUP = Suppress("FI") + Word(printables)('file') \
+ Optional(Suppress("FC") + oneOf(['<','=','>'])(
'lc_test_operator') + Word(nums)('lc_test_value')) \
+ Group(ZeroOrMore(FILE_PATTERN))('patterns') \
+ Optional(Suppress("FZ") + oneOf(['<','=','>'])(
'size_test_operator') + Word(nums)('size_test_value'))
def _get_file_group(token):
size_test = None
if token.get('size_test_operator'):
size_test = {'operator': token.get('size_test_operator'),
'value': int(token.get('size_test_value'))}
lc_test = None
if token.get('lc_test_operator'):
lc_test = {'operator': token.get('lc_test_operator'),
'value': int(token.get('lc_test_value'))}
return FileGroup(token['file'],line_count_test=lc_test,
patterns=token.get('patterns'),
size_test=size_test)
FILE_GROUP.setParseAction(_get_file_group)
FILE_GROUPS = Group(ZeroOrMore(FILE_GROUP))('files')
def _get_file_groups(token):
return token['files']
FILE_GROUPS.setParseAction(_get_file_groups)
TI_LINE = Suppress('TI ') & Word(nums)('time_limit')
def _get_time_limit(token):
return int(token['time_limit'])
TI_LINE.setParseAction(_get_time_limit)
UC_LINE = Suppress('UC ') & restOfLine('annotation_line')
def _get_annotation(token):
return token.get('annotation_line')
UC_LINE.setParseAction(_get_annotation)
UC_LINES = Group(ZeroOrMore(UC_LINE))('annotation_lines')
def _get_annotations(token):
return token['annotation_lines']
UC_LINES.setParseAction(_get_annotations)
RQ_LINE = Suppress('RQ ') & restOfLine('requirements')
def _get_requirements(token):
return token.get('requirements')
RQ_LINE.setParseAction(_get_requirements)
CC_LINE = Suppress('CC ') & restOfLine('comment_line')
def _get_comment(token):
return token.get('comment_line')[0]
CC_LINE.setParseAction(_get_comment)
CC_LINES = Group(ZeroOrMore(CC_LINE))('comment_lines')
def _get_comments(token):
return token['comment_lines']
CC_LINES.setParseAction(_get_comments)
PP_LINE = Suppress('PP ') & restOfLine('preprocess_line')
def _get_preprocess(token):
return token.get('preprocess_line')[0]
PP_LINE.setParseAction(_get_preprocess)
PP_LINES = Group(ZeroOrMore(PP_LINE))('preprocess_lines')
def _get_preprocesss(token):
return token['preprocess_lines']
PP_LINES.setParseAction(_get_preprocesss)
QA = TEST_ID & \
Group(Optional(TI_LINE) & \
Optional(Suppress('DL keep')) & \
Optional(Suppress('ER ') + Word(nums)('error_code')) & \
Optional(RQ_LINE) & \
CC_LINES & \
PP_LINES & \
UC_LINES) & \
APPLICATION_REF('appref') & CL_LINES & IN_LINES & FILE_GROUPS
#ignore comment lines
QA.ignore('#' + restOfLine)
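# Note: the '&' operators above build a pyparsing Each expression, so the individual
# record lines of a QA block may appear in any order.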
def _get_qa(token):
return Qa(token['id'],token.get('uc',None),token['appref'], command_lines=token[
'cl_lines'], input_lines=token['in_lines'])
QA.setParseAction(_get_qa)
def parse_cl_line(string):
return CL_LINE.parseString(string)[0]
def parse_cl_lines(string):
return CL_LINES.parseString(string)
def parse_in_line(string):
return IN_LINE.parseString(string)[0]
def parse_in_lines(string):
return IN_LINES.parseString(string)
def parse_app_ref(string):
return APPLICATION_REF.parseString(string)[0]
def parse_file_pattern(string):
return FILE_PATTERN.parseString(string)[0]
def parse_file_group(string):
return FILE_GROUP.parseString(string)[0]
def parse_ti_line(string):
return TI_LINE.parseString(string)[0]
def parse_uc_line(string):
return UC_LINE.parseString(string)[0]
def parse_rq_line(string):
return RQ_LINE.parseString(string)[0]
def parse_cc_line(string):
return CC_LINE.parseString(string)[0]
def parse_cc_lines(string):
return CC_LINES.parseString(string)
def parse_qa(string):
""" parse a QA test item (one test case for one application)"""
return QA.parseString(string)[0]
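# Illustrative usage (not part of the original module): a minimal, hedged sketch of the
# line-level helpers. The record strings below are hypothetical examples of EMBOSS qatest
# entries, not taken from a real qatest.dat file.
if __name__ == '__main__':
    app_ref = parse_app_ref('AP seqret')            # ApplicationRef for the 'seqret' application
    cmd_line = parse_cl_line('CL -osformat2 gff')   # CommandLine wrapping the rest of the line
    pattern = parse_file_pattern('FP 1 /X65923/')   # FilePattern with an expected count of 1
    print(app_ref, cmd_line, pattern)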
| mit | 1,841,487,768,452,138,800 | 31.455621 | 84 | 0.668004 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/aio/operations/_routes_operations.py | 1 | 21208 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations:
"""RoutesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs
) -> "_models.Route":
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
route_parameters: "_models.Route",
**kwargs
) -> "_models.Route":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
route_parameters: "_models.Route",
**kwargs
) -> AsyncLROPoller["_models.Route"]:
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2020_07_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def list(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> AsyncIterable["_models.RouteListResult"]:
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
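# Illustrative usage (not part of the generated module): a minimal, hedged sketch of driving
# these operations through the async management client. The resource names are placeholders,
# and the NetworkManagementClient / DefaultAzureCredential wiring is assumed from the wider
# azure-mgmt-network and azure-identity packages rather than defined in this file.
async def _example_routes_usage():  # pragma: no cover - documentation sketch only
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
        poller = await client.routes.begin_delete("my-rg", "my-route-table", "my-route")
        await poller.result()  # wait for the long-running delete to finish
        async for route in client.routes.list("my-rg", "my-route-table"):
            print(route.name)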
| mit | -7,718,265,728,073,358,000 | 48.32093 | 210 | 0.639617 | false |
inmagik/django-multi-gtfs | multigtfs/models/feed_info.py | 1 | 2310 | #
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from jsonfield import JSONField
from multigtfs.models.base import models, Base
@python_2_unicode_compatible
class FeedInfo(Base):
"""Information about the feed
Implements feed_info.txt in the GTFS feed.
"""
feed = models.ForeignKey('Feed')
publisher_name = models.CharField(
max_length=255,
help_text="Full name of organization that publishes the feed.")
publisher_url = models.URLField(
help_text="URL of the feed publisher's organization.")
lang = models.CharField(
"language",
max_length=20,
help_text="IETF BCP 47 language code for text in field.")
start_date = models.DateField(
null=True, blank=True,
help_text="Date that feed starts providing reliable data.")
end_date = models.DateField(
null=True, blank=True,
help_text="Date that feed stops providing reliable data.")
version = models.CharField(
max_length=255, blank=True,
help_text="Version of feed.")
extra_data = JSONField(default={}, blank=True, null=True)
def __str__(self):
return '%s-%s' % (self.feed.id, self.publisher_name)
class Meta:
db_table = 'feed_info'
app_label = 'multigtfs'
verbose_name_plural = "feed info"
_column_map = (
('feed_publisher_name', 'publisher_name'),
('feed_publisher_url', 'publisher_url'),
('feed_lang', 'lang'),
('feed_start_date', 'start_date'),
('feed_end_date', 'end_date'),
('feed_version', 'version')
)
_filename = 'feed_info.txt'
_unique_fields = ('feed_publisher_name',)
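    # For reference, the _column_map above corresponds to a feed_info.txt header row of:
    #   feed_publisher_name,feed_publisher_url,feed_lang,feed_start_date,feed_end_date,feed_version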
| apache-2.0 | -7,563,938,476,831,033,000 | 33.477612 | 74 | 0.663203 | false |
googleapis/python-video-transcoder | tests/unit/gapic/transcoder_v1beta1/test_transcoder_service.py | 1 | 117090 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import (
TranscoderServiceAsyncClient,
)
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import (
TranscoderServiceClient,
)
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import pagers
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import transports
from google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.video.transcoder_v1beta1.types import resources
from google.cloud.video.transcoder_v1beta1.types import services
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TranscoderServiceClient._get_default_mtls_endpoint(None) is None
assert (
TranscoderServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
TranscoderServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
TranscoderServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
TranscoderServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
TranscoderServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class", [TranscoderServiceClient, TranscoderServiceAsyncClient,]
)
def test_transcoder_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "transcoder.googleapis.com:443"
@pytest.mark.parametrize(
"client_class", [TranscoderServiceClient, TranscoderServiceAsyncClient,]
)
def test_transcoder_service_client_service_account_always_use_jwt(client_class):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
client = client_class(credentials=creds)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.TranscoderServiceGrpcTransport, "grpc"),
(transports.TranscoderServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_transcoder_service_client_service_account_always_use_jwt_true(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
@pytest.mark.parametrize(
"client_class", [TranscoderServiceClient, TranscoderServiceAsyncClient,]
)
def test_transcoder_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "transcoder.googleapis.com:443"
def test_transcoder_service_client_get_transport_class():
transport = TranscoderServiceClient.get_transport_class()
available_transports = [
transports.TranscoderServiceGrpcTransport,
]
assert transport in available_transports
transport = TranscoderServiceClient.get_transport_class("grpc")
assert transport == transports.TranscoderServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TranscoderServiceClient, transports.TranscoderServiceGrpcTransport, "grpc"),
(
TranscoderServiceAsyncClient,
transports.TranscoderServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
TranscoderServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TranscoderServiceClient),
)
@mock.patch.object(
TranscoderServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TranscoderServiceAsyncClient),
)
def test_transcoder_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(TranscoderServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(TranscoderServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
TranscoderServiceClient,
transports.TranscoderServiceGrpcTransport,
"grpc",
"true",
),
(
TranscoderServiceAsyncClient,
transports.TranscoderServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
TranscoderServiceClient,
transports.TranscoderServiceGrpcTransport,
"grpc",
"false",
),
(
TranscoderServiceAsyncClient,
transports.TranscoderServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
TranscoderServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TranscoderServiceClient),
)
@mock.patch.object(
TranscoderServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TranscoderServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_transcoder_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TranscoderServiceClient, transports.TranscoderServiceGrpcTransport, "grpc"),
(
TranscoderServiceAsyncClient,
transports.TranscoderServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_transcoder_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TranscoderServiceClient, transports.TranscoderServiceGrpcTransport, "grpc"),
(
TranscoderServiceAsyncClient,
transports.TranscoderServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_transcoder_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_transcoder_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = TranscoderServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_create_job(transport: str = "grpc", request_type=services.CreateJobRequest):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Job(
name="name_value",
input_uri="input_uri_value",
output_uri="output_uri_value",
priority=898,
state=resources.Job.ProcessingState.PENDING,
failure_reason="failure_reason_value",
template_id="template_id_value",
)
response = client.create_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.CreateJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Job)
assert response.name == "name_value"
assert response.input_uri == "input_uri_value"
assert response.output_uri == "output_uri_value"
assert response.priority == 898
assert response.state == resources.Job.ProcessingState.PENDING
assert response.failure_reason == "failure_reason_value"
def test_create_job_from_dict():
test_create_job(request_type=dict)
def test_create_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
client.create_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.CreateJobRequest()
@pytest.mark.asyncio
async def test_create_job_async(
transport: str = "grpc_asyncio", request_type=services.CreateJobRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.Job(
name="name_value",
input_uri="input_uri_value",
output_uri="output_uri_value",
priority=898,
state=resources.Job.ProcessingState.PENDING,
failure_reason="failure_reason_value",
)
)
response = await client.create_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.CreateJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Job)
assert response.name == "name_value"
assert response.input_uri == "input_uri_value"
assert response.output_uri == "output_uri_value"
assert response.priority == 898
assert response.state == resources.Job.ProcessingState.PENDING
assert response.failure_reason == "failure_reason_value"
@pytest.mark.asyncio
async def test_create_job_async_from_dict():
await test_create_job_async(request_type=dict)
def test_create_job_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.CreateJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
call.return_value = resources.Job()
client.create_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_job_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.CreateJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Job())
await client.create_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_job_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Job()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_job(
parent="parent_value", job=resources.Job(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].job == resources.Job(name="name_value")
def test_create_job_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_job(
services.CreateJobRequest(),
parent="parent_value",
job=resources.Job(name="name_value"),
)
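# As the test above demonstrates, a fully-formed request object and flattened
# keyword arguments are mutually exclusive on the generated surface: passing
# both raises ValueError rather than attempting to merge the two.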
@pytest.mark.asyncio
async def test_create_job_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Job())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_job(
parent="parent_value", job=resources.Job(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].job == resources.Job(name="name_value")
@pytest.mark.asyncio
async def test_create_job_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_job(
services.CreateJobRequest(),
parent="parent_value",
job=resources.Job(name="name_value"),
)
def test_list_jobs(transport: str = "grpc", request_type=services.ListJobsRequest):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = services.ListJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_jobs_from_dict():
test_list_jobs(request_type=dict)
def test_list_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
client.list_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobsRequest()
@pytest.mark.asyncio
async def test_list_jobs_async(
transport: str = "grpc_asyncio", request_type=services.ListJobsRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
services.ListJobsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_jobs_async_from_dict():
await test_list_jobs_async(request_type=dict)
def test_list_jobs_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.ListJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
call.return_value = services.ListJobsResponse()
client.list_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_jobs_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.ListJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
services.ListJobsResponse()
)
await client.list_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_jobs_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = services.ListJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_jobs_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_jobs(
services.ListJobsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_jobs_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            services.ListJobsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_jobs_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_jobs(
services.ListJobsRequest(), parent="parent_value",
)
def test_list_jobs_pager():
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobsResponse(
jobs=[resources.Job(), resources.Job(), resources.Job(),],
next_page_token="abc",
),
services.ListJobsResponse(jobs=[], next_page_token="def",),
services.ListJobsResponse(jobs=[resources.Job(),], next_page_token="ghi",),
services.ListJobsResponse(jobs=[resources.Job(), resources.Job(),],),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_jobs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, resources.Job) for i in results)
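# The mocked pages above contain 3 + 0 + 1 + 2 = 6 jobs in total, which is what
# the length assertion checks. The trailing RuntimeError acts as a sentinel: if
# the pager requested a page beyond the four provided, the test would fail
# loudly instead of iterating forever.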
def test_list_jobs_pages():
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobsResponse(
jobs=[resources.Job(), resources.Job(), resources.Job(),],
next_page_token="abc",
),
services.ListJobsResponse(jobs=[], next_page_token="def",),
services.ListJobsResponse(jobs=[resources.Job(),], next_page_token="ghi",),
services.ListJobsResponse(jobs=[resources.Job(), resources.Job(),],),
RuntimeError,
)
pages = list(client.list_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_jobs_async_pager():
client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobsResponse(
jobs=[resources.Job(), resources.Job(), resources.Job(),],
next_page_token="abc",
),
services.ListJobsResponse(jobs=[], next_page_token="def",),
services.ListJobsResponse(jobs=[resources.Job(),], next_page_token="ghi",),
services.ListJobsResponse(jobs=[resources.Job(), resources.Job(),],),
RuntimeError,
)
async_pager = await client.list_jobs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, resources.Job) for i in responses)
@pytest.mark.asyncio
async def test_list_jobs_async_pages():
client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobsResponse(
jobs=[resources.Job(), resources.Job(), resources.Job(),],
next_page_token="abc",
),
services.ListJobsResponse(jobs=[], next_page_token="def",),
services.ListJobsResponse(jobs=[resources.Job(),], next_page_token="ghi",),
services.ListJobsResponse(jobs=[resources.Job(), resources.Job(),],),
RuntimeError,
)
pages = []
async for page_ in (await client.list_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
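# Note the asymmetry with the synchronous pager above: the async surface
# returns an awaitable that resolves to an AsyncPager, so the call itself is
# awaited first and its .pages attribute is then consumed with `async for`.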
def test_get_job(transport: str = "grpc", request_type=services.GetJobRequest):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Job(
name="name_value",
input_uri="input_uri_value",
output_uri="output_uri_value",
priority=898,
state=resources.Job.ProcessingState.PENDING,
failure_reason="failure_reason_value",
template_id="template_id_value",
)
response = client.get_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.GetJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Job)
assert response.name == "name_value"
assert response.input_uri == "input_uri_value"
assert response.output_uri == "output_uri_value"
assert response.priority == 898
assert response.state == resources.Job.ProcessingState.PENDING
assert response.failure_reason == "failure_reason_value"
def test_get_job_from_dict():
test_get_job(request_type=dict)
def test_get_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
client.get_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.GetJobRequest()
@pytest.mark.asyncio
async def test_get_job_async(
transport: str = "grpc_asyncio", request_type=services.GetJobRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.Job(
name="name_value",
input_uri="input_uri_value",
output_uri="output_uri_value",
priority=898,
state=resources.Job.ProcessingState.PENDING,
failure_reason="failure_reason_value",
)
)
response = await client.get_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.GetJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.Job)
assert response.name == "name_value"
assert response.input_uri == "input_uri_value"
assert response.output_uri == "output_uri_value"
assert response.priority == 898
assert response.state == resources.Job.ProcessingState.PENDING
assert response.failure_reason == "failure_reason_value"
@pytest.mark.asyncio
async def test_get_job_async_from_dict():
await test_get_job_async(request_type=dict)
def test_get_job_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.GetJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
call.return_value = resources.Job()
client.get_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_job_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.GetJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Job())
await client.get_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_job_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Job()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_job_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_job(
services.GetJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_job_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Job())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_job_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_job(
services.GetJobRequest(), name="name_value",
)
def test_delete_job(transport: str = "grpc", request_type=services.DeleteJobRequest):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.DeleteJobRequest()
# Establish that the response is the type that we expect.
assert response is None
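# DeleteJob presumably maps to google.protobuf.Empty on the wire, which the
# generated client surfaces as None; hence the assertion that the response is
# None rather than a resource object.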
def test_delete_job_from_dict():
test_delete_job(request_type=dict)
def test_delete_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
client.delete_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.DeleteJobRequest()
@pytest.mark.asyncio
async def test_delete_job_async(
transport: str = "grpc_asyncio", request_type=services.DeleteJobRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.DeleteJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_job_async_from_dict():
await test_delete_job_async(request_type=dict)
def test_delete_job_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.DeleteJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
call.return_value = None
client.delete_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_job_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.DeleteJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_job_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_job_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_job(
services.DeleteJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_job_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_job_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_job(
services.DeleteJobRequest(), name="name_value",
)
def test_create_job_template(
transport: str = "grpc", request_type=services.CreateJobTemplateRequest
):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = resources.JobTemplate(name="name_value",)
response = client.create_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.CreateJobTemplateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.JobTemplate)
assert response.name == "name_value"
def test_create_job_template_from_dict():
test_create_job_template(request_type=dict)
def test_create_job_template_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_job_template), "__call__"
) as call:
client.create_job_template()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.CreateJobTemplateRequest()
@pytest.mark.asyncio
async def test_create_job_template_async(
transport: str = "grpc_asyncio", request_type=services.CreateJobTemplateRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.JobTemplate(name="name_value",)
)
response = await client.create_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.CreateJobTemplateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.JobTemplate)
assert response.name == "name_value"
@pytest.mark.asyncio
async def test_create_job_template_async_from_dict():
await test_create_job_template_async(request_type=dict)
def test_create_job_template_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.CreateJobTemplateRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_job_template), "__call__"
) as call:
call.return_value = resources.JobTemplate()
client.create_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_job_template_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.CreateJobTemplateRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_job_template), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.JobTemplate()
)
await client.create_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_job_template_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = resources.JobTemplate()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_job_template(
parent="parent_value",
job_template=resources.JobTemplate(name="name_value"),
job_template_id="job_template_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].job_template == resources.JobTemplate(name="name_value")
assert args[0].job_template_id == "job_template_id_value"
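# The flattened call above should be equivalent to constructing the request
# proto directly. A minimal illustrative sketch of that equivalent request
# (hypothetical helper; field names are taken from the assertions above):
def _example_create_job_template_request():
    return services.CreateJobTemplateRequest(
        parent="parent_value",
        job_template=resources.JobTemplate(name="name_value"),
        job_template_id="job_template_id_value",
    )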
def test_create_job_template_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_job_template(
services.CreateJobTemplateRequest(),
parent="parent_value",
job_template=resources.JobTemplate(name="name_value"),
job_template_id="job_template_id_value",
)
@pytest.mark.asyncio
async def test_create_job_template_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.JobTemplate()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_job_template(
parent="parent_value",
job_template=resources.JobTemplate(name="name_value"),
job_template_id="job_template_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].job_template == resources.JobTemplate(name="name_value")
assert args[0].job_template_id == "job_template_id_value"
@pytest.mark.asyncio
async def test_create_job_template_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_job_template(
services.CreateJobTemplateRequest(),
parent="parent_value",
job_template=resources.JobTemplate(name="name_value"),
job_template_id="job_template_id_value",
)
def test_list_job_templates(
transport: str = "grpc", request_type=services.ListJobTemplatesRequest
):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = services.ListJobTemplatesResponse(
next_page_token="next_page_token_value",
)
response = client.list_job_templates(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobTemplatesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListJobTemplatesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_job_templates_from_dict():
test_list_job_templates(request_type=dict)
def test_list_job_templates_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
client.list_job_templates()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobTemplatesRequest()
@pytest.mark.asyncio
async def test_list_job_templates_async(
transport: str = "grpc_asyncio", request_type=services.ListJobTemplatesRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
services.ListJobTemplatesResponse(next_page_token="next_page_token_value",)
)
response = await client.list_job_templates(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobTemplatesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListJobTemplatesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_job_templates_async_from_dict():
await test_list_job_templates_async(request_type=dict)
def test_list_job_templates_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.ListJobTemplatesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
call.return_value = services.ListJobTemplatesResponse()
client.list_job_templates(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_job_templates_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.ListJobTemplatesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
services.ListJobTemplatesResponse()
)
await client.list_job_templates(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_job_templates_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = services.ListJobTemplatesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_job_templates(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_job_templates_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_job_templates(
services.ListJobTemplatesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_job_templates_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            services.ListJobTemplatesResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_job_templates(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_job_templates_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_job_templates(
services.ListJobTemplatesRequest(), parent="parent_value",
)
def test_list_job_templates_pager():
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobTemplatesResponse(
job_templates=[
resources.JobTemplate(),
resources.JobTemplate(),
resources.JobTemplate(),
],
next_page_token="abc",
),
services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(),], next_page_token="ghi",
),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_job_templates(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, resources.JobTemplate) for i in results)
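# The pager retains the routing metadata it was constructed with (checked here
# via the private _metadata attribute), presumably so that follow-up page
# fetches carry the same "x-goog-request-params" routing header.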
def test_list_job_templates_pages():
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobTemplatesResponse(
job_templates=[
resources.JobTemplate(),
resources.JobTemplate(),
resources.JobTemplate(),
],
next_page_token="abc",
),
services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(),], next_page_token="ghi",
),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
),
RuntimeError,
)
pages = list(client.list_job_templates(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_job_templates_async_pager():
client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobTemplatesResponse(
job_templates=[
resources.JobTemplate(),
resources.JobTemplate(),
resources.JobTemplate(),
],
next_page_token="abc",
),
services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(),], next_page_token="ghi",
),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
),
RuntimeError,
)
async_pager = await client.list_job_templates(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, resources.JobTemplate) for i in responses)
@pytest.mark.asyncio
async def test_list_job_templates_async_pages():
client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobTemplatesResponse(
job_templates=[
resources.JobTemplate(),
resources.JobTemplate(),
resources.JobTemplate(),
],
next_page_token="abc",
),
services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(),], next_page_token="ghi",
),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_job_templates(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_get_job_template(
transport: str = "grpc", request_type=services.GetJobTemplateRequest
):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.JobTemplate(name="name_value",)
response = client.get_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.GetJobTemplateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.JobTemplate)
assert response.name == "name_value"
def test_get_job_template_from_dict():
test_get_job_template(request_type=dict)
def test_get_job_template_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
client.get_job_template()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.GetJobTemplateRequest()
@pytest.mark.asyncio
async def test_get_job_template_async(
transport: str = "grpc_asyncio", request_type=services.GetJobTemplateRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.JobTemplate(name="name_value",)
)
response = await client.get_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.GetJobTemplateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, resources.JobTemplate)
assert response.name == "name_value"
@pytest.mark.asyncio
async def test_get_job_template_async_from_dict():
await test_get_job_template_async(request_type=dict)
def test_get_job_template_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.GetJobTemplateRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
call.return_value = resources.JobTemplate()
client.get_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_job_template_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.GetJobTemplateRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.JobTemplate()
)
await client.get_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_job_template_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.JobTemplate()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_job_template(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_job_template_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_job_template(
services.GetJobTemplateRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_job_template_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.JobTemplate()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_job_template(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_job_template_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_job_template(
services.GetJobTemplateRequest(), name="name_value",
)
def test_delete_job_template(
transport: str = "grpc", request_type=services.DeleteJobTemplateRequest
):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.DeleteJobTemplateRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_job_template_from_dict():
test_delete_job_template(request_type=dict)
def test_delete_job_template_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_job_template), "__call__"
) as call:
client.delete_job_template()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.DeleteJobTemplateRequest()
@pytest.mark.asyncio
async def test_delete_job_template_async(
transport: str = "grpc_asyncio", request_type=services.DeleteJobTemplateRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.DeleteJobTemplateRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_job_template_async_from_dict():
await test_delete_job_template_async(request_type=dict)
def test_delete_job_template_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.DeleteJobTemplateRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_job_template), "__call__"
) as call:
call.return_value = None
client.delete_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_job_template_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.DeleteJobTemplateRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_job_template), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_job_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_job_template_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_job_template(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_job_template_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_job_template(
services.DeleteJobTemplateRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_job_template_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_job_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_job_template(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_job_template_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_job_template(
services.DeleteJobTemplateRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TranscoderServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TranscoderServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TranscoderServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TranscoderServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TranscoderServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.TranscoderServiceGrpcTransport,)
def test_transcoder_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TranscoderServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_transcoder_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.TranscoderServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_job",
"list_jobs",
"get_job",
"delete_job",
"create_job_template",
"list_job_templates",
"get_job_template",
"delete_job_template",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_transcoder_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TranscoderServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_transcoder_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TranscoderServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_transcoder_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TranscoderServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_transcoder_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TranscoderServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_transcoder_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TranscoderServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_transcoder_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_transcoder_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TranscoderServiceGrpcTransport, grpc_helpers),
(transports.TranscoderServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_transcoder_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"transcoder.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="transcoder.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
def test_transcoder_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_transcoder_service_host_no_port():
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="transcoder.googleapis.com"
),
)
assert client.transport._host == "transcoder.googleapis.com:443"
def test_transcoder_service_host_with_port():
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="transcoder.googleapis.com:8000"
),
)
assert client.transport._host == "transcoder.googleapis.com:8000"
def test_transcoder_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TranscoderServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_transcoder_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TranscoderServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
def test_transcoder_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TranscoderServiceGrpcTransport,
transports.TranscoderServiceGrpcAsyncIOTransport,
],
)
def test_transcoder_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_job_path():
project = "squid"
location = "clam"
job = "whelk"
expected = "projects/{project}/locations/{location}/jobs/{job}".format(
project=project, location=location, job=job,
)
actual = TranscoderServiceClient.job_path(project, location, job)
assert expected == actual
def test_parse_job_path():
expected = {
"project": "octopus",
"location": "oyster",
"job": "nudibranch",
}
path = TranscoderServiceClient.job_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_job_path(path)
assert expected == actual
def test_job_template_path():
project = "cuttlefish"
location = "mussel"
job_template = "winkle"
expected = "projects/{project}/locations/{location}/jobTemplates/{job_template}".format(
project=project, location=location, job_template=job_template,
)
actual = TranscoderServiceClient.job_template_path(project, location, job_template)
assert expected == actual
def test_parse_job_template_path():
expected = {
"project": "nautilus",
"location": "scallop",
"job_template": "abalone",
}
path = TranscoderServiceClient.job_template_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_job_template_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = TranscoderServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = TranscoderServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = TranscoderServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = TranscoderServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = TranscoderServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = TranscoderServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = TranscoderServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = TranscoderServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = TranscoderServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = TranscoderServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TranscoderServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.TranscoderServiceTransport, "_prep_wrapped_messages"
) as prep:
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.TranscoderServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = TranscoderServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 | 4,683,195,080,450,943,000 | 36.85645 | 136 | 0.661218 | false |
fredericojordan/chess | gui.py | 1 | 11862 | '''
Created on 2 Sep 2016
@author: fvj
'''
import pygame, chess
from random import choice
from traceback import format_exc
from sys import stderr
from time import strftime
from copy import deepcopy
pygame.init()
SQUARE_SIDE = 50
AI_SEARCH_DEPTH = 2
RED_CHECK = (240, 150, 150)
WHITE = (255, 255, 255)
BLUE_LIGHT = (140, 184, 219)
BLUE_DARK = (91, 131, 159)
GRAY_LIGHT = (240, 240, 240)
GRAY_DARK = (200, 200, 200)
CHESSWEBSITE_LIGHT = (212, 202, 190)
CHESSWEBSITE_DARK = (100, 92, 89)
LICHESS_LIGHT = (240, 217, 181)
LICHESS_DARK = (181, 136, 99)
LICHESS_GRAY_LIGHT = (164, 164, 164)
LICHESS_GRAY_DARK = (136, 136, 136)
BOARD_COLORS = [(GRAY_LIGHT, GRAY_DARK),
(BLUE_LIGHT, BLUE_DARK),
(WHITE, BLUE_LIGHT),
(CHESSWEBSITE_LIGHT, CHESSWEBSITE_DARK),
(LICHESS_LIGHT, LICHESS_DARK),
(LICHESS_GRAY_LIGHT, LICHESS_GRAY_DARK)]
BOARD_COLOR = choice(BOARD_COLORS)
BLACK_KING = pygame.image.load('images/black_king.png')
BLACK_QUEEN = pygame.image.load('images/black_queen.png')
BLACK_ROOK = pygame.image.load('images/black_rook.png')
BLACK_BISHOP = pygame.image.load('images/black_bishop.png')
BLACK_KNIGHT = pygame.image.load('images/black_knight.png')
BLACK_PAWN = pygame.image.load('images/black_pawn.png')
BLACK_JOKER = pygame.image.load('images/black_joker.png')
WHITE_KING = pygame.image.load('images/white_king.png')
WHITE_QUEEN = pygame.image.load('images/white_queen.png')
WHITE_ROOK = pygame.image.load('images/white_rook.png')
WHITE_BISHOP = pygame.image.load('images/white_bishop.png')
WHITE_KNIGHT = pygame.image.load('images/white_knight.png')
WHITE_PAWN = pygame.image.load('images/white_pawn.png')
WHITE_JOKER = pygame.image.load('images/white_joker.png')
CLOCK = pygame.time.Clock()
CLOCK_TICK = 15
SCREEN = pygame.display.set_mode((8*SQUARE_SIDE, 8*SQUARE_SIDE), pygame.RESIZABLE)
SCREEN_TITLE = 'Chess Game'
pygame.display.set_icon(pygame.image.load('images/chess_icon.ico'))
pygame.display.set_caption(SCREEN_TITLE)
def resize_screen(square_side_len):
global SQUARE_SIDE
global SCREEN
SCREEN = pygame.display.set_mode((8*square_side_len, 8*square_side_len), pygame.RESIZABLE)
SQUARE_SIDE = square_side_len
def print_empty_board():
SCREEN.fill(BOARD_COLOR[0])
paint_dark_squares(BOARD_COLOR[1])
def paint_square(square, square_color):
col = chess.FILES.index(square[0])
row = 7-chess.RANKS.index(square[1])
pygame.draw.rect(SCREEN, square_color, (SQUARE_SIDE*col,SQUARE_SIDE*row,SQUARE_SIDE,SQUARE_SIDE), 0)
def paint_dark_squares(square_color):
for position in chess.single_gen(chess.DARK_SQUARES):
paint_square(chess.bb2str(position), square_color)
def get_square_rect(square):
col = chess.FILES.index(square[0])
row = 7-chess.RANKS.index(square[1])
return pygame.Rect((col*SQUARE_SIDE, row*SQUARE_SIDE), (SQUARE_SIDE,SQUARE_SIDE))
def coord2str(position, color=chess.WHITE):
if color == chess.WHITE:
file_index = int(position[0]/SQUARE_SIDE)
rank_index = 7 - int(position[1]/SQUARE_SIDE)
return chess.FILES[file_index] + chess.RANKS[rank_index]
if color == chess.BLACK:
file_index = 7 - int(position[0]/SQUARE_SIDE)
rank_index = int(position[1]/SQUARE_SIDE)
return chess.FILES[file_index] + chess.RANKS[rank_index]
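# Illustrative mapping (assuming chess.FILES == 'abcdefgh' and
# chess.RANKS == '12345678', with the default SQUARE_SIDE of 50):
#   coord2str((0, 350), chess.WHITE)  # -> 'a1' (bottom-left square)
#   coord2str((0, 350), chess.BLACK)  # -> 'h8' (board drawn from Black's side)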
def print_board(board, color=chess.WHITE):
if color == chess.WHITE:
printed_board = board
if color == chess.BLACK:
printed_board = chess.rotate_board(board)
print_empty_board()
if chess.is_check(board, chess.WHITE):
paint_square(chess.bb2str(chess.get_king(printed_board, chess.WHITE)), RED_CHECK)
if chess.is_check(board, chess.BLACK):
paint_square(chess.bb2str(chess.get_king(printed_board, chess.BLACK)), RED_CHECK)
for position in chess.colored_piece_gen(printed_board, chess.KING, chess.BLACK):
SCREEN.blit(pygame.transform.scale(BLACK_KING, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.QUEEN, chess.BLACK):
SCREEN.blit(pygame.transform.scale(BLACK_QUEEN, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.ROOK, chess.BLACK):
SCREEN.blit(pygame.transform.scale(BLACK_ROOK, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.BISHOP, chess.BLACK):
SCREEN.blit(pygame.transform.scale(BLACK_BISHOP, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.KNIGHT, chess.BLACK):
SCREEN.blit(pygame.transform.scale(BLACK_KNIGHT, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.PAWN, chess.BLACK):
SCREEN.blit(pygame.transform.scale(BLACK_PAWN, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.JOKER, chess.BLACK):
SCREEN.blit(pygame.transform.scale(BLACK_JOKER, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.KING, chess.WHITE):
SCREEN.blit(pygame.transform.scale(WHITE_KING, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.QUEEN, chess.WHITE):
SCREEN.blit(pygame.transform.scale(WHITE_QUEEN, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.ROOK, chess.WHITE):
SCREEN.blit(pygame.transform.scale(WHITE_ROOK, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.BISHOP, chess.WHITE):
SCREEN.blit(pygame.transform.scale(WHITE_BISHOP, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.KNIGHT, chess.WHITE):
SCREEN.blit(pygame.transform.scale(WHITE_KNIGHT, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.PAWN, chess.WHITE):
SCREEN.blit(pygame.transform.scale(WHITE_PAWN, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
for position in chess.colored_piece_gen(printed_board, chess.JOKER, chess.WHITE):
SCREEN.blit(pygame.transform.scale(WHITE_JOKER, (SQUARE_SIDE,SQUARE_SIDE)), get_square_rect(chess.bb2str(position)))
pygame.display.flip()
def set_title(title):
pygame.display.set_caption(title)
pygame.display.flip()
def make_AI_move(game, color):
set_title(SCREEN_TITLE + ' - Calculating move...')
new_game = chess.make_move(game, chess.get_AI_move(game, AI_SEARCH_DEPTH))
set_title(SCREEN_TITLE)
print_board(new_game.board, color)
return new_game
def try_move(game, attempted_move):
for move in chess.legal_moves(game, game.to_move):
if move == attempted_move:
game = chess.make_move(game, move)
return game
def play_as(game, color):
run = True
ongoing = True
joker = 0
try:
while run:
CLOCK.tick(CLOCK_TICK)
print_board(game.board, color)
if chess.game_ended(game):
set_title(SCREEN_TITLE + ' - ' + chess.get_outcome(game))
ongoing = False
if ongoing and game.to_move == chess.opposing_color(color):
game = make_AI_move(game, color)
if chess.game_ended(game):
set_title(SCREEN_TITLE + ' - ' + chess.get_outcome(game))
ongoing = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
leaving_square = coord2str(event.pos, color)
if event.type == pygame.MOUSEBUTTONUP:
arriving_square = coord2str(event.pos, color)
if ongoing and game.to_move == color:
move = (chess.str2bb(leaving_square), chess.str2bb(arriving_square))
game = try_move(game, move)
print_board(game.board, color)
if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE or event.key == 113: # ESC or Q key
run = False
if event.key == 104 and ongoing: # H key
game = make_AI_move(game, color)
if event.key == 117: # U key
game = chess.unmake_move(game)
game = chess.unmake_move(game)
set_title(SCREEN_TITLE)
print_board(game.board, color)
ongoing = True
if event.key == 99: # C key
global BOARD_COLOR
new_colors = deepcopy(BOARD_COLORS)
new_colors.remove(BOARD_COLOR)
BOARD_COLOR = choice(new_colors)
print_board(game.board, color)
if event.key == 112 or event.key == 100: # P or D key
print(game.get_move_list() + '\n')
print('\n'.join(game.position_history))
if event.key == 101: # E key
print('eval = ' + str(chess.evaluate_game(game)/100))
if event.key == 106: # J key
joker += 1
if joker == 13 and chess.get_queen(game.board, color):
queen_index = chess.bb2index(chess.get_queen(game.board, color))
game.board[queen_index] = color|chess.JOKER
print_board(game.board, color)
if event.type == pygame.VIDEORESIZE:
if SCREEN.get_height() != event.h:
resize_screen(int(event.h/8.0))
elif SCREEN.get_width() != event.w:
resize_screen(int(event.w/8.0))
print_board(game.board, color)
except:
print(format_exc(), file=stderr)
bug_file = open('bug_report.txt', 'a')
bug_file.write('----- ' + strftime('%x %X') + ' -----\n')
bug_file.write(format_exc())
bug_file.write('\nPlaying as WHITE:\n\t' if color == chess.WHITE else '\nPlaying as BLACK:\n\t')
bug_file.write(game.get_move_list() + '\n\t')
bug_file.write('\n\t'.join(game.position_history))
bug_file.write('\n-----------------------------\n\n')
bug_file.close()
def play_as_white(game=chess.Game()):
return play_as(game, chess.WHITE)
def play_as_black(game=chess.Game()):
return play_as(game, chess.BLACK)
def play_random_color(game=chess.Game()):
color = choice([chess.WHITE, chess.BLACK])
play_as(game, color)
# chess.verbose = True
play_random_color()
| mit | -4,019,113,260,679,411,700 | 45.071429 | 125 | 0.599309 | false |
marrow/schema | example/thirdparty/formv.py | 1 | 7031 | [
#'formv.validators.base:VBase', # ABC; reqiured, strip
#'formv.validators.base:VConstant', #
#'formv.validators.base:VBool', # bool(value)
#'formv.validators.base:VEmpty', #
#'formv.validators.base:VLength', # min/max
#'formv.validators.base:VRange', # min < val < max
#'formv.validators.base:VList', # isinstance(list) or isinstance(set, tuple), returns value, list(value), or [value]
#'formv.validators.base:VSet', # as per VList, but with sets
#'formv.validators.chained:VAnyField', # at least one field specified has a value
#'formv.validators.chained:VAllFields', #
#'formv.validators.chained:VPair', # if a has a value, b must have value too
'formv.validators.chained:VCountryPostcode', #
'formv.validators.chained:VCountryStatePostcode', #
'formv.validators.chained:VPostcodeFormat', #
'formv.validators.chained:VState', #
'formv.validators.chained:VCurrency', #
'formv.validators.chained:VLanguage', #
'formv.validators.chained:VGeoDistance', #
'formv.validators.chained:VPhoneFormat', #
'formv.validators.chained:VCreditCard', #
#'formv.validators.compound:VCompound', # .validators
#'formv.validators.compound:VAny', # return on first success
#'formv.validators.compound:VPipe', # return if all succeed
#'formv.validators.dates:VDate', # earliest, latest, after_now, today_or_after, format
#'formv.validators.dates:VTime', #
#'formv.validators.dates:VToDate', # much more basic than VDate, strptime
'formv.validators.encoders:VEncoded', # crypt
'formv.validators.encoders:VEncodedPair', # a+b crypt (i.e. user+pass)
'formv.validators.files:VUploadFile', # tmpdir, mime_types, size, compress, resize, thumbnail, move_to, backup_to
'formv.validators.files:VWatermarkImage', # type, mode, text, layer, font, color, file, margin, opacity, angle
'formv.validators.files:VImprintImage', #
'formv.validators.files:VTextToImage', #
#'formv.validators.geographic:VLatitude', #
#'formv.validators.geographic:VLongitude', #
#'formv.validators.geographic:VCountry', #
#'formv.validators.network:VIPAddress', # inet_ntoa for ipv4 or ipv6 from the dns package
#'formv.validators.network:VCIDR', # ntoa, also using dns
#'formv.validators.network:VMACAddress', # basic algorithm; simpler is int(val.replace(':', ''), 16)
#'formv.validators.numbers:VInteger', #
#'formv.validators.numbers:VFloat', #
#'formv.validators.numbers:VNumber', #
'formv.validators.schema:VSchema', #
'formv.validators.signers:VSignedString', # b64 + hmac
'formv.validators.signers:VSignedObject', # as above, but pickled first
#'formv.validators.strings:VString', #
#'formv.validators.strings:VRegex', #
#'formv.validators.strings:VText', # regex ^[a-zA-Z_\-0-9]*$
#'formv.validators.strings:VEmail', # regex; permissive
#'formv.validators.strings:VPassword', # has stupid special character restriction option (i.e. [^a-zA-Z]{2,})
#'formv.validators.strings:VURL', # only http, https, ftp, uses urlparse; will optionally urlopen
#'formv.validators.strings:VUserAgent', # has default allowed/notallowed lists
]
class WebForm(VSchema):
fields = {
'first_name': VString(min_len=3, max_len=50),
'last_name': VString(min_len=3, max_len=50),
'postcode': VString(),
'state': VString(),
'country': VCountry(required=True, mode='by-name'),
'email': VEmail(required=True),
'password': VPassword(special_chars=3),
'file_upload': VPipe(VUploadFile(mime_types=mime_types,
temp_dir='/tmp/formv/test/tmp',),
VWatermarkImage(type='image',
file=os.path.join(app_root, 'tests/watermarks/copyright.jpg'),
opacity=.04, angle=45),
VWatermarkImage(text='formv text watermark', angle=25,
color=(0,0,0,128), opacity=1),
VImprintImage(text='Note the image watermark in the background',
color=(0,128,128,255)),
VImprintImage(text=datetime.strftime(datetime.utcnow(),
'Uploaded on %Y/%m/%d - %H:%M:%S GMT'),
color=(255,128,128,255),
margin=(25,10)),
)
}
chains = {
'coordinates': VCountryPostcode(country_field='country', # extracts (latitude, longitude) pair
postcode_field='postcode'),
'password': VEncodedPair(required_field='password', # encodes (password, email) pair
required_label='Password',
available_field='email'),
'state': VState(country_field='country', # validates state against country
state_field='state', mode='by-name'),
}
form = WebForm(allow_missing_keys=True,
allow_extra_keys=True,
replace_empty_value=True,
empty_values={
# inject recovered file back into form if no new file has been uploaded
'file_upload': session.files.get('file_upload'),
})
return form.validate(request)
class WebForm(VSchema):
""" form validator """
fields = {
'first_name': VString(min_len=3, max_len=50),
'last_name': VString(min_len=3, max_len=50),
'email': VEmail(required=True),
'address':VString(),
'postcode_start': VString(),
'postcode_end': VString(),
'state': VString(),
'country': VCountry(required=True, mode='by-name'),
'currency': VString(),
'price': VFloat(),
'units': VInteger(),
'pay_method': VString(),
'phone': VString(),
'phone_type': VString(),
'fax': VString(),
'date': VPipe(VToDate(date_format='%d/%m/%Y'), VDate(today_or_after=False)),
'missing_field': VString(),
'username': VString(),
'password': VPassword(special_chars=3),
'file_pdf': VUploadFile(required=True,
mime_types=mime_types,
temp_dir='/tmp/formv/test/tmp',),
'file_jpg': VPipe(VUploadFile(mime_types=mime_types,
temp_dir='/tmp/formv/test/tmp',),
VWatermarkImage(text='watermark'),
VImprintImage(text='imprint')),
'file_csv': VUploadFile(mime_types=mime_types,
temp_dir='/tmp/formv/test/tmp',),
'file_empty': VUploadFile(mime_types=mime_types,
temp_dir='/tmp/formv/test/tmp',),
}
chains = {
'contact': VAnyField(fields=('email', 'phone', 'fax'),
msg='Please provide some relevant, public contact details'),
'state': VState(country_field='country',
state_field='state', mode='by-name'),
'currency': VCurrency(country_field='country',
currency_field='currency', mode='by-name'),
'origin': VCountryPostcode(country_field='country',
postcode_field='postcode_start'),
'destination': VCountryPostcode(country_field='country',
postcode_field='postcode_end'),
'phone_type': VPair(required_field='phone_type',
required_label='Phone type',
available_field='phone'),
'pay_method': VPair(required_field='pay_method',
required_label='Payment method',
available_field='price'),
'password': VEncodedPair(required_field='password',
required_label='VPassword',
available_field='username'),
'z-geodist': VGeoDistance(origin_field='origin',
destination_field='destination'),
}
| mit | 2,609,608,487,151,520,000 | 38.5 | 118 | 0.668895 | false |
kernc/orange3-text | orangecontrib/text/lda.py | 1 | 3585 | import numpy as np
from gensim import corpora, models, matutils
from Orange.data.table import Table
from Orange.data.domain import Domain, ContinuousVariable, StringVariable
from orangecontrib.text.topics import Topics
def chunk_list(l, num):
num = min(len(l), num)
avg = len(l) / float(num)
out = []
last = 0.0
while last < len(l):
out.append(l[int(last):int(last + avg)])
last += avg
return out
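# Illustrative behaviour of chunk_list: the list is split into `num` chunks
# whose sizes differ by at most one element, e.g.
#   chunk_list([1, 2, 3, 4, 5], 2)  # -> [[1, 2], [3, 4, 5]]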
MAX_WORDS = 1000
class LDA:
def __init__(self, text, num_topics=5, callback=None):
"""
        Wrapper for the Gensim LDA model.
:param text: Preprocessed text.
:param num_topics: Number of topics to infer.
:return: None
"""
self.text = text
self.num_topics = num_topics
# generate dict and corpus
dictionary = corpora.Dictionary(self.text)
corpus = [dictionary.doc2bow(t) for t in self.text]
lda = models.LdaModel(id2word=dictionary, num_topics=self.num_topics)
done = 0
for i, part in enumerate(chunk_list(corpus, 95)):
lda.update(part)
done += len(part)
callback(95.0*done/len(corpus))
corpus = lda[corpus]
topics = lda.show_topics(num_topics=-1, num_words=3, formatted=False)
names = [', '.join([i[1] for i in t]) for t in topics]
names = ['Topic{} ({})'.format(i, n) for i, n in enumerate(names, 1)]
self.topic_names = names
self.corpus = corpus
self.lda = lda
def insert_topics_into_corpus(self, corp_in):
"""
Insert topical representation into corpus.
:param corp_in: Corpus into whic we want to insert topical representations
:return: `Orange.data.table.Table`
"""
matrix = matutils.corpus2dense(self.corpus,
num_terms=self.num_topics).T
# Generate the new table.
attr = [ContinuousVariable(n) for n in self.topic_names]
domain = Domain(attr,
corp_in.domain.class_vars,
metas=corp_in.domain.metas)
return Table.from_numpy(domain,
matrix,
Y=corp_in._Y,
metas=corp_in.metas)
def get_topics_table_by_id(self, topic_id):
"""
        Transform topics from the gensim LDA model to a table.
        :param topic_id: ID of the topic to transform.
:return: `Orange.data.table.Table`.
"""
topics = self.lda.show_topics(num_topics=-1, num_words=MAX_WORDS, formatted=False)
if topic_id >= len(topics):
raise ValueError("Too large topic ID.")
num_words = max([len(it) for it in topics])
data = np.zeros((num_words, 2), dtype=object)
data[:, 0] = [item[1] for item in topics[topic_id]]
data[:, 1] = [item[0] for item in topics[topic_id]]
metas = [StringVariable(self.topic_names[topic_id]),
ContinuousVariable("Topic{}_weights".format(topic_id+1))]
metas[-1]._out_format = '%.2e'
domain = Domain([], metas=metas)
t = Topics.from_numpy(domain,
X=np.zeros((num_words, 0)),
metas=data)
t.W = data[:, 1]
return t
def get_top_words_by_id(self, topic_id):
topics = self.lda.show_topics(num_topics=-1, num_words=10, formatted=False)
if topic_id >= len(topics):
raise ValueError("Too large topic ID.")
return [item[1] for item in topics[topic_id]]
| bsd-2-clause | 2,362,877,309,063,393,300 | 32.504673 | 90 | 0.55788 | false |
treyhunner/databundles | databundles/repository.py | 1 | 14389 | """Interface to the CKAN data repository, for uploading bundle records and data extracts.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from databundles.dbexceptions import ConfigurationError
import petl.fluent as petlf
class Repository(object):
'''Interface to the CKAN data repository, for uploading bundle records and
    data extracts.
'''
def __init__(self, bundle, repo_name='default'):
'''Create a new repository interface
'''
import databundles.client.ckan
import time, datetime
self.bundle = bundle
self.extracts = self.bundle.config.group('extracts')
self.partitions = self.bundle.partitions
self.repo_name = repo_name
self._api = None
@property
def api(self):
if not self._api:
self.set_api()
return self._api
def set_api(self):
import databundles.client.ckan
repo_group = self.bundle.config.group('repository')
if not repo_group.get(self.repo_name):
raise ConfigurationError("'repository' group in configure either nonexistent"+
" or missing {} sub-group ".format(self.repo_name))
repo_config = repo_group.get(self.repo_name)
self._api = databundles.client.ckan.Ckan( repo_config.url, repo_config.key)
return self.api
def _validate_for_expr(self, astr,debug=False):
"""Check that an expression is save to evaluate"""
import os
import ast
        try:
            tree = ast.parse(astr)
        except SyntaxError:
            raise ValueError("Could not parse code expression: \"{}\"".format(astr))
for node in ast.walk(tree):
if isinstance(node,(ast.Module,
ast.Expr,
ast.Dict,
ast.Str,
ast.Attribute,
ast.Num,
ast.Name,
ast.Load,
ast.BinOp,
ast.Compare,
ast.Eq,
ast.Import,
ast.alias,
ast.Call
)):
continue
if (isinstance(node,ast.Call)
and isinstance(node.func, ast.Attribute)
and node.func.attr == 'datetime'):
continue
if debug:
attrs=[attr for attr in dir(node) if not attr.startswith('__')]
print(node)
for attrname in attrs:
print(' {k} ==> {v}'.format(k=attrname,v=getattr(node,attrname)))
raise ValueError("Bad node {} in {}. This code is not allowed to execute".format(node,astr))
return True
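    # Illustrative behaviour (examples assumed, not exhaustive): expressions
    # built from names, attributes, calls and comparisons pass, anything else
    # raises ValueError. For instance:
    #   self._validate_for_expr("partition.identity.time == 2010")  # -> True
    #   self._validate_for_expr("x[0]")                             # raises ValueError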
def _do_extract(self, extract_data, force=False):
import os # For the eval @UnusedImport
done_if = extract_data.get('done_if',False)
if not force and done_if and self._validate_for_expr(done_if, True):
if eval(done_if):
self.bundle.log("For extract {}, done_if ( {} ) evaluated true"
.format(extract_data['_name'], done_if))
return extract_data['path']
if extract_data.get('function',False):
file_ = self._do_function_extract(extract_data)
elif extract_data.get('query',False):
file_ = self._do_query_extract(extract_data)
else:
from databundles.dbexceptions import ConfigurationError
raise ConfigurationError("Bad Extract config: {}".format(extract_data))
return file_
def _do_function_extract(self, extract_data):
'''Run a function on the build that produces a file to upload'''
import os.path
f_name = extract_data['function']
f = getattr(self.bundle, f_name)
file_ = f(extract_data)
return file_
def _do_query_extract(self, extract_data):
"""Extract a CSV file and upload it to CKAN"""
import tempfile
import uuid
import os
p = extract_data['_partition'] # Set in _make_partition_dict
file_name = extract_data.get('name', None)
if file_name:
file_ = self.bundle.filesystem.path('extracts', file_name)
else:
file_ = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) )
self.bundle.log("Extracting {} to {}".format(extract_data['title'],file_))
petlf.fromsqlite3(p.database.path, extract_data['query'] ).tocsv(file_) #@UndefinedVariable
return file_
def _send(self, package, extract_data, file_):
import os
import mimetypes
_, ext = os.path.splitext(file_)
mimetypes.init()
content_type = mimetypes.types_map.get(ext,None) #@UndefinedVariable
try:
_,format = content_type.split('/')
except:
format = None
name = extract_data.get('name', os.path.basename(file_))
r = self.api.add_file_resource(package, file_,
name=name,
description=extract_data['description'],
content_type = content_type,
format=format
)
return r
def _make_partition_dict(self, p):
'''Return a dict that includes the fields from the extract expanded for
the values of each and the partition'''
qd = {
'p_id' : p.identity.id_,
'p_name' : p.identity.name,
}
try:
# Bundles don't have these
qd_part = {
'p_table' : p.identity.table,
'p_space' : p.identity.space,
'p_time' : p.identity.time,
'p_grain' : p.identity.grain,
}
except:
qd_part = {'p_table' : '','p_space' : '', 'p_time' :'','p_grain' : ''}
qd = dict(qd.items()+ qd_part.items())
qd['_partition'] = p
return qd
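    # Illustrative shape of the resulting dict (values are hypothetical):
    #   {'p_id': 'a1x001', 'p_name': 'example-2010', 'p_table': 'geo',
    #    'p_space': 'CA', 'p_time': '2010', 'p_grain': '', '_partition': <partition>}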
def _expand_each(self, each):
'''Generate a set of dicts from the cross product of each of the
arrays of 'each' group'''
        # Normalize the each group, particularly for the case where there is only
# one dimension
if not isinstance(each, list):
raise ConfigurationError("The 'each' key must have a list. Got a {} ".format(type(each)))
elif len(each) == 0:
each = [[{}]]
if not isinstance(each[0], list):
each = [each]
# Now the top level arrays of each are dimensions, and we can do a
# multi dimensional iteration over them.
# This is essentially a cross-product, where out <- out X dim(i)
out = []
for i,dim in enumerate(each):
if i == 0:
out = dim
else:
o2 = []
                for d in dim:
                    for j in out:
                        o2.append(dict(d.items()+j.items()))
out = o2
return out
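    # Illustrative cross product (example assumed): two dimensions with two
    # and one value respectively expand to two merged dicts.
    #   self._expand_each([[{'a': 1}, {'a': 2}], [{'b': 10}]])
    #   # -> [{'a': 1, 'b': 10}, {'a': 2, 'b': 10}]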
def _expand_partitions(self, partition_name='any', for_=None):
'''Generate a list of partitions to apply the extract process to. '''
if partition_name == 'any':
partitions = [p for p in self.partitions]
partitions = [self.bundle] + partitions
else:
partition = self.partitions.get(partition_name)
partitions = [partition]
out = []
if not for_:
for_ = 'True'
for partition in partitions:
try:
self.bundle.log("Testing: {} ".format(partition.identity.name))
if self._validate_for_expr(for_, True):
if eval(for_):
out.append(partition)
except Exception as e:
self.bundle.error("Error in evaluting for '{}' : {} ".format(for_, e))
return out
def _sub(self, data):
if data.get('aa', False):
from databundles.geo.analysisarea import get_analysis_area
aa = get_analysis_area(self.bundle.library, **data['aa'])
aa_d = dict(aa.__dict__)
aa_d['aa_name'] = aa_d['name']
del aa_d['name']
data = dict(data.items() + aa_d.items())
data['query'] = data.get('query','').format(**data)
data['title'] = data.get('title','').format(**data)
data['description'] = data.get('description','').format(**data)
data['name'] = data.get('name','').format(**data)
data['path'] = self.bundle.filesystem.path('extracts',format(data['name']))
data['done_if'] = data.get('done_if','').format(**data)
return data
def dep_tree(self, root):
"""Return the tree of dependencies rooted in the given nod name,
excluding all other nodes"""
graph = {}
for key,extract in self.extracts.items():
graph[key] = set(extract.get('depends',[]))
def _recurse(node):
l = set([node])
for n in graph[node]:
l = l | _recurse(n)
return l
return _recurse(root)
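    # Illustrative example (hypothetical config): with extracts
    #   {'a': {'depends': ['b']}, 'b': {}, 'c': {}}
    # dep_tree('a') returns set(['a', 'b']) -- the root plus everything it
    # transitively depends on, while 'c' is left out.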
def generate_extracts(self, root=None):
"""Generate dicts that have the data for an extract, along with the
partition, query, title and description
:param root: The name of an extract group to use as the root of
the dependency tree
:type root: string
If `root` is specified, it is a name of an extract group from the configuration,
and the only extracts performed will be the named extracts and any of its
dependencies.
"""
import collections
from databundles.util import toposort
ext_config = self.extracts
# Order the extracts to satisfy dependencies.
graph = {}
for key,extract in ext_config.items():
graph[key] = set(extract.get('depends',[]))
if graph:
exec_list = []
for group in toposort(graph):
exec_list.extend(group)
else:
exec_list = ext_config.keys()
if root:
deps = self.dep_tree(root)
exec_list = [ n for n in exec_list if n in deps]
# now can iterate over the list.
for key in exec_list:
extract = ext_config[key]
extract['_name'] = key
for_ = extract.get('for', "'True'")
function = extract.get('function', False)
each = extract.get('each', [])
p_id = extract.get('partition', False)
eaches = self._expand_each(each)
            # This part is an awful hack and should be refactored
if function:
for data in eaches:
yield self._sub(dict(extract.items() + data.items()))
elif p_id:
partitions = self._expand_partitions(p_id, for_)
for partition in partitions:
p_dict = self._make_partition_dict(partition)
for data in eaches:
yield self._sub(dict(p_dict.items()+extract.items() +
data.items() ))
def store_document(self, package, config):
import re, string
id = re.sub('[\W_]+', '-',config['title'])
r = self.api.add_url_resource(package,
config['url'],
config['title'],
description=config['description'])
return r
def extract(self, root=None, force=False):
import os
for extract_data in self.generate_extracts(root=root):
file_ = self._do_extract(extract_data, force=force)
if file_ is True:
#self.bundle.log("Extract {} marked as done".format(extract_data['_name']))
pass
elif file_ and os.path.exists(file_):
self.bundle.log("Extracted: {}".format(file_))
else:
self.bundle.error("Extracted file {} does not exist".format(file_))
return True
def submit(self, root=None, force=False, repo=None):
"""Create a dataset for the bundle, then add a resource for each of the
extracts listed in the bundle.yaml file"""
if repo:
self.repo_name = repo
self.set_api()
self.bundle.update_configuration()
from os.path import basename
ckb = self.api.update_or_new_bundle_extract(self.bundle)
sent = set()
# Clear out existing resources.
ckb['resources'] = []
self.api.put_package(ckb)
for doc in self.bundle.config.group('about').get('documents',[]):
self.store_document(ckb, doc)
for extract_data in self.generate_extracts(root=root):
file_ = self._do_extract(extract_data, force=force)
if file_ not in sent:
r = self._send(ckb, extract_data,file_)
sent.add(file_)
url = r['ckan_url']
self.bundle.log("Submitted {} to {}".format(basename(file_), url))
else:
self.bundle.log("Already processed {}, not sending.".format(basename(file_)))
return True | bsd-3-clause | -3,056,784,921,494,447,600 | 33.674699 | 104 | 0.496282 | false |
matrix-org/synapse | synapse/storage/schema/main/delta/31/pushers.py | 1 | 2740 | # Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Change the last_token to last_stream_ordering now that pushers no longer
# listen on an event stream but instead select out of the event_push_actions
# table.
import logging
logger = logging.getLogger(__name__)
def token_to_stream_ordering(token):
return int(token[1:].split("_")[0])
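# Illustrative conversion: a last_token such as "s1234_5678" becomes the
# stream ordering 1234, i.e.
#   token_to_stream_ordering("s1234_5678")  # -> 1234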
def run_create(cur, database_engine, *args, **kwargs):
logger.info("Porting pushers table, delta 31...")
cur.execute(
"""
CREATE TABLE IF NOT EXISTS pushers2 (
id BIGINT PRIMARY KEY,
user_name TEXT NOT NULL,
access_token BIGINT DEFAULT NULL,
profile_tag VARCHAR(32) NOT NULL,
kind VARCHAR(8) NOT NULL,
app_id VARCHAR(64) NOT NULL,
app_display_name VARCHAR(64) NOT NULL,
device_display_name VARCHAR(128) NOT NULL,
pushkey TEXT NOT NULL,
ts BIGINT NOT NULL,
lang VARCHAR(8),
data TEXT,
last_stream_ordering INTEGER,
last_success BIGINT,
failing_since BIGINT,
UNIQUE (app_id, pushkey, user_name)
)
"""
)
cur.execute(
"""SELECT
id, user_name, access_token, profile_tag, kind,
app_id, app_display_name, device_display_name,
pushkey, ts, lang, data, last_token, last_success,
failing_since
FROM pushers
"""
)
count = 0
for row in cur.fetchall():
row = list(row)
row[12] = token_to_stream_ordering(row[12])
cur.execute(
"""
INSERT into pushers2 (
id, user_name, access_token, profile_tag, kind,
app_id, app_display_name, device_display_name,
pushkey, ts, lang, data, last_stream_ordering, last_success,
failing_since
) values (%s)
"""
% (",".join(["?" for _ in range(len(row))])),
row,
)
count += 1
cur.execute("DROP TABLE pushers")
cur.execute("ALTER TABLE pushers2 RENAME TO pushers")
logger.info("Moved %d pushers to new table", count)
def run_upgrade(cur, database_engine, *args, **kwargs):
pass
| apache-2.0 | 8,931,685,205,888,015,000 | 30.860465 | 76 | 0.606934 | false |
GaelVaroquaux/scikits.image | scikits/image/io/setup.py | 1 | 1398 | #!/usr/bin/env python
from scikits.image._build import cython
import os.path
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('io', parent_package, top_path)
config.add_data_dir('tests')
config.add_data_files('_plugins/*.ini')
# This function tries to create C files from the given .pyx files. If
# it fails, we build the checked-in .c files.
cython(['_plugins/_colormixer.pyx', '_plugins/_histograms.pyx'],
working_path=base_path)
config.add_extension('_plugins._colormixer',
sources=['_plugins/_colormixer.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('_plugins._histograms',
sources=['_plugins/_histograms.c'],
include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer = 'scikits.image Developers',
maintainer_email = '[email protected]',
description = 'Image I/O Routines',
url = 'http://stefanv.github.com/scikits.image/',
license = 'Modified BSD',
**(configuration(top_path='').todict())
)
| bsd-3-clause | -264,437,310,782,858,430 | 34.846154 | 79 | 0.61588 | false |
qilicun/python | python2/ecl_local.py | 1 | 2993 | import os
import socket
import sys
import ert.job_queue.driver as driver
#################################################################
# Only a few of the Linux computers in Statoil are proper LSF
# nodes, in the sense that they can talk directly to the LSF
# demons. When using LSF from a computer which is not properly part of
# the LSF cluster you must first ssh to a node which can serve as LSF
# server, and then issue the LSF commands there. This is controlled by
# the LSF_SERVER option in the LSF driver.
#
# In this configuration file the LSF server to use is determined by
# using the hostname prefix (the part before the first '-') of the
# submitting host as a lookup key in the server_list dictionary.
#
# If your workstation has proper access to LSF you can set the
# environment variable LOCAL_LSF; in that case the lsf_server variable
# will be left at None and the LSF driver instance will issue the LSF
# commands directly at the calling host without going through ssh.
#
# Observe that the ssh-based scheme requires that you have
# passwordless login to the server used as lsf server.
#################################################################
server_list = { "be" : "be-grid01.be.statoil.no",
"st" : "st-grid01.st.statoil.no",
"tr" : "tr-grid01.tr.statoil.no",
"stj" : "tr-grid01.tr.statoil.no",
"rio" : "rio-grid01.rio.statoil.no" }
def get_lsf_server():
if os.getenv("LOCAL_LSF"):
# The user has set the LOCAL_LSF environment variable -
# signalling that she has access to a proper LSF node.
return None
else:
host = socket.gethostname()
host_prefix = host.split("-")[0]
lsf_server = server_list.get( host_prefix , None)
# This will silently return None if no appropriate LSF server
# is found. In that case things will blow up at a later stage
# if/when someone tries to use the invalid lsf server.
return lsf_server
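# Illustrative lookup (hostnames are hypothetical): on a workstation named
# "tr-lin001" the prefix is "tr", so get_lsf_server() would return
# "tr-grid01.tr.statoil.no"; with the LOCAL_LSF environment variable set it
# returns None and LSF commands are issued directly on the local host.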
# The command used to run ECLIPSE. The executable will be called with
# commandline arguments: version data_file num_cpu
ecl_cmd = "/project/res/etc/ERT/Scripts/run_eclipse.py"
# The ECLIPSE version which will be used, by default.
ecl_version = "2010.2"
# The resource request passed to the LSF server. In practice every god-damn compute node
# in Statoil will satisfy these needs, so it could just be left as None.
lsf_resource_request = "select[cs && x86_64Linux] rusage[ecl100v2000=1:duration=5]"
lsf_queue = "normal"
rsh_command = "/usr/bin/ssh"
driver_options = { driver.LSF_DRIVER : [("LSF_QUEUE" , lsf_queue),
("LSF_RESOURCE" , lsf_resource_request),
("LSF_SERVER" , get_lsf_server())],
driver.RSH_DRIVER : [("RSH_COMMAND" , rsh_command)],
driver.LOCAL_DRIVER : []}
driver_type = driver.LSF_DRIVER
| gpl-3.0 | -7,270,796,649,879,760,000 | 40.569444 | 88 | 0.625459 | false |
mitodl/open-discussions | course_catalog/migrations/0060_contentfile_moverawjson.py | 1 | 3152 | # Generated by Django 2.2.9 on 2020-02-13 14:43
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("course_catalog", "0059_remove_deprecated_offered_by")]
operations = [
migrations.AddField(
model_name="bootcamp",
name="raw_json",
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="learningresourcerun",
name="raw_json",
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.CreateModel(
name="ContentFile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("uid", models.CharField(blank=True, max_length=36, null=True)),
("key", models.CharField(blank=True, max_length=1024, null=True)),
("title", models.CharField(blank=True, max_length=1024, null=True)),
("description", models.TextField(blank=True, null=True)),
("url", models.TextField(blank=True, null=True)),
("short_url", models.TextField(blank=True, null=True)),
("file_type", models.CharField(blank=True, max_length=128, null=True)),
("section", models.CharField(blank=True, max_length=512, null=True)),
("content", models.TextField(blank=True, null=True)),
(
"content_title",
models.CharField(blank=True, max_length=1024, null=True),
),
(
"content_author",
models.CharField(blank=True, max_length=1024, null=True),
),
(
"content_language",
models.CharField(blank=True, max_length=24, null=True),
),
(
"content_type",
models.CharField(
choices=[("page", "page"), ("file", "file")],
default="file",
max_length=10,
),
),
(
"run",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="content_files",
to="course_catalog.LearningResourceRun",
),
),
],
options={
"verbose_name": "contentfile",
"unique_together": {("key", "run")},
},
),
]
| bsd-3-clause | -7,594,405,783,349,896,000 | 38.4 | 88 | 0.465102 | false |
eviljeff/zamboni | mkt/lookup/views.py | 1 | 22456 | import json
import uuid
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import connection
from django.db.models import Count, Q, Sum
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.http import require_POST
import commonware.log
from babel import numbers
from elasticsearch_dsl import Q as ES_Q, query
from slumber.exceptions import HttpClientError, HttpServerError
from tower import ugettext as _
import mkt
import mkt.constants.lookup as lkp
from lib.pay_server import client
from mkt.access import acl
from mkt.account.utils import purchase_list
from mkt.comm.utils import create_comm_note
from mkt.constants import comm
from mkt.constants.payments import (COMPLETED, FAILED, PENDING,
PROVIDER_BANGO, PROVIDER_LOOKUP,
SOLITUDE_REFUND_STATUSES)
from mkt.developers.models import ActivityLog, AddonPaymentAccount
from mkt.developers.providers import get_provider
from mkt.developers.views_payments import _redirect_to_bango_portal
from mkt.lookup.forms import (APIFileStatusForm, APIStatusForm, DeleteUserForm,
TransactionRefundForm, TransactionSearchForm)
from mkt.prices.models import AddonPaymentData, Refund
from mkt.purchase.models import Contribution
from mkt.reviewers.models import QUEUE_TARAKO
from mkt.site.decorators import json_view, login_required, permission_required
from mkt.site.utils import paginate
from mkt.users.models import UserProfile
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
log = commonware.log.getLogger('z.lookup')
@login_required
@permission_required([('Lookup', 'View')])
def home(request):
tx_form = TransactionSearchForm()
return render(request, 'lookup/home.html', {'tx_form': tx_form})
@login_required
@permission_required([('AccountLookup', 'View')])
def user_summary(request, user_id):
user = get_object_or_404(UserProfile, pk=user_id)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
app_summary = _app_summary(user.pk)
# All refunds that this user has requested (probably as a consumer).
req = Refund.objects.filter(contribution__user=user)
# All instantly-approved refunds that this user has requested.
appr = req.filter(status=mkt.REFUND_APPROVED_INSTANT)
refund_summary = {'approved': appr.count(),
'requested': req.count()}
user_addons = user.addons.order_by('-created')
user_addons = paginate(request, user_addons, per_page=15)
payment_data = (AddonPaymentData.objects.filter(addon__authors=user)
.values(*AddonPaymentData.address_fields())
.distinct())
# If the user is deleted, get the log detailing the delete.
try:
delete_log = ActivityLog.objects.for_user(user).filter(
action=mkt.LOG.DELETE_USER_LOOKUP.id)[0]
except IndexError:
delete_log = None
provider_portals = get_payment_provider_portals(user=user)
return render(request, 'lookup/user_summary.html',
{'account': user, 'app_summary': app_summary,
'delete_form': DeleteUserForm(), 'delete_log': delete_log,
'is_admin': is_admin, 'refund_summary': refund_summary,
'user_addons': user_addons, 'payment_data': payment_data,
'provider_portals': provider_portals})
@login_required
@permission_required([('AccountLookup', 'View')])
def user_delete(request, user_id):
delete_form = DeleteUserForm(request.POST)
if not delete_form.is_valid():
messages.error(request, delete_form.errors)
return HttpResponseRedirect(reverse('lookup.user_summary',
args=[user_id]))
user = get_object_or_404(UserProfile, pk=user_id)
user.deleted = True
user.save() # Must call the save function to delete user.
mkt.log(mkt.LOG.DELETE_USER_LOOKUP, user,
details={'reason': delete_form.cleaned_data['delete_reason']},
user=request.user)
return HttpResponseRedirect(reverse('lookup.user_summary', args=[user_id]))
@login_required
@permission_required([('Transaction', 'View')])
def transaction_summary(request, tx_uuid):
tx_data = _transaction_summary(tx_uuid)
if not tx_data:
raise Http404
tx_form = TransactionSearchForm()
tx_refund_form = TransactionRefundForm()
return render(request, 'lookup/transaction_summary.html',
dict({'uuid': tx_uuid, 'tx_form': tx_form,
'tx_refund_form': tx_refund_form}.items() +
tx_data.items()))
def _transaction_summary(tx_uuid):
"""Get transaction details from Solitude API."""
contrib = get_object_or_404(Contribution, uuid=tx_uuid)
contrib_id = contrib.transaction_id
refund_contribs = contrib.get_refund_contribs()
refund_contrib = refund_contribs[0] if refund_contribs.exists() else None
lookup = {'status': True, 'transaction': True}
pay = {}
try:
pay = client.api.generic.transaction.get_object_or_404(uuid=contrib_id)
except ObjectDoesNotExist:
log.warning('Transaction not found in solitude: {0}'.format(tx_uuid))
lookup['transaction'] = False
if pay.get('provider') == PROVIDER_BANGO:
# If we are processing a Bango refund, then support would also like to
# know the package id.
try:
pay['package_id'] = (client.api.by_url(pay['seller'])
.get_object_or_404()['bango']['package_id'])
except (KeyError, ObjectDoesNotExist):
log.warning('Failed to find Bango package_id: {0}'.format(tx_uuid))
# Get refund status.
refund_status = None
if refund_contrib and refund_contrib.refund.status == mkt.REFUND_PENDING:
try:
status = client.api.bango.refund.get_object_or_404(
data={'uuid': refund_contrib.transaction_id})
refund_status = SOLITUDE_REFUND_STATUSES[status['status']]
except (KeyError, HttpServerError):
lookup['status'] = False
log.warning('Refund lookup failed: {0}'.format(tx_uuid))
return {
# Solitude data.
'lookup': lookup,
'amount': pay.get('amount'),
'currency': pay.get('currency'),
'package_id': pay.get('package_id'),
'provider': PROVIDER_LOOKUP.get(pay.get('provider')),
'refund_status': refund_status,
'support': pay.get('uid_support'),
'timestamp': pay.get('created'),
# Zamboni data.
'app': contrib.addon,
'contrib': contrib,
'related': contrib.related,
'type': mkt.CONTRIB_TYPES.get(contrib.type, _('Incomplete')),
# Filter what is refundable.
'is_refundable': ((contrib.type == mkt.CONTRIB_PURCHASE) and
not refund_contrib),
}
@require_POST
@login_required
@permission_required([('Transaction', 'Refund')])
def transaction_refund(request, tx_uuid):
contrib = get_object_or_404(Contribution, uuid=tx_uuid,
type=mkt.CONTRIB_PURCHASE)
refund_contribs = contrib.get_refund_contribs()
refund_contrib = refund_contribs[0] if refund_contribs.exists() else None
if refund_contrib:
messages.error(request, _('A refund has already been processed.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
form = TransactionRefundForm(request.POST)
if not form.is_valid():
return render(request, 'lookup/transaction_summary.html',
dict({'uuid': tx_uuid, 'tx_refund_form': form,
'tx_form': TransactionSearchForm()}.items() +
_transaction_summary(tx_uuid).items()))
data = {'uuid': contrib.transaction_id,
'manual': form.cleaned_data['manual']}
if settings.BANGO_FAKE_REFUNDS:
data['fake_response_status'] = {'responseCode':
form.cleaned_data['fake']}
try:
res = client.api.bango.refund.post(data)
except (HttpClientError, HttpServerError):
# Either doing something not supposed to or Solitude had an issue.
log.exception('Refund error: %s' % tx_uuid)
messages.error(
request,
_('You cannot make a refund request for this transaction.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
if res['status'] in [PENDING, COMPLETED]:
# Create refund Contribution by cloning the payment Contribution.
refund_contrib = Contribution.objects.get(id=contrib.id)
refund_contrib.id = None
refund_contrib.save()
log.info('Creating refund transaction from: {0} '
'with transaction_id of: {1}'
.format(contrib.id, res['uuid']))
refund_contrib.update(
type=mkt.CONTRIB_REFUND, related=contrib,
uuid=str(uuid.uuid4()),
amount=-refund_contrib.amount if refund_contrib.amount else None,
transaction_id=res['uuid'])
if res['status'] == PENDING:
# Create pending Refund.
refund_contrib.enqueue_refund(
mkt.REFUND_PENDING, request.user,
refund_reason=form.cleaned_data['refund_reason'])
log.info('Refund pending: %s' % tx_uuid)
messages.success(
request, _('Refund for this transaction now pending.'))
elif res['status'] == COMPLETED:
# Create approved Refund.
refund_contrib.enqueue_refund(
mkt.REFUND_APPROVED, request.user,
refund_reason=form.cleaned_data['refund_reason'])
log.info('Refund approved: %s' % tx_uuid)
messages.success(
request, _('Refund for this transaction successfully approved.'))
elif res['status'] == FAILED:
        # The refund request failed on Bango's side.
log.error('Refund failed: %s' % tx_uuid)
messages.error(
request, _('Refund request for this transaction failed.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
@login_required
@permission_required([('AppLookup', 'View')])
def app_summary(request, addon_id):
app = get_object_or_404(Webapp.with_deleted, pk=addon_id)
if 'prioritize' in request.POST and not app.priority_review:
app.update(priority_review=True)
msg = u'Priority Review Requested'
# Create notes and log entries.
create_comm_note(app, app.latest_version, request.user, msg,
note_type=comm.PRIORITY_REVIEW_REQUESTED)
mkt.log(mkt.LOG.PRIORITY_REVIEW_REQUESTED, app, app.latest_version,
created=datetime.now(), details={'comments': msg})
authors = (app.authors.filter(addonuser__role__in=(mkt.AUTHOR_ROLE_DEV,
mkt.AUTHOR_ROLE_OWNER))
.order_by('display_name'))
if app.premium and app.premium.price:
price = app.premium.price
else:
price = None
purchases, refunds = _app_purchases_and_refunds(app)
provider_portals = get_payment_provider_portals(app=app)
versions = None
status_form = APIStatusForm(initial={
'status': mkt.STATUS_CHOICES_API[app.status]
})
version_status_forms = {}
if app.is_packaged:
versions = app.versions.all().order_by('-created')
for v in versions:
version_status_forms[v.pk] = APIFileStatusForm(initial={
'status': mkt.STATUS_CHOICES_API[v.all_files[0].status]
})
permissions = {}
if app.latest_version:
permissions = app.latest_version.manifest.get('permissions', {})
return render(request, 'lookup/app_summary.html', {
'abuse_reports': app.abuse_reports.count(), 'app': app,
'authors': authors, 'purchases': purchases, 'refunds': refunds,
'price': price, 'provider_portals': provider_portals,
'status_form': status_form, 'versions': versions,
'is_tarako': app.tags.filter(tag_text=QUEUE_TARAKO).exists(),
'tarako_review':
app.additionalreview_set.latest_for_queue(QUEUE_TARAKO),
'version_status_forms': version_status_forms,
'permissions': permissions,
})
@login_required
@permission_required([('AccountLookup', 'View')])
def app_activity(request, addon_id):
"""Shows the app activity age for single app."""
app = get_object_or_404(Webapp.with_deleted, pk=addon_id)
user_items = ActivityLog.objects.for_apps([app]).exclude(
action__in=mkt.LOG_HIDE_DEVELOPER)
admin_items = ActivityLog.objects.for_apps([app]).filter(
action__in=mkt.LOG_HIDE_DEVELOPER)
user_items = paginate(request, user_items, per_page=20)
admin_items = paginate(request, admin_items, per_page=20)
return render(request, 'lookup/app_activity.html', {
'admin_items': admin_items, 'app': app, 'user_items': user_items})
@login_required
@permission_required([('BangoPortal', 'Redirect')])
def bango_portal_from_package(request, package_id):
response = _redirect_to_bango_portal(package_id,
'package_id: %s' % package_id)
if 'Location' in response:
return HttpResponseRedirect(response['Location'])
else:
message = (json.loads(response.content)
.get('__all__', response.content)[0])
messages.error(request, message)
return HttpResponseRedirect(reverse('lookup.home'))
@login_required
@permission_required([('AccountLookup', 'View')])
def user_purchases(request, user_id):
"""Shows the purchase page for another user."""
user = get_object_or_404(UserProfile, pk=user_id)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
products = purchase_list(request, user)
return render(request, 'lookup/user_purchases.html',
{'pager': products, 'account': user, 'is_admin': is_admin,
'single': bool(None), 'show_link': False})
@login_required
@permission_required([('AccountLookup', 'View')])
def user_activity(request, user_id):
"""Shows the user activity page for another user."""
user = get_object_or_404(UserProfile, pk=user_id)
products = purchase_list(request, user)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
user_items = ActivityLog.objects.for_user(user).exclude(
action__in=mkt.LOG_HIDE_DEVELOPER)
admin_items = ActivityLog.objects.for_user(user).filter(
action__in=mkt.LOG_HIDE_DEVELOPER)
mkt.log(mkt.LOG.ADMIN_VIEWED_LOG, request.user, user=user)
return render(request, 'lookup/user_activity.html',
{'pager': products, 'account': user, 'is_admin': is_admin,
'single': bool(None),
'user_items': user_items, 'admin_items': admin_items,
'show_link': False})
def _expand_query(q, fields):
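    """Build an elasticsearch bool query matching ``q`` against ``fields``.
    Exact term matches get the highest boost, followed by phrase matches,
    plain matches, fuzzy matches and finally prefix matches, so the closest
    matches rank first in the search results.
    """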
should = []
for field in fields:
should.append(ES_Q('term', **{field: {'value': q, 'boost': 10}}))
should.append(ES_Q('match', **{field: {'query': q, 'boost': 4,
'type': 'phrase'}}))
should.append(ES_Q('match', **{field: {'query': q, 'boost': 3}}))
should.append(ES_Q('fuzzy', **{field: {'value': q, 'boost': 2,
'prefix_length': 4}}))
should.append(ES_Q('prefix', **{field: {'value': q, 'boost': 1.5}}))
return query.Bool(should=should)
@login_required
@permission_required([('AccountLookup', 'View')])
@json_view
def user_search(request):
results = []
q = request.GET.get('q', u'').lower().strip()
search_fields = ('fxa_uid', 'display_name', 'email')
fields = ('id',) + search_fields
if q.isnumeric():
        # id is added implicitly by the ES filter. Add it explicitly:
qs = UserProfile.objects.filter(pk=q).values(*fields)
else:
qs = UserProfile.objects.all()
filters = Q()
for field in search_fields:
filters = filters | Q(**{'%s__icontains' % field: q})
qs = qs.filter(filters)
qs = qs.values(*fields)
qs = _slice_results(request, qs)
for user in qs:
user['url'] = reverse('lookup.user_summary', args=[user['id']])
results.append(user)
return {'results': results}
@login_required
@permission_required([('Transaction', 'View')])
def transaction_search(request):
tx_form = TransactionSearchForm(request.GET)
if tx_form.is_valid():
return redirect(reverse('lookup.transaction_summary',
args=[tx_form.cleaned_data['q']]))
else:
return render(request, 'lookup/home.html', {'tx_form': tx_form})
@login_required
@permission_required([('AppLookup', 'View')])
@json_view
def app_search(request):
results = []
q = request.GET.get('q', u'').lower().strip()
limit = (lkp.MAX_RESULTS if request.GET.get('all_results')
else lkp.SEARCH_LIMIT)
fields = ('name', 'app_slug')
non_es_fields = ['id', 'name__localized_string'] + list(fields)
if q.isnumeric():
qs = Webapp.objects.filter(pk=q).values(*non_es_fields)[:limit]
else:
# Try to load by GUID:
qs = Webapp.objects.filter(guid=q).values(*non_es_fields)[:limit]
if not qs.count():
# TODO: Update to `.fields(...)` when the DSL supports it.
qs = (WebappIndexer.search()
.query(_expand_query(q, fields))[:limit])
qs = qs.execute()
for app in qs:
if isinstance(app, dict):
# This is a result from the database.
app['url'] = reverse('lookup.app_summary', args=[app['id']])
app['name'] = app['name__localized_string']
results.append(app)
else:
# This is a result from elasticsearch which returns `Result`
# objects and "name_translations" as a list, one for each locale.
for trans in app.name_translations:
results.append({
'id': app.id,
'url': reverse('lookup.app_summary', args=[app.id]),
'app_slug': app.get('app_slug'),
'name': trans['string'],
})
return {'results': results}
def _app_summary(user_id):
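    """Summarize a user's purchases per currency.
    Returns a dict with the total number of purchase contributions
    ('app_total') and the amount spent in each currency ('app_amount'),
    computed with a single grouped SQL query over stats_contributions.
    """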
sql = """
select currency,
sum(case when type=%(purchase)s then 1 else 0 end)
as app_total,
sum(case when type=%(purchase)s then amount else 0.0 end)
as app_amount
from stats_contributions
where user_id=%(user_id)s
group by currency
"""
cursor = connection.cursor()
cursor.execute(sql, {'user_id': user_id,
'purchase': mkt.CONTRIB_PURCHASE})
summary = {'app_total': 0,
'app_amount': {}}
cols = [cd[0] for cd in cursor.description]
    while True:
row = cursor.fetchone()
if not row:
break
row = dict(zip(cols, row))
for cn in cols:
if cn.endswith('total'):
summary[cn] += row[cn]
elif cn.endswith('amount'):
summary[cn][row['currency']] = row[cn]
return summary
def _app_purchases_and_refunds(addon):
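    """Aggregate purchase and refund statistics for an app.
    Purchases are counted and summed per currency for the last 24 hours,
    the last 7 days and all time; refunds are broken down by status, and
    the number of requested refunds is also expressed as a percentage of
    all-time purchases.
    """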
purchases = {}
now = datetime.now()
base_qs = (Contribution.objects.values('currency')
.annotate(total=Count('id'),
amount=Sum('amount'))
.filter(addon=addon)
.exclude(type__in=[mkt.CONTRIB_REFUND,
mkt.CONTRIB_CHARGEBACK,
mkt.CONTRIB_PENDING]))
for typ, start_date in (('last_24_hours', now - timedelta(hours=24)),
('last_7_days', now - timedelta(days=7)),
('alltime', None),):
qs = base_qs.all()
if start_date:
qs = qs.filter(created__gte=start_date)
sums = list(qs)
purchases[typ] = {'total': sum(s['total'] for s in sums),
'amounts': [numbers.format_currency(s['amount'],
s['currency'])
for s in sums if s['currency']]}
refunds = {}
rejected_q = Q(status=mkt.REFUND_DECLINED) | Q(status=mkt.REFUND_FAILED)
qs = Refund.objects.filter(contribution__addon=addon)
refunds['requested'] = qs.exclude(rejected_q).count()
percent = 0.0
total = purchases['alltime']['total']
if total:
percent = (refunds['requested'] / float(total)) * 100.0
refunds['percent_of_purchases'] = '%.1f%%' % percent
refunds['auto-approved'] = (qs.filter(status=mkt.REFUND_APPROVED_INSTANT)
.count())
refunds['approved'] = qs.filter(status=mkt.REFUND_APPROVED).count()
refunds['rejected'] = qs.filter(rejected_q).count()
return purchases, refunds
def _slice_results(request, qs):
if request.GET.get('all_results'):
return qs[:lkp.MAX_RESULTS]
else:
return qs[:lkp.SEARCH_LIMIT]
def get_payment_provider_portals(app=None, user=None):
"""
Get a list of dicts describing the payment portals for this app or user.
Either app or user is required.
"""
provider_portals = []
if app:
q = dict(addon=app)
elif user:
q = dict(payment_account__user=user)
else:
raise ValueError('user or app is required')
for acct in (AddonPaymentAccount.objects.filter(**q)
.select_related('payment_account')):
provider = get_provider(id=acct.payment_account.provider)
portal_url = provider.get_portal_url(acct.addon.app_slug)
if portal_url:
provider_portals.append({
'provider': provider,
'app': acct.addon,
'portal_url': portal_url,
'payment_account': acct.payment_account
})
return provider_portals
| bsd-3-clause | -2,779,730,549,008,845,300 | 38.396491 | 79 | 0.605094 | false |
jianajavier/pnc-cli | pnc_cli/swagger_client/apis/products_api.py | 1 | 15858 | # coding: utf-8
"""
ProductsApi.py
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ProductsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_all(self, **kwargs):
"""
Gets all Products
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: ProductPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_index', 'page_size', 'sort', 'q']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all" % key
)
params[key] = val
del params['kwargs']
resource_path = '/products'.replace('{format}', 'json')
method = 'GET'
path_params = {}
query_params = {}
if 'page_index' in params:
query_params['pageIndex'] = params['page_index']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'sort' in params:
query_params['sort'] = params['sort']
if 'q' in params:
query_params['q'] = params['q']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='ProductPage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def create_new(self, **kwargs):
"""
Creates a new Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_new(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ProductRest body:
:return: ProductSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_new" % key
)
params[key] = val
del params['kwargs']
resource_path = '/products'.replace('{format}', 'json')
method = 'POST'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='ProductSingleton',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_specific(self, id, **kwargs):
"""
Get specific Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Product id (required)
:return: ProductSingleton
If the method is called asynchronously,
returns the request thread.
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `get_specific`")
all_params = ['id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_specific" % key
)
params[key] = val
del params['kwargs']
resource_path = '/products/{id}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='ProductSingleton',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update(self, id, **kwargs):
"""
Updates an existing Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Product id (required)
:param ProductRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `update`")
all_params = ['id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
params[key] = val
del params['kwargs']
resource_path = '/products/{id}'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_product_versions(self, id, **kwargs):
"""
Get all versions for a Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_versions(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Product id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: ProductVersionPage
If the method is called asynchronously,
returns the request thread.
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `get_product_versions`")
all_params = ['id', 'page_index', 'page_size', 'sort', 'q']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_versions" % key
)
params[key] = val
del params['kwargs']
resource_path = '/products/{id}/product-versions'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'page_index' in params:
query_params['pageIndex'] = params['page_index']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'sort' in params:
query_params['sort'] = params['sort']
if 'q' in params:
query_params['q'] = params['q']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='ProductVersionPage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
| apache-2.0 | 1,099,622,853,851,888,500 | 33.929515 | 103 | 0.507756 | false |
annayqho/TheCannon | code/lamost/xcalib_5labels/make_lamost_catalog/write_table.py | 1 | 3066 | import numpy as np
print("loading test data")
id_test = np.load("id_all.npz")['arr_0']
label_test = np.load("label_all.npz")['arr_0'].T
err_test = np.load("errs_all.npz")['arr_0'].T
npix_test = np.load("npix_all.npz")['arr_0']
# when a fit fails, I set the error to -9999
print("loading test errs")
teff_err_test = np.sqrt(err_test[:,0])
logg_err_test = np.sqrt(err_test[:,1])
feh_err_test = np.sqrt(err_test[:,2])
alpha_err_test = np.sqrt(err_test[:,3])
ak_err_test = np.sqrt(err_test[:,4])
chisq_test = np.load("chisq_all.npz")['arr_0']
teff_test = label_test[:,0]
logg_test = label_test[:,1]
feh_test = label_test[:,2]
alpha_test = label_test[:,3]
ak_test = label_test[:,4]
# add in the training set
print("loading training data")
direc = "../xcalib_5labels"
direc1 = "/Users/annaho/TheCannon/data/lamost_paper"
id_training = np.load("%s/ref_id.npz" %direc1)['arr_0']
ngoodpix_training = np.sum(
np.load("%s/ref_ivar.npz" %direc1)['arr_0'] > 0, axis=1)
label_training = np.load("%s/all_cannon_label_vals.npz" %direc)['arr_0']
err_training = np.load("%s/all_cannon_label_errs.npz" %direc)['arr_0']
chisq_training = np.load("%s/all_cannon_label_chisq.npz" %direc)['arr_0']
id_total = np.append(id_test, id_training)
teff_total = np.append(teff_test, label_training[:,0])
teff_err_total = np.append(teff_err_test, err_training[:,0])
logg_total = np.append(logg_test,label_training[:,1])
logg_err_total = np.append(logg_err_test,err_training[:,1])
feh_total = np.append(feh_test,label_training[:,2])
feh_err_total = np.append(feh_err_test,err_training[:,2])
alpha_total = np.append(alpha_test, label_training[:,3])
alpha_err_total = np.append(alpha_err_test, err_training[:,3])
ak_total = np.append(ak_test, label_training[:,4])
ak_err_total = np.append(ak_err_test,err_training[:,4])
npix_total = np.append(npix_test, ngoodpix_training)
chisq_total = np.append(chisq_test,chisq_training)
print("finding unique values")
id_all, inds_unique = np.unique(id_total, return_index=True)
teff = teff_total[inds_unique]
teff_err = teff_err_total[inds_unique]
logg = logg_total[inds_unique]
logg_err = logg_err_total[inds_unique]
feh = feh_total[inds_unique]
feh_err = feh_err_total[inds_unique]
alpha = alpha_total[inds_unique]
alpha_err = alpha_err_total[inds_unique]
ak = ak_total[inds_unique]
ak_err = ak_err_total[inds_unique]
npix = npix_total[inds_unique]
chisq_all = chisq_total[inds_unique]
print("writing file")
outfile = "lamost_catalog.csv"
#outfile = "lamost_catalog_abundances.csv"
fout = open(outfile, "w")
header = "id,teff,logg,m_h,alpha_m,a_k,teff_err,logg_err,mh_err,alpha_err,ak_err,ngoodpix,chisq\n"
#header = "id," + label_names
fout.write(header)
for i,id_val in enumerate(id_all):
id_val = id_val.decode("utf-8")
id_short = id_val.split("/")[-1]
line = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" %(
id_short, teff[i], logg[i], feh[i], alpha[i], ak[i],
teff_err[i], logg_err[i], feh_err[i], alpha_err[i], ak_err[i],
npix[i], chisq_all[i])
fout.write(line)
fout.flush()
fout.close()
| mit | -2,803,129,928,123,322,400 | 36.851852 | 98 | 0.66895 | false |
ina-foss/ID-Fits | lib/datasets/lfw.py | 1 | 6348 | # ID-Fits
# Copyright (c) 2015 Institut National de l'Audiovisuel, INA, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import os
import numpy as np
import config
LFW, LFW_A, LFW_OWN = ["lfw", "lfwa", "lfwn"]
datasets = {
LFW: "lfw",
LFW_A: "lfw2",
LFW_OWN: "lfwn"
}
lfw_path = os.path.join(config.data_path, "lfw")
pairs_file = os.path.join(lfw_path, "pairs.txt")
people_file = os.path.join(lfw_path, "people.txt")
mapping_file = os.path.join(lfw_path, "mapping.txt")
def loadPeopleFile(filename, mapping):
    with open(filename, 'r') as f:
        f.readline()
        people = []
        for line in f.readlines():
            name, nb = line.split()
            nb = int(nb)
            people += [mapping[(name, index)] for index in range(1, nb+1)]
    return people
def loadPairsFile(filename, mapping):
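    """Parse an LFW pairs file into (matching, mismatching) dictionaries.
    The first line holds either "n" (a single set) or "s n" (s sets of n
    pairs each). Each set then lists n matching pairs ("name idx1 idx2")
    followed by n mismatching pairs ("name1 idx1 name2 idx2"); indices are
    converted to global image ids through ``mapping``.
    """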
f = open(filename, 'r')
line = f.readline().split()
if len(line) == 1:
s = 1
n = int(line[0])
else:
s = int(line[0])
n = int(line[1])
pairs = [({}, {}) for i in range(s)]
for matching_pairs, mismatching_pairs in pairs:
for i in range(n):
name, index1, index2 = f.readline().split()
index1 = int(index1)
index2 = int(index2)
if mapping[(name, index1)] not in matching_pairs:
matching_pairs[mapping[(name, index1)]] = []
matching_pairs[mapping[(name, index1)]].append(mapping[(name, index2)])
for i in range(n):
name1, index1, name2, index2 = f.readline().split()
index1 = int(index1)
index2 = int(index2)
if mapping[(name1, index1)] not in mismatching_pairs:
mismatching_pairs[mapping[(name1, index1)]] = []
mismatching_pairs[mapping[(name1, index1)]].append(mapping[(name2, index2)])
if s > 1:
return pairs
else:
return pairs[0]
def loadData(dataset, preprocess=False):
filename = os.path.join(lfw_path, dataset+".npy")
if not os.path.exists(filename):
raise Exception("Dataset %s unknown"%dataset)
data = np.load(filename)
if preprocess:
return preprocessData(data)
else:
return data
def preprocessData(raw_data):
return raw_data[:, 49:-49, 84:-84]
def loadSetsGroundTruth():
mapping = loadMapping()
return loadPairsFile(pairs_file, mapping)
def loadTestSets():
mapping = loadMapping()
sets = []
with open(people_file, 'r') as f:
sets_number = int(f.readline())
for _ in range(sets_number):
sets.append([])
set_size = int(f.readline())
for _ in range(set_size):
name, number = f.readline().split()
for index in range(1, int(number)+1):
sets[-1].append(mapping[(name, int(index))])
return sets
def loadTrainingSets():
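    """Build leave-one-out training sets from the LFW test sets.
    Training set i is the union of every test set except set i, following
    the usual LFW cross-validation protocol.
    """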
sets = loadTestSets()
training_sets = []
for i in range(len(sets)):
training_sets.append([])
for k in range(len(sets)-1):
training_sets[-1] += sets[(i+k+1) % len(sets)]
return training_sets
def loadMapping():
mapping = dict()
with open(mapping_file, "r") as f:
f.readline()
for line in f.readlines():
name, index, global_index = line.split()
index, global_index = int(index), int(global_index)
mapping[(name,index)] = global_index
return mapping
def loadDevData(subset="train", load_pairs=True, filename=None):
if filename:
if os.path.exists(filename):
people_file = filename
pairs_file = None
else:
raise ValueError("Unknown file %s"%filename)
else:
if subset == "train":
people_file = os.path.join(lfw_path, "peopleDevTrain.txt")
pairs_file = os.path.join(lfw_path, "pairsDevTrain.txt")
elif subset == "test":
people_file = os.path.join(lfw_path, "peopleDevTest.txt")
pairs_file = os.path.join(lfw_path, "pairsDevTest.txt")
else:
raise ValueError("Unknown subset value")
mapping = loadMapping()
if load_pairs and pairs_file:
return loadPeopleFile(people_file, mapping), loadPairsFile(pairs_file, mapping)
else:
return loadPeopleFile(people_file, mapping)
def loadTrainingDataLabels(training_set, min_nb_samples_per_class = 10):
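    """Select training images whose identity has at least
    ``min_nb_samples_per_class`` images inside ``training_set``.
    Returns the positions of those images within ``training_set`` and an
    integer class label for each of them.
    """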
mapping = loadMapping()
samples_per_classes = {}
classes, _ = zip(*mapping)
classes_count = {}
for name, index in mapping:
if mapping[(name, index)] in training_set:
if name not in classes_count:
classes_count[name] = 0
classes_count[name] += 1
kept_classes = []
for name, count in classes_count.iteritems():
if count >= min_nb_samples_per_class:
kept_classes.append(name)
classes_id = dict(zip(kept_classes, range(len(kept_classes))))
descs_indexes = []
y = []
for name, index in mapping:
if name in kept_classes and mapping[(name, index)] in training_set:
new_index = training_set.index(mapping[(name, index)])
descs_indexes.append(new_index)
y.append(classes_id[name])
return descs_indexes, np.array(y, dtype=np.int)
def reindex(indexes, ground_truth_mapping):
result_mapping = []
for mapping in ground_truth_mapping:
new_mapping = {}
for k in mapping.keys():
l = mapping[k]
new_mapping[indexes.index(k)] = []
for e in l:
new_mapping[indexes.index(k)].append(indexes.index(e))
result_mapping.append(new_mapping)
return tuple(result_mapping)
| lgpl-3.0 | -6,367,440,375,545,659,000 | 26.480519 | 88 | 0.59373 | false |
n2o/guhema | products/admin.py | 1 | 4627 | from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from modeltranslation.admin import TranslationAdmin
from pagedown.widgets import AdminPagedownWidget
from .models import (BandSawBlade, BandSawBladeIndicator, CircularSawBlade,
CircularSawBladeIndicator, Clamping, HackSawBlade,
HoleSaw, HoleSawDiameter, Indicator, JigSawBlade,
ProductGroup, SableSawBlade, SawBlade)
class PageDownAdmin(TranslationAdmin):
formfield_overrides = {
models.TextField: {'widget': AdminPagedownWidget}
}
def has_img_set(self, obj):
if obj.image:
return mark_safe("<img src='/static/admin/img/icon-yes.svg' alt='True'>")
else:
return mark_safe("<img src='/static/admin/img/icon-no.svg' alt='False'>")
@admin.register(SawBlade)
class SawBladeAdmin(PageDownAdmin):
list_display = ('type', 'group', 'quality', 'has_img_set')
search_fields = ['type', 'quality', 'description']
@admin.register(SableSawBlade)
class SableSawBladeAdmin(PageDownAdmin):
list_display = ('type', 'group', 'quality', 'toothing', 'cutting_metal', 'cutting_wood', 'cutting_minerals', 'has_img_set')
search_fields = ['type', 'quality', 'description']
save_as = True
@admin.register(HackSawBlade)
class HackSawBladeAdmin(PageDownAdmin):
list_display = ('type', 'group', 'quality', 'toothing', 'accessory', 'cutting_metal', 'cutting_wood', 'cutting_minerals', 'has_img_set')
search_fields = ['type', 'quality', 'description']
fieldsets = [
(_('Allgemein'), {'fields': ['quality', 'type', 'name', 'subcategory', 'description', 'group']}),
(_('Beschreibung'), {'fields': ['image', 'indicators', 'toothing', ('cutting_metal', 'cutting_wood', 'cutting_minerals'), 'accessory']}),
]
save_as = True
@admin.register(Indicator)
class IndicatorAdmin(PageDownAdmin):
fieldsets = [
(_('Allgemein'), {'fields': ['value']}),
(_('Abmessungen'), {'fields': ['width', 'strength', 'length', 'diameter']}),
(_('ZpZ'), {'fields': [('C', 'E', 'G', 'H', 'I', 'J', 'L', 'M', 'N', 'O', 'T', 'U', 'V', 'W', 'UE')]}),
]
@admin.register(HoleSaw)
class HoleSawAdmin(PageDownAdmin):
list_display = ('ordernr', 'category', 'has_img_set')
search_fields = ['ordernr']
@admin.register(ProductGroup)
class ProductGroupAdmin(PageDownAdmin):
list_display = ('name', 'public')
fieldsets = [
(_('Allgemein'), {'fields': ['name', 'description', 'image', 'public']}),
]
@admin.register(BandSawBlade)
class BandSawBladeAdmin(PageDownAdmin):
list_display = ('type', 'type2', 'group', 'quality', 'has_img_set')
search_fields = ['type', 'type2', 'quality', 'description']
fieldsets = [
(_('Allgemein'), {'fields': ['quality', 'name', 'heading', 'description', 'group']}),
(_('Ausführungen'), {'fields': [('type', 'type_description'), ('type2', 'type2_description'), ('image', 'image2'), 'bandsaw_indicators', 'cols']}),
]
@admin.register(JigSawBlade)
class JigSawBladeAdmin(PageDownAdmin):
list_display = ('type', 'subcategory', 'has_img_set')
search_fields = ['type', 'subcategory']
fieldsets = [
(_('Allgemein'), {'fields': ['quality', 'subcategory', 'description', 'group']}),
(_('Ausführungen'), {'fields': ['type', 'image', 'tooth_separation', 'length', ('cutting_metal', 'cutting_wood')]}),
]
@admin.register(CircularSawBlade)
class CircularSawBladeAdmin(PageDownAdmin):
list_display = ('type', 'quality', 'has_img_set')
search_fields = ['type']
fieldsets = [
(_('Allgemein'), {'fields': ['quality', 'type', 'name', 'description', 'group']}),
(_('Kennziffer') + '1', {'fields': ['circular_indicators']}),
]
# Define a new FlatPageAdmin
class MyFlatPageAdmin(PageDownAdmin):
fieldsets = (
(_('Allgemein'), {'fields': ('url', 'title', 'content', 'sites')}),
(_('Advanced options'), {
'classes': ('collapse', ),
'fields': (
'enable_comments',
'registration_required',
'template_name',
),
}),
)
admin.site.register(CircularSawBladeIndicator)
admin.site.register(BandSawBladeIndicator)
admin.site.register(Clamping, TranslationAdmin)
admin.site.register(HoleSawDiameter)
# Re-register FlatPageAdmin
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, MyFlatPageAdmin)
| mit | 7,575,249,779,729,761,000 | 35.706349 | 155 | 0.626811 | false |
NervanaSystems/coach | rl_coach/orchestrators/kubernetes_orchestrator.py | 1 | 19633 | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import uuid
import json
import time
import sys
from enum import Enum
from typing import List
from configparser import ConfigParser, Error
from multiprocessing import Process
from rl_coach.base_parameters import RunType
from rl_coach.orchestrators.deploy import Deploy, DeployParameters
from kubernetes import client as k8sclient, config as k8sconfig
from rl_coach.memories.backend.memory import MemoryBackendParameters
from rl_coach.memories.backend.memory_impl import get_memory_backend
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.data_store_impl import get_data_store
from rl_coach.logger import screen
class RunTypeParameters():
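    """Parameters describing a single run type (trainer or rollout worker):
    container image, command and arguments, checkpoint directory, number of
    replicas and a dict used for orchestration bookkeeping (e.g. the
    Kubernetes job name once deployed)."""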
def __init__(self, image: str, command: list(), arguments: list() = None,
run_type: str = str(RunType.TRAINER), checkpoint_dir: str = "/checkpoint",
num_replicas: int = 1, orchestration_params: dict=None):
self.image = image
self.command = command
if not arguments:
arguments = list()
self.arguments = arguments
self.run_type = run_type
self.checkpoint_dir = checkpoint_dir
self.num_replicas = num_replicas
if not orchestration_params:
orchestration_params = dict()
self.orchestration_params = orchestration_params
class KubernetesParameters(DeployParameters):
def __init__(self, run_type_params: List[RunTypeParameters], kubeconfig: str = None, namespace: str = None,
nfs_server: str = None, nfs_path: str = None, checkpoint_dir: str = '/checkpoint',
memory_backend_parameters: MemoryBackendParameters = None, data_store_params: DataStoreParameters = None):
self.run_type_params = {}
for run_type_param in run_type_params:
self.run_type_params[run_type_param.run_type] = run_type_param
self.kubeconfig = kubeconfig
self.namespace = namespace
self.nfs_server = nfs_server
self.nfs_path = nfs_path
self.checkpoint_dir = checkpoint_dir
self.memory_backend_parameters = memory_backend_parameters
self.data_store_params = data_store_params
class Kubernetes(Deploy):
"""
    An orchestrator implementation that uses Kubernetes to deploy the components, such as the training and
    rollout workers and Redis pub/sub, when Coach is used in distributed mode.
"""
def __init__(self, params: KubernetesParameters):
"""
:param params: The Kubernetes parameters which are used for deploying the components in Coach. These parameters
include namespace and kubeconfig.
"""
super().__init__(params)
self.params = params
if self.params.kubeconfig:
k8sconfig.load_kube_config()
else:
k8sconfig.load_incluster_config()
if not self.params.namespace:
_, current_context = k8sconfig.list_kube_config_contexts()
self.params.namespace = current_context['context']['namespace']
if os.environ.get('http_proxy'):
k8sclient.Configuration._default.proxy = os.environ.get('http_proxy')
self.params.memory_backend_parameters.orchestrator_params = {'namespace': self.params.namespace}
self.memory_backend = get_memory_backend(self.params.memory_backend_parameters)
self.params.data_store_params.orchestrator_params = {'namespace': self.params.namespace}
self.params.data_store_params.namespace = self.params.namespace
self.data_store = get_data_store(self.params.data_store_params)
if self.params.data_store_params.store_type == "s3":
self.s3_access_key = None
self.s3_secret_key = None
if self.params.data_store_params.creds_file:
s3config = ConfigParser()
s3config.read(self.params.data_store_params.creds_file)
try:
self.s3_access_key = s3config.get('default', 'aws_access_key_id')
self.s3_secret_key = s3config.get('default', 'aws_secret_access_key')
except Error as e:
screen.print("Error when reading S3 credentials file: %s", e)
else:
self.s3_access_key = os.environ.get('ACCESS_KEY_ID')
self.s3_secret_key = os.environ.get('SECRET_ACCESS_KEY')
def setup(self, crd=None) -> bool:
"""
Deploys the memory backend and data stores if required.
"""
self.memory_backend.deploy()
if self.params.data_store_params.store_type == "redis":
self.data_store.params.redis_address = self.memory_backend.params.redis_address
self.data_store.params.redis_port = self.memory_backend.params.redis_port
if not self.data_store.deploy():
return False
if self.params.data_store_params.store_type == "nfs":
self.nfs_pvc = self.data_store.get_info()
# Upload checkpoints in checkpoint_restore_dir (if provided) to the data store
self.data_store.setup_checkpoint_dir(crd)
return True
def deploy_trainer(self) -> bool:
"""
Deploys the training worker in Kubernetes.
"""
trainer_params = self.params.run_type_params.get(str(RunType.TRAINER), None)
if not trainer_params:
return False
trainer_params.command += ['--memory_backend_params', json.dumps(self.params.memory_backend_parameters.__dict__)]
trainer_params.command += ['--data_store_params', json.dumps(self.params.data_store_params.__dict__)]
name = "{}-{}".format(trainer_params.run_type, uuid.uuid4())
        # TODO: instead of defining each container and template spec from scratch, load the default
        # configuration and modify it as necessary depending on the store type
if self.params.data_store_params.store_type == "nfs":
container = k8sclient.V1Container(
name=name,
image=trainer_params.image,
command=trainer_params.command,
args=trainer_params.arguments,
image_pull_policy='Always',
volume_mounts=[k8sclient.V1VolumeMount(
name='nfs-pvc',
mount_path=trainer_params.checkpoint_dir
)],
stdin=True,
tty=True
)
template = k8sclient.V1PodTemplateSpec(
metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
spec=k8sclient.V1PodSpec(
containers=[container],
volumes=[k8sclient.V1Volume(
name="nfs-pvc",
persistent_volume_claim=self.nfs_pvc
)],
restart_policy='Never'
),
)
elif self.params.data_store_params.store_type == "s3":
container = k8sclient.V1Container(
name=name,
image=trainer_params.image,
command=trainer_params.command,
args=trainer_params.arguments,
image_pull_policy='Always',
env=[k8sclient.V1EnvVar("ACCESS_KEY_ID", self.s3_access_key),
k8sclient.V1EnvVar("SECRET_ACCESS_KEY", self.s3_secret_key)],
stdin=True,
tty=True
)
template = k8sclient.V1PodTemplateSpec(
metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
spec=k8sclient.V1PodSpec(
containers=[container],
restart_policy='Never'
),
)
elif self.params.data_store_params.store_type == "redis":
container = k8sclient.V1Container(
name=name,
image=trainer_params.image,
command=trainer_params.command,
args=trainer_params.arguments,
image_pull_policy='Always',
stdin=True,
tty=True,
resources=k8sclient.V1ResourceRequirements(
limits={
"cpu": "24",
"memory": "4Gi",
"nvidia.com/gpu": "1",
}
),
)
template = k8sclient.V1PodTemplateSpec(
metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
spec=k8sclient.V1PodSpec(
containers=[container],
restart_policy='Never'
),
)
else:
raise ValueError("unexpected store_type {}. expected 's3', 'nfs', 'redis'".format(
self.params.data_store_params.store_type
))
job_spec = k8sclient.V1JobSpec(
completions=1,
template=template
)
job = k8sclient.V1Job(
api_version="batch/v1",
kind="Job",
metadata=k8sclient.V1ObjectMeta(name=name),
spec=job_spec
)
api_client = k8sclient.BatchV1Api()
try:
api_client.create_namespaced_job(self.params.namespace, job)
trainer_params.orchestration_params['job_name'] = name
return True
except k8sclient.rest.ApiException as e:
screen.print("Got exception: %s\n while creating job", e)
return False
def deploy_worker(self):
"""
Deploys the rollout worker(s) in Kubernetes.
"""
worker_params = self.params.run_type_params.get(str(RunType.ROLLOUT_WORKER), None)
if not worker_params:
return False
# At this point, the memory backend and data store have been deployed and in the process,
# these parameters have been updated to include things like the hostname and port the
# service can be found at.
worker_params.command += ['--memory_backend_params', json.dumps(self.params.memory_backend_parameters.__dict__)]
worker_params.command += ['--data_store_params', json.dumps(self.params.data_store_params.__dict__)]
worker_params.command += ['--num_workers', '{}'.format(worker_params.num_replicas)]
name = "{}-{}".format(worker_params.run_type, uuid.uuid4())
        # TODO: instead of defining each container and template spec from scratch, load the default
        # configuration and modify it as necessary depending on the store type
if self.params.data_store_params.store_type == "nfs":
container = k8sclient.V1Container(
name=name,
image=worker_params.image,
command=worker_params.command,
args=worker_params.arguments,
image_pull_policy='Always',
volume_mounts=[k8sclient.V1VolumeMount(
name='nfs-pvc',
mount_path=worker_params.checkpoint_dir
)],
stdin=True,
tty=True
)
template = k8sclient.V1PodTemplateSpec(
metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
spec=k8sclient.V1PodSpec(
containers=[container],
volumes=[k8sclient.V1Volume(
name="nfs-pvc",
persistent_volume_claim=self.nfs_pvc
)],
restart_policy='Never'
),
)
elif self.params.data_store_params.store_type == "s3":
container = k8sclient.V1Container(
name=name,
image=worker_params.image,
command=worker_params.command,
args=worker_params.arguments,
image_pull_policy='Always',
env=[k8sclient.V1EnvVar("ACCESS_KEY_ID", self.s3_access_key),
k8sclient.V1EnvVar("SECRET_ACCESS_KEY", self.s3_secret_key)],
stdin=True,
tty=True
)
template = k8sclient.V1PodTemplateSpec(
metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
spec=k8sclient.V1PodSpec(
containers=[container],
restart_policy='Never'
)
)
elif self.params.data_store_params.store_type == "redis":
container = k8sclient.V1Container(
name=name,
image=worker_params.image,
command=worker_params.command,
args=worker_params.arguments,
image_pull_policy='Always',
stdin=True,
tty=True,
resources=k8sclient.V1ResourceRequirements(
limits={
"cpu": "4",
"memory": "4Gi",
# "nvidia.com/gpu": "0",
}
),
)
template = k8sclient.V1PodTemplateSpec(
metadata=k8sclient.V1ObjectMeta(labels={'app': name}),
spec=k8sclient.V1PodSpec(
containers=[container],
restart_policy='Never'
)
)
else:
raise ValueError('unexpected store type {}'.format(self.params.data_store_params.store_type))
job_spec = k8sclient.V1JobSpec(
completions=worker_params.num_replicas,
parallelism=worker_params.num_replicas,
template=template
)
job = k8sclient.V1Job(
api_version="batch/v1",
kind="Job",
metadata=k8sclient.V1ObjectMeta(name=name),
spec=job_spec
)
api_client = k8sclient.BatchV1Api()
try:
api_client.create_namespaced_job(self.params.namespace, job)
worker_params.orchestration_params['job_name'] = name
return True
except k8sclient.rest.ApiException as e:
screen.print("Got exception: %s\n while creating Job", e)
return False
def worker_logs(self, path='./logs'):
"""
:param path: Path to store the worker logs.
"""
worker_params = self.params.run_type_params.get(str(RunType.ROLLOUT_WORKER), None)
if not worker_params:
return
api_client = k8sclient.CoreV1Api()
pods = None
try:
pods = api_client.list_namespaced_pod(self.params.namespace, label_selector='app={}'.format(
worker_params.orchestration_params['job_name']
))
# pod = pods.items[0]
except k8sclient.rest.ApiException as e:
screen.print("Got exception: %s\n while reading pods", e)
return
if not pods or len(pods.items) == 0:
return
for pod in pods.items:
Process(target=self._tail_log_file, args=(pod.metadata.name, api_client, self.params.namespace, path), daemon=True).start()
def _tail_log_file(self, pod_name, api_client, namespace, path):
if not os.path.exists(path):
os.mkdir(path)
sys.stdout = open(os.path.join(path, pod_name), 'w')
self.tail_log(pod_name, api_client)
def trainer_logs(self):
"""
Get the logs from trainer.
"""
trainer_params = self.params.run_type_params.get(str(RunType.TRAINER), None)
if not trainer_params:
return
api_client = k8sclient.CoreV1Api()
pod = None
try:
pods = api_client.list_namespaced_pod(self.params.namespace, label_selector='app={}'.format(
trainer_params.orchestration_params['job_name']
))
pod = pods.items[0]
except k8sclient.rest.ApiException as e:
screen.print("Got exception: %s\n while reading pods", e)
return
if not pod:
return
return self.tail_log(pod.metadata.name, api_client)
def tail_log(self, pod_name, corev1_api):
while True:
time.sleep(10)
# Try to tail the pod logs
try:
for line in corev1_api.read_namespaced_pod_log(
pod_name, self.params.namespace, follow=True,
_preload_content=False
):
screen.print(line.decode('utf-8'), flush=True, end='')
            except k8sclient.rest.ApiException:
                # The pod may not be ready yet; fall through to the status checks below.
                pass
            # This part gets executed if the pod is in one of the following phases: not ready, failed or terminated.
# Check if the pod has errored out, else just try again.
# Get the pod
try:
pod = corev1_api.read_namespaced_pod(pod_name, self.params.namespace)
except k8sclient.rest.ApiException as e:
continue
if not hasattr(pod, 'status') or not pod.status:
continue
if not hasattr(pod.status, 'container_statuses') or not pod.status.container_statuses:
continue
for container_status in pod.status.container_statuses:
if container_status.state.waiting is not None:
                    if container_status.state.waiting.reason in (
                            'Error', 'CrashLoopBackOff', 'ImagePullBackOff', 'ErrImagePull'):
                        return 1
if container_status.state.terminated is not None:
return container_status.state.terminated.exit_code
def undeploy(self):
"""
Undeploy all the components, such as trainer and rollout worker(s), Redis pub/sub and data store, when required.
"""
trainer_params = self.params.run_type_params.get(str(RunType.TRAINER), None)
api_client = k8sclient.BatchV1Api()
delete_options = k8sclient.V1DeleteOptions(
propagation_policy="Foreground"
)
if trainer_params:
try:
api_client.delete_namespaced_job(trainer_params.orchestration_params['job_name'], self.params.namespace, delete_options)
except k8sclient.rest.ApiException as e:
screen.print("Got exception: %s\n while deleting trainer", e)
worker_params = self.params.run_type_params.get(str(RunType.ROLLOUT_WORKER), None)
if worker_params:
try:
api_client.delete_namespaced_job(worker_params.orchestration_params['job_name'], self.params.namespace, delete_options)
except k8sclient.rest.ApiException as e:
screen.print("Got exception: %s\n while deleting workers", e)
self.memory_backend.undeploy()
self.data_store.undeploy()
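# A minimal usage sketch (illustrative only; the constructor and deploy entry
# points are assumptions based on methods defined earlier in this class, not a
# guaranteed API):
#
#   orchestrator = Kubernetes(params)
#   if orchestrator.deploy_trainer():
#       exit_code = orchestrator.trainer_logs()
#       orchestrator.undeploy()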
| apache-2.0 | -2,720,742,902,227,571,000 | 39.817048 | 136 | 0.576173 | false |
lantip/aws-filemanager | cloud_browser/tests/tests_views.py | 1 | 2226 | """Cloud browser views.py tests."""
from django.test import TestCase
import mock
from cloud_browser.cloud import errors
from cloud_browser.cloud.base import CloudContainer
from cloud_browser.common import ROOT
from cloud_browser import views
class TestBrowserRedirect(TestCase):
"""Tests for browser_redirect."""
def setUp(self): # pylint: disable=invalid-name
self.cloudcontainer_patcher = mock.patch.object(CloudContainer,
'__init__')
self.redirect_patcher = mock.patch('cloud_browser.views.redirect')
self.container_fn = self.cloudcontainer_patcher.start()
self.redirect_fn = self.redirect_patcher.start()
def tearDown(self): # pylint: disable=invalid-name
self.cloudcontainer_patcher.stop()
self.redirect_patcher.stop()
def test_browser_redirect(self):
self.container_fn.name = 'redirect_test'
self.container_fn.has_directory.return_value = True
views.browser_redirect(self.container_fn, 'key/of/dir/')
self.container_fn.has_directory.assert_called_with('key/of/dir/')
self.redirect_fn.assert_called_with('cloud_browser_browser',
path='redirect_test/key/of/dir',
permanent=False)
views.browser_redirect(self.container_fn, ROOT)
self.container_fn.has_directory.assert_called_with('key/of/dir/')
self.redirect_fn.assert_called_with('cloud_browser_browser',
path='redirect_test',
permanent=False)
# pylint: disable=invalid-name
def test_browser_redirect_no_object_exception(self):
self.container_fn.name = 'redirect_test'
self.container_fn.has_directory.side_effect = errors.NoObjectException
views.browser_redirect(self.container_fn, 'key/of/dir/')
self.container_fn.has_directory.assert_called_with('key/of/dir/')
self.redirect_fn.assert_called_with('cloud_browser_browser',
path='redirect_test',
permanent=False)
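    # Illustrative extra test (an addition, based on the assertions above which
    # suggest the pseudo-root path skips the directory lookup entirely).
    def test_browser_redirect_root_skips_lookup(self):
        self.container_fn.name = 'redirect_test'
        views.browser_redirect(self.container_fn, ROOT)
        self.container_fn.has_directory.assert_not_called()
        self.redirect_fn.assert_called_with('cloud_browser_browser',
                                            path='redirect_test',
                                            permanent=False)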
| mit | -6,664,280,687,060,989,000 | 42.647059 | 78 | 0.608715 | false |
tonioo/modoboa | modoboa/admin/models/mailbox.py | 1 | 12828 | # -*- coding: utf-8 -*-
"""Models related to mailboxes management."""
from __future__ import unicode_literals
import os
import pwd
from reversion import revisions as reversion
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.db.models.manager import Manager
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.core.models import User
from modoboa.lib import exceptions as lib_exceptions
from modoboa.lib.email_utils import split_mailbox
from modoboa.lib.sysutils import doveadm_cmd
from modoboa.parameters import tools as param_tools
from .base import AdminObject
from .domain import Domain
class QuotaManager(models.Manager):
"""Custom manager for Quota."""
def get_domain_usage(self, domain):
"""Return current usage for domain."""
qset = self.get_queryset().filter(
username__endswith="@{}".format(domain.name))
result = qset.aggregate(usage=models.Sum("bytes")).get("usage", 0)
if result is None:
result = 0
return result
class Quota(models.Model):
"""Keeps track of Mailbox current quota."""
username = models.EmailField(primary_key=True, max_length=254)
bytes = models.BigIntegerField(default=0) # NOQA:A003
messages = models.IntegerField(default=0)
objects = QuotaManager()
class Meta:
app_label = "admin"
class MailboxManager(Manager):
"""Custom manager for Mailbox."""
def get_for_admin(self, admin, squery=None):
"""Return the mailboxes that belong to this admin.
The result will contain the mailboxes defined for each domain that
user can see.
:param string squery: a search query
:return: a list of ``Mailbox`` objects
"""
qf = None
if squery is not None:
if "@" in squery:
parts = squery.split("@")
addrfilter = "@".join(parts[:-1])
domfilter = parts[-1]
qf = (
Q(address__contains=addrfilter) &
Q(domain__name__contains=domfilter)
)
else:
qf = (
Q(address__contains=squery) |
Q(domain__name__contains=squery)
)
ids = admin.objectaccess_set \
.filter(content_type=ContentType.objects.get_for_model(Mailbox)) \
.values_list("object_id", flat=True)
if qf is not None:
qf = Q(pk__in=ids) & qf
else:
qf = Q(pk__in=ids)
return self.get_queryset().select_related().filter(qf)
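    def get_for_admin_count(self, admin, squery=None):
        """Illustrative convenience method (an addition, not original modoboa
        code): number of mailboxes ``admin`` can see for an optional search
        query, built on get_for_admin above."""
        return self.get_for_admin(admin, squery).count()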
@python_2_unicode_compatible
class Mailbox(AdminObject):
"""User mailbox."""
address = models.CharField(
ugettext_lazy("address"), max_length=252,
help_text=ugettext_lazy(
"Mailbox address (without the @domain.tld part)")
)
quota = models.PositiveIntegerField(default=0)
use_domain_quota = models.BooleanField(default=False)
domain = models.ForeignKey(Domain, on_delete=models.CASCADE)
user = models.OneToOneField(User, on_delete=models.CASCADE)
objects = MailboxManager()
class Meta:
permissions = (
("view_mailboxes", "View mailboxes"),
)
app_label = "admin"
def __init__(self, *args, **kwargs):
super(Mailbox, self).__init__(*args, **kwargs)
self.__mail_home = None
self.old_full_address = self.full_address
def __str__(self):
return smart_text(self.full_address)
def __full_address(self, localpart):
return "%s@%s" % (localpart, self.domain.name)
@property
def full_address(self):
return self.__full_address(self.address)
@property
def enabled(self):
return self.user.is_active
@property
def alias_count(self):
return (
self.recipientalias_set.select_related("alias")
.filter(alias__internal=False).count()
)
@property
def mail_home(self):
"""Retrieve the home directory of this mailbox.
The home directory refers to the place on the file system
where the mailbox data is stored.
        We ask dovecot for this information because the location depends on its
        configuration (several path patterns are possible) and we don't want to
        reimplement that logic.
"""
admin_params = dict(param_tools.get_global_parameters("admin"))
if not admin_params.get("handle_mailboxes"):
return None
if self.__mail_home is None:
curuser = pwd.getpwuid(os.getuid()).pw_name
mbowner = admin_params["mailboxes_owner"]
options = {}
if curuser != mbowner:
options["sudo_user"] = mbowner
code, output = doveadm_cmd(
"user -f home %s" % self.full_address, **options
)
if code:
raise lib_exceptions.InternalError(
_("Failed to retrieve mailbox location (%s)") % output)
self.__mail_home = output.strip()
return self.__mail_home
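    # For reference, the underlying call built above is roughly
    #   doveadm user -f home user@example.org
    # which, on a typical dovecot setup (an assumption, not guaranteed by this
    # code), prints a single path such as /var/vmail/example.org/user.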
@property
def alias_addresses(self):
"""Return all alias address of this mailbox.
:rtype: list of string
"""
qset = (
self.aliasrecipient_set.select_related("alias")
.filter(alias__internal=False)
)
aliases = [alr.alias.address for alr in qset]
return aliases
@property
def quota_value(self):
"""Retrieve the ``Quota`` instance associated to this mailbox."""
if not hasattr(self, "_quota_value"):
try:
self._quota_value = Quota.objects.get(
username=self.full_address)
except Quota.DoesNotExist:
return None
return self._quota_value
@quota_value.setter
def quota_value(self, instance):
"""Set the ``Quota`` for this mailbox."""
self._quota_value = instance
def rename_dir(self, old_mail_home):
"""Rename local directory if needed."""
hm = param_tools.get_global_parameter(
"handle_mailboxes", raise_exception=False)
if not hm:
return
MailboxOperation.objects.create(
mailbox=self, type="rename", argument=old_mail_home
)
def rename(self, address, domain):
"""Rename the mailbox.
To update the associated Quota record, we must create a new
one first, update the foreign key and then we can delete the
original record!
:param string address: the new mailbox's address (local part)
:param Domain domain: the new mailbox's domain
"""
old_mail_home = self.mail_home
old_qvalue = self.quota_value
self.address = address
self.domain = domain
self.quota_value = Quota.objects.create(
username=self.full_address, bytes=old_qvalue.bytes,
messages=old_qvalue.messages
)
old_qvalue.delete()
self.rename_dir(old_mail_home)
def delete_dir(self):
hm = param_tools.get_global_parameter(
"handle_mailboxes", raise_exception=False)
if not hm:
return
MailboxOperation.objects.create(type="delete", argument=self.mail_home)
def set_quota(self, value=None, override_rules=False):
"""Set or update quota value for this mailbox.
A value equal to 0 means the mailbox won't have any quota. The
following cases allow people to define such behaviour:
* The domain has no quota
* :keyword:`override_rules` is True
:param integer value: the quota's value
:param bool override_rules: allow to override defined quota rules
"""
old_quota = self.quota
if value is None:
if self.use_domain_quota:
self.quota = self.domain.default_mailbox_quota
else:
self.quota = 0
else:
self.quota = value
if self.quota == 0:
if self.domain.quota and not override_rules:
raise lib_exceptions.BadRequest(_("A quota is required"))
elif self.domain.quota:
quota_usage = self.domain.allocated_quota
if old_quota:
quota_usage -= old_quota
if quota_usage + self.quota > self.domain.quota:
raise lib_exceptions.BadRequest(_("Domain quota exceeded"))
def get_quota(self):
"""Get quota limit.
:rtype: int
"""
return int(self.quota_value.bytes / 1048576)
def get_quota_in_percent(self):
"""Get current quota usage.
:rtype: int
"""
if not self.quota:
return 0
return int(
self.quota_value.bytes / float(self.quota * 1048576) * 100
)
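    def quota_summary(self):
        """Illustrative helper (an addition, not original modoboa code): a short
        string combining the two quota getters above."""
        return "{} MB ({}%)".format(self.get_quota(), self.get_quota_in_percent())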
def post_create(self, creator):
from modoboa.lib.permissions import grant_access_to_object
super(Mailbox, self).post_create(creator)
conditions = (
creator.has_perm("admin.add_mailbox"),
not self.user.has_perm("admin.add_domain")
)
if all(conditions):
# An admin is creating a new mailbox. Give
# access to that mailbox (and the associated
# account) to the appropriate domain admins,
# except if the new account has a more important
# role (SuperAdmin, Reseller)
for admin in self.domain.admins:
if admin == creator:
continue
grant_access_to_object(admin, self)
grant_access_to_object(admin, self.user)
def update_from_dict(self, user, values):
"""Update mailbox from a dictionary."""
newaddress = None
if values["email"] != self.full_address:
newaddress = values["email"]
elif (self.user.role == "SimpleUsers" and
self.user.username != self.full_address):
newaddress = self.user.username
if newaddress is not None:
local_part, domname = split_mailbox(newaddress)
domain = Domain.objects.filter(name=domname).first()
if domain is None:
raise lib_exceptions.NotFound(_("Domain does not exist"))
if not user.can_access(domain):
raise lib_exceptions.PermDeniedException
if "use_domain_quota" in values:
self.use_domain_quota = values["use_domain_quota"]
if "use_domain_quota" in values or "quota" in values:
override_rules = (
not self.quota or user.is_superuser or
user.has_perm("admin.add_domain") and
not user.userobjectlimit_set.get(name="quota").max_value
)
self.set_quota(values["quota"], override_rules)
if newaddress:
self.rename(local_part, domain)
self.save()
def save(self, *args, **kwargs):
"""Custom save.
We check that the address is unique and we make sure a quota
record is defined for this mailbox.
"""
qset = Mailbox.objects.filter(address=self.address, domain=self.domain)
if self.pk:
qset = qset.exclude(pk=self.pk)
if qset.exists():
raise lib_exceptions.Conflict(
_("Mailbox {} already exists").format(self))
if self.quota_value is None:
self.quota_value, created = Quota.objects.get_or_create(
username=self.full_address)
super(Mailbox, self).save(*args, **kwargs)
reversion.register(Mailbox)
@python_2_unicode_compatible
class SenderAddress(models.Model):
"""Extra sender address for Mailbox."""
address = models.EmailField()
mailbox = models.ForeignKey(Mailbox, on_delete=models.CASCADE)
class Meta:
app_label = "admin"
unique_together = [
("address", "mailbox"),
]
def __str__(self):
"""Return address."""
return smart_text(self.address)
reversion.register(SenderAddress)
class MailboxOperation(models.Model):
"""An operation on a mailbox."""
mailbox = models.ForeignKey(Mailbox, blank=True, null=True,
on_delete=models.CASCADE)
type = models.CharField( # NOQA:A003
max_length=20, choices=(("rename", "rename"), ("delete", "delete"))
)
argument = models.TextField()
class Meta:
app_label = "admin"
def __str__(self):
if self.type == "rename":
return "Rename %s -> %s" % (self.argument, self.mailbox.mail_home)
return "Delete %s" % self.argument
| isc | -8,243,152,318,968,370,000 | 31.72449 | 79 | 0.588634 | false |
sxjscience/tvm | python/tvm/relay/transform/memory_alloc.py | 1 | 14547 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,len-as-condition,too-many-nested-blocks
"""
A pass for manifesting explicit memory allocations.
"""
import numpy as np
from tvm.ir.transform import PassContext, module_pass
from tvm.relay.transform import InferType
from tvm import nd, container
from ..function import Function
from ..expr_functor import ExprVisitor, ExprMutator
from ..scope_builder import ScopeBuilder
from .. import op
from ... import DataType, register_func
from .. import ty, expr
from ..backend import compile_engine
from ..op.memory import flatten_tuple_type, from_tuple_type, to_tuple_type
from ... import cpu
from ..op.memory import alloc_storage
from ..analysis import context_analysis
from ..._ffi.runtime_ctypes import TVMContext
def alloc_tensor(storage, shape, dtype="float32", assert_shape=None):
offset = expr.const(0, dtype="int64")
return op.memory.alloc_tensor(storage, offset, shape, dtype, assert_shape)
def is_primitive(call):
return (
hasattr(call, "op")
and hasattr(call.op, "attrs")
and hasattr(call.op.attrs, "Primitive")
and int(call.op.attrs.Primitive) == 1
)
def is_device_copy(func):
"""
    Check if the current relay expression is a device copy call. If it is a
    function, we can simply check its body because the device_copy op is opaque.
"""
if isinstance(func, Function):
body = func.body
return isinstance(body, expr.Call) and body.op == op.get("device_copy")
if isinstance(func, expr.Call):
return func.op == op.get("device_copy")
return False
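# Illustrative check (an addition, not part of the original pass): a bare
# device_copy call should be recognised by the helper above. The device types
# used here are arbitrary placeholders.
def _example_is_device_copy():
    x = expr.var("x", shape=(4,), dtype="float32")
    copy_call = op.tensor.device_copy(x, cpu(0), TVMContext(2, 0))
    return is_device_copy(copy_call)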
class CheckReshapeOnly(ExprVisitor):
"""A pass to check if the fused op contains only reshape ops."""
def __init__(self):
super().__init__()
self._reshape_ops = [
op.get("reshape"),
op.get("contrib_reverse_reshape"),
op.get("dyn.reshape"),
]
self.reshape_only = True
def visit_call(self, call):
if not self.reshape_only:
return
if call.op not in self._reshape_ops:
self.reshape_only = False
for arg in call.args:
self.visit(arg)
def is_reshape_only(func):
"""Check if the primitive function contains only reshape ops."""
check = CheckReshapeOnly()
check.visit(func)
return check.reshape_only
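# Illustrative check (an addition, not part of the original pass), assuming the
# standard relay reshape operator is exposed as op.reshape: a function whose
# body is a single reshape call should be flagged as reshape-only.
def _example_is_reshape_only():
    x = expr.var("x", shape=(1, 4), dtype="float32")
    func = Function([x], op.reshape(x, newshape=(2, 2)))
    return is_reshape_only(func)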
class ManifestAllocPass(ExprMutator):
"""A pass for explicitly manifesting all memory allocations in Relay."""
def __init__(self, target_host, context_analysis_map):
self.invoke_tvm = op.vm.invoke_tvm_op
self.shape_func = op.vm.shape_func
self.shape_of = op.vm.shape_of
self.reshape_tensor = op.vm.reshape_tensor
self.scopes = [ScopeBuilder()]
self.target_host = target_host
self.default_context = cpu(0)
self.compute_dtype = "int64"
self.context_analysis_map = context_analysis_map
super().__init__()
def get_context(self, exp):
"""Get the context of a given expression"""
assert exp in self.context_analysis_map, exp.astext(False)
val = self.context_analysis_map[exp]
# val[0], val[1] are device_type and device_id, respectively.
        # Unpacking won't be needed once this pass is ported to C++.
assert len(val) == 2
return TVMContext(val[0].value, val[1].value)
def device_copy(self, inp, src_ctx, dst_ctx):
"""Insert a device copy node."""
return self.visit(op.tensor.device_copy(inp, src_ctx, dst_ctx))
def current_scope(self):
return self.scopes[-1]
def visit_tuple(self, tup):
scope = self.current_scope()
new_fields = []
for field in tup.fields:
field = self.visit(field)
if isinstance(field, expr.Constant):
field = scope.let("const", field)
new_fields.append(field)
return expr.Tuple(new_fields)
def compute_alignment(self, dtype):
dtype = DataType(dtype)
align = (dtype.bits // 8) * dtype.lanes
# MAGIC CONSTANT FROM device_api.h
if align < 64:
align = 64
return expr.const(align, dtype="int64")
def compute_storage_in_relay(self, shape, dtype):
dtype = DataType(dtype)
els = op.prod(shape)
num = expr.const(dtype.bits * dtype.lanes, self.compute_dtype)
num = num + expr.const(7, self.compute_dtype)
div = expr.const(8, self.compute_dtype)
return els * (num / div)
def compute_storage(self, tensor_type):
dtype = DataType(tensor_type.dtype)
shape = [int(sh) for sh in tensor_type.shape]
size = 1
for sh in shape:
size *= sh
size *= (dtype.bits * dtype.lanes + 7) // 8
return expr.const(size, dtype=self.compute_dtype)
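    # Worked example (for illustration only): a float32 tensor of shape (2, 3)
    # occupies 2 * 3 * ((32 * 1 + 7) // 8) = 24 bytes; compute_storage_in_relay
    # performs the same arithmetic, but on shapes only known at runtime.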
def make_static_allocation(self, scope, tensor_type, ctx, name_hint):
"""Allocate a tensor with a statically known shape."""
shape = [int(sh) for sh in tensor_type.shape]
if len(shape) == 0:
shape = expr.const(np.empty((), dtype=self.compute_dtype), dtype=self.compute_dtype)
else:
shape = expr.const(np.array(shape), dtype=self.compute_dtype)
size = self.compute_storage(tensor_type)
alignment = self.compute_alignment(tensor_type.dtype)
dtype = tensor_type.dtype
sto = scope.let("storage_{0}".format(name_hint), alloc_storage(size, alignment, ctx, dtype))
# TODO(@jroesch): There is a bug with typing based on the constant shape.
tensor = alloc_tensor(sto, shape, dtype, tensor_type.shape)
return scope.let("tensor_{0}".format(name_hint), tensor)
def visit_let(self, let):
scope = ScopeBuilder()
self.scopes.append(scope)
while isinstance(let, expr.Let):
new_val = self.visit(let.value)
scope.let(let.var, new_val)
let = let.body
new_body = self.visit(let)
scope.ret(new_body)
self.scopes.pop()
return scope.get()
def emit_shape_func(self, scope, func, new_args):
"""Insert the shape function given a primitive function."""
shape_func_ins = []
engine = compile_engine.get()
cfunc = engine.lower_shape_func(func, self.target_host)
input_states = cfunc.shape_func_param_states
is_inputs = []
input_pos = 0
cpu_ctx = nd.cpu(0)
for i, (arg, state) in enumerate(zip(new_args, input_states)):
state = int(state)
# Pass Shapes
if state == 2:
for j, subexp in enumerate(from_tuple_type(arg.type_annotation, arg)):
sh_of = self.visit(self.shape_of(subexp))
shape_func_ins.append(scope.let("in_shape_{0}".format(input_pos + j), sh_of))
input_pos += 1
is_inputs.append(0)
# Pass Inputs
elif state == 1:
new_arg = self.visit(arg)
ctx = self.get_context(arg)
if ctx.device_type != cpu_ctx.device_type:
new_arg = self.device_copy(new_arg, ctx, cpu_ctx)
shape_func_ins.append(scope.let("in_shape_{0}".format(input_pos), new_arg))
input_pos += 1
is_inputs.append(1)
else:
# TODO(@jroesch): handle 3rd case
raise Exception("unsupported shape function input state")
out_shapes = []
for i, out in enumerate(cfunc.outputs):
tt = ty.TensorType(out.shape, out.dtype)
# Put shape func on CPU. This also ensures that everything between
            # shape_of and shape_func is on CPU.
alloc = self.make_static_allocation(scope, tt, cpu_ctx, i)
alloc = scope.let("shape_func_out_{0}".format(i), alloc)
out_shapes.append(alloc)
shape_call = self.shape_func(
func, expr.Tuple(shape_func_ins), expr.Tuple(out_shapes), is_inputs
)
scope.let("shape_func", shape_call)
return out_shapes
def dynamic_invoke(self, scope, func, ins, new_args, out_types, ret_type):
"""Generate the code for invoking a TVM op with a dynamic shape."""
out_shapes = self.emit_shape_func(scope, func, new_args)
storages = []
func_ctx = self.get_context(func)
for i, (out_shape, out_type) in enumerate(zip(out_shapes, out_types)):
size = self.compute_storage_in_relay(out_shape, out_type.dtype)
alignment = self.compute_alignment(out_type.dtype)
sto = scope.let(
"storage_{i}".format(i=i), alloc_storage(size, alignment, func_ctx, out_type.dtype)
)
storages.append(sto)
outs = []
sh_ty_storage = zip(out_shapes, out_types, storages)
for i, (out_shape, out_type, storage) in enumerate(sh_ty_storage):
alloc = alloc_tensor(storage, out_shape, out_type.dtype, out_type.shape)
alloc = scope.let("out_{i}".format(i=i), alloc)
outs.append(alloc)
tuple_outs = expr.Tuple(outs)
invoke = self.invoke_tvm(func, ins, tuple_outs)
scope.let("", invoke)
return to_tuple_type(ret_type, tuple_outs.fields)
def emit_reshape_tensor(self, scope, func, new_args, ret_type):
if self.is_dynamic(ret_type):
out_shapes = self.emit_shape_func(scope, func, new_args)
shape_expr = out_shapes[0]
else:
# constant output shape
shape = [int(dim) for dim in ret_type.shape]
shape_expr = expr.const(shape, dtype=self.compute_dtype)
return self.reshape_tensor(new_args[0], shape_expr, ret_type.shape)
def is_dynamic(self, ret_type):
is_dynamic = ty.is_dynamic(ret_type)
# TODO(@jroesch): restore this code, more complex then it seems
# for arg in call.args:
# is_dynamic = is_dynamic or arg.checked_type.is_dynamic()
return is_dynamic
def visit_call(self, call):
if is_primitive(call):
# Because we are in ANF we do not need to visit the arguments.
scope = self.current_scope()
new_args = [self.visit(arg) for arg in call.args]
ins = expr.Tuple(new_args)
ret_type = call.checked_type
out_types = flatten_tuple_type(ret_type)
if is_reshape_only(call.op):
# Handle fused op that only contains reshape op
return self.emit_reshape_tensor(scope, call.op, new_args, ret_type)
if is_device_copy(call.op):
# Handle device copy op
if isinstance(call.op, Function):
attr = call.op.body.attrs
else:
                    attr = call.attrs
return self.device_copy(
new_args[0], TVMContext(attr.src_dev_type, 0), TVMContext(attr.dst_dev_type, 0)
)
if self.is_dynamic(ret_type):
# Handle dynamic case.
return self.dynamic_invoke(scope, call.op, ins, new_args, out_types, ret_type)
# Handle static case.
outs = []
for i, out_ty in enumerate(out_types):
ctx = self.get_context(call)
assert isinstance(ctx, TVMContext)
out = self.make_static_allocation(scope, out_ty, ctx, i)
outs.append(out)
output = expr.Tuple(outs)
invoke = self.invoke_tvm(call.op, ins, output)
scope.let("", invoke)
return to_tuple_type(ret_type, output.fields)
return super().visit_call(call)
def mk_analysis_annotator(results):
"""Pretty print the annotated relay program with device info"""
def _annotator(exp):
if exp in results:
val = results[exp]
assert len(val) == 2
ctx = TVMContext(val[0].value, val[1].value)
return f"<{ctx}>"
else:
return ""
return _annotator
@module_pass(opt_level=0)
class ManifestAlloc:
"""The explicit pass wrapper around ManifestAlloc."""
# TODO(zhiics, jroesch) Port this pass to C++.
def __init__(self, target_host, targets):
self.target_host = target_host
self.targets = targets
def transform_module(self, mod, _):
"""Invokes the pass"""
# TODO(@jroesch): Is there a way to do one shot initialization?
# can we have def pass_init?
mod.import_from_std("core.rly")
mod = InferType()(mod)
assert isinstance(self.targets, (dict, container.Map))
if len(self.targets) > 1:
pass_ctx = PassContext.current()
if "relay.fallback_device_type" in pass_ctx.config:
fallback_ctx = nd.context(pass_ctx.config["relay.fallback_device_type"])
else:
fallback_ctx = cpu(0)
ca = context_analysis(mod, TVMContext(fallback_ctx.device_type, 0))
else:
if isinstance(self.targets, dict):
dev = list(self.targets.keys())[0]
else:
dev, _ = self.targets.items()[0]
ca = context_analysis(mod, nd.context(dev.value))
# The following code can be used for debugging the module after
# annotation.
# print(mod.astext(show_meta_data=False, annotate=mk_analysis_annotator(ca)))
gv_funcs = mod.functions
for gv, f in gv_funcs.items():
ea = ManifestAllocPass(self.target_host, ca)
f = ea.visit(f)
mod.update_func(gv, f)
return mod
register_func("relay.transform.ManifestAlloc", ManifestAlloc)
| apache-2.0 | -187,480,350,309,768,800 | 36.882813 | 100 | 0.59813 | false |