repo_name | path | copies | size | content | license | var_hash | doc_hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|---|
andmos/ansible | test/units/modules/network/ingate/test_ig_config.py | 50 | 8319 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Ingate Systems AB
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from units.compat.mock import patch
from ansible.modules.network.ingate import ig_config
from units.modules.utils import set_module_args
from .ingate_module import TestIngateModule, load_fixture
class TestConfigModule(TestIngateModule):
module = ig_config
def setUp(self):
super(TestConfigModule, self).setUp()
self.mock_make_request = patch('ansible.modules.network.ingate.'
'ig_config.make_request')
self.make_request = self.mock_make_request.start()
# ATM the Ingate Python SDK is not needed in this unit test.
self.module.HAS_INGATESDK = True
def tearDown(self):
super(TestConfigModule, self).tearDown()
self.mock_make_request.stop()
def load_fixtures(self, fixture=None, command=None, changed=False):
self.make_request.side_effect = [(changed, command,
load_fixture(fixture))]
def test_ig_config_add(self):
"""Test adding a row to a table.
"""
command = 'add'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
add=True,
table='misc.dns_servers',
columns=dict(
server='192.168.1.23'
)))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_delete(self):
"""Test deleting all rows in a table.
"""
command = 'delete'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
delete=True,
table='misc.dns_servers',
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_get(self):
"""Test returning all rows in a table.
"""
command = 'get'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
get=True,
table='misc.dns_servers',
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_modify(self):
"""Test modifying a row.
"""
command = 'modify'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
modify=True,
table='misc.unitname',
columns=dict(
unitname='"Testapi - 1541699806"'
)))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_revert(self):
"""Test reverting the preliminary configuration.
"""
command = 'revert'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
revert=True
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_factory(self):
"""Test loading factory defaults.
"""
command = 'factory'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
factory=True
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_store(self):
"""Test storing the preliminary configuration.
"""
command = 'store'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
store=True
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_download(self):
"""Test doing backup of configuration database.
"""
command = 'store'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
download=True
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_return_rowid(self):
"""Test retrieving a row id.
"""
command = 'return_rowid'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
return_rowid=True,
table='network.local_nets',
columns=dict(
interface='eth0'
)))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
| gpl-3.0 | 1,795,199,870,803,030,800 | 4,975,325,545,144,661,000 | 33.518672 | 73 | 0.492126 | false |
goldsborough/.emacs | .emacs.d/.python-environments/default/lib/python3.5/site-packages/setuptools/command/install_scripts.py | 505 | 2231 | from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
| mit | 2,832,244,595,402,046,000 | 5,621,876,540,593,096,000 | 36.183333 | 79 | 0.619901 | false |
molgun/oclapi | django-nonrel/ocl/mappings/views.py | 4 | 15934 | from django.core.exceptions import ValidationError
from django.db.models import Q
from django.http import HttpResponse
from rest_framework import mixins, status
from rest_framework.generics import RetrieveAPIView, ListAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
from rest_framework.response import Response
from concepts.permissions import CanEditParentDictionary, CanViewParentDictionary
from mappings.filters import PublicMappingsSearchFilter, SourceRestrictedMappingsFilter, CollectionRestrictedMappingFilter
from mappings.models import Mapping, MappingVersion
from mappings.serializers import MappingCreateSerializer, MappingUpdateSerializer, MappingDetailSerializer, MappingListSerializer, \
MappingVersionDetailSerializer, MappingVersionListSerializer
from oclapi.mixins import ListWithHeadersMixin
from oclapi.models import ACCESS_TYPE_NONE
from oclapi.views import ConceptDictionaryMixin, BaseAPIView, parse_updated_since_param, VersionedResourceChildMixin
from sources.models import SourceVersion
from orgs.models import Organization
from users.models import UserProfile
INCLUDE_RETIRED_PARAM = 'includeRetired'
LIMIT_PARAM = 'limit'
class MappingBaseView(ConceptDictionaryMixin):
lookup_field = 'mapping'
pk_field = 'id'
model = Mapping
child_list_attribute = 'mappings'
include_retired = False
permission_classes = (CanViewParentDictionary,)
def initialize(self, request, path_info_segment, **kwargs):
super(MappingBaseView, self).initialize(request, path_info_segment, **kwargs)
if self.parent_resource:
if hasattr(self.parent_resource, 'versioned_object'):
self.parent_resource_version = self.parent_resource
self.parent_resource = self.parent_resource.versioned_object
else:
self.parent_resource_version = self.parent_resource.get_head()
def get_queryset(self):
queryset = super(ConceptDictionaryMixin, self).get_queryset()
owner_is_self = self.parent_resource and self.userprofile and self.parent_resource.owner == self.userprofile
if self.parent_resource:
queryset = queryset.filter(parent_id=self.parent_resource.id)
if not(self.user.is_staff or owner_is_self):
queryset = queryset.filter(~Q(public_access=ACCESS_TYPE_NONE))
return queryset
class MappingVersionCsvMixin:
def get_csv_rows(self, queryset=None):
if not queryset:
queryset = self.get_queryset()
values = queryset.values('map_type','versioned_object_id','uri')
for value in values:
mapping = Mapping.objects.get(id=value.pop('versioned_object_id'))
value['From Concept Owner'] = mapping.from_source_owner
value['From Concept Source'] = mapping.from_source_name
value['From Concept Code'] = mapping.from_concept_code
value['From Concept Name'] = mapping.from_concept_name
value['Map Type'] = value.pop('map_type')
value['To Concept Owner'] = mapping.to_source_owner
value['To Concept Source'] = mapping.to_source_name
value['To Concept Code'] = mapping.get_to_concept_code()
value['To Concept Name'] = mapping.get_to_concept_name()
value['Internal/External'] = 'Internal' if mapping.to_concept_url else 'External'
value['Retired'] = mapping.retired
value['External ID'] = mapping.external_id
value['Last Updated'] = mapping.updated_at
value['Updated By'] = mapping.updated_by
value['Mapping Owner'] = mapping.owner
value['Mapping Source'] = mapping.source
value['URI'] = value.pop('uri')
values.field_names.extend(['From Concept Owner','From Concept Source','From Concept Code','From Concept Name','Map Type','To Concept Owner',
'To Concept Source','To Concept Code','To Concept Name','Internal/External','Retired','External ID','Last Updated','Updated By','Mapping Owner','Mapping Source','URI'])
del values.field_names[0:3]
return values
class MappingVersionBaseView(ConceptDictionaryMixin):
lookup_field = 'mapping_version'
model = MappingVersion
include_retired = False
permission_classes = (CanViewParentDictionary,)
queryset = MappingVersion.objects.filter(is_active=True)
def initialize(self, request, path_info_segment, **kwargs):
super(MappingVersionBaseView, self).initialize(request, path_info_segment, **kwargs)
def get_queryset(self):
queryset = MappingVersion.objects.filter(is_active=True, versioned_object_id=self.kwargs.get('mapping'))
return queryset
class MappingDetailView(MappingBaseView, RetrieveAPIView, UpdateAPIView, DestroyAPIView):
serializer_class = MappingDetailSerializer
def destroy(self, request, *args, **kwargs):
self.permission_classes = (CanEditParentDictionary,)
mapping = self.get_object_or_none()
if mapping is None:
return Response(
{'non_field_errors': 'Could not find mapping to retire'},
status=status.HTTP_404_NOT_FOUND)
update_comment = None
if 'update_comment' in request.DATA:
update_comment = request.DATA.get('update_comment')
errors = Mapping.retire(mapping, request.user, update_comment=update_comment)
if errors:
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_204_NO_CONTENT)
def update(self, request, *args, **kwargs):
self.permission_classes = (CanEditParentDictionary,)
self.serializer_class = MappingUpdateSerializer
partial = True
self.object = self.get_object()
created = False
save_kwargs = {'force_update': True}
if 'update_comment' in request.DATA:
save_kwargs = {'force_update':True, 'update_comment': request.DATA.get('update_comment')}
else:
save_kwargs = {'force_update': True}
success_status_code = status.HTTP_200_OK
serializer = self.get_serializer(self.object, data=request.DATA,
files=request.FILES, partial=partial)
if serializer.is_valid():
try:
self.pre_save(serializer.object)
except ValidationError as e:
return Response(e.messages, status=status.HTTP_400_BAD_REQUEST)
self.object = serializer.save(**save_kwargs)
self.post_save(self.object, created=created)
serializer = MappingDetailSerializer(self.object, context={'request': request})
return Response(serializer.data, status=success_status_code)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class MappingVersionMixin():
lookup_field = 'mapping_version'
pk_field = 'mnemonic'
model = MappingVersion
parent_resource_version_model = SourceVersion
permission_classes = (CanViewParentDictionary,)
child_list_attribute = 'mappings'
class MappingVersionsListView(MappingVersionMixin, VersionedResourceChildMixin,
ListWithHeadersMixin, MappingVersionCsvMixin):
serializer_class = MappingVersionListSerializer
solr_fields = {
'lastUpdate': {'sortable': True, 'filterable': False, 'facet': False},
'concept': {'sortable': False, 'filterable': True, 'facet': False},
'fromConcept': {'sortable': False, 'filterable': True, 'facet': False},
'toConcept': {'sortable': False, 'filterable': True, 'facet': False},
'retired': {'sortable': False, 'filterable': True, 'facet': True},
'mapType': {'sortable': False, 'filterable': True, 'facet': True},
'source': {'sortable': False, 'filterable': True, 'facet': True},
'collection': {'sortable': False, 'filterable': True, 'facet': True},
'owner': {'sortable': False, 'filterable': True, 'facet': True},
'ownerType': {'sortable': False, 'filterable': True, 'facet': True},
'conceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'conceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'conceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
}
def get(self, request, *args, **kwargs):
self.filter_backends = [CollectionRestrictedMappingFilter] if 'collection' in kwargs else [SourceRestrictedMappingsFilter]
self.include_retired = request.QUERY_PARAMS.get(INCLUDE_RETIRED_PARAM, False)
self.updated_since = parse_updated_since_param(request)
return self.list(request, *args, **kwargs)
def get_queryset(self):
if ('collection' in self.kwargs and 'version' not in self.kwargs) or ('collection' in self.kwargs and 'version' in self.kwargs and self.kwargs['version'] == 'HEAD'):
all_children = getattr(self.parent_resource_version, self.child_list_attribute) or []
queryset = super(ConceptDictionaryMixin, self).get_queryset()
queryset = queryset.filter(versioned_object_id__in=all_children, is_latest_version=True)
else:
queryset = super(MappingVersionsListView, self).get_queryset()
queryset = queryset.filter(is_active=True)
if not self.include_retired:
queryset = queryset.filter(~Q(retired=True))
if self.updated_since:
queryset = queryset.filter(updated_at__gte=self.updated_since)
return queryset
def get_owner(self):
owner = None
if 'user' in self.kwargs:
owner_id = self.kwargs['user']
owner = UserProfile.objects.get(mnemonic=owner_id)
elif 'org' in self.kwargs:
owner_id = self.kwargs['org']
owner = Organization.objects.get(mnemonic=owner_id)
return owner
class MappingVersionsView(ConceptDictionaryMixin, ListWithHeadersMixin):
serializer_class = MappingVersionListSerializer
permission_classes = (CanViewParentDictionary,)
def get(self, request, *args, **kwargs):
self.serializer_class = MappingVersionDetailSerializer
return self.list(request, *args, **kwargs)
def get_queryset(self):
return MappingVersion.objects.filter(versioned_object_id=self.parent_resource.id, is_active=True)
class MappingVersionDetailView(MappingVersionBaseView, RetrieveAPIView):
serializer_class = MappingVersionDetailSerializer
def initialize(self, request, path_info_segment, **kwargs):
super(MappingVersionDetailView, self).initialize(request, path_info_segment, **kwargs)
def get_level(self):
return 1
class MappingListView(MappingBaseView,
ListAPIView,
CreateAPIView,
ListWithHeadersMixin,
mixins.CreateModelMixin):
queryset = Mapping.objects.filter(is_active=True)
serializer_class = MappingCreateSerializer
def get(self, request, *args, **kwargs):
delegate_view = MappingVersionsListView.as_view()
return delegate_view(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
self.permission_classes = (CanEditParentDictionary,)
if not self.parent_resource:
return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED)
serializer = self.get_serializer(data=request.DATA, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
save_kwargs = {
'force_insert': True,
'parent_resource': self.parent_resource,
}
self.object = serializer.save(**save_kwargs)
if serializer.is_valid():
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
serializer = MappingDetailSerializer(self.object, context={'request': request})
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
return Response({'errors' : (('' if k == '__all__' else k +' : ')+ v[0]) for k, v in serializer.errors.items()}, status=status.HTTP_400_BAD_REQUEST)
def get_queryset(self):
queryset = super(ConceptDictionaryMixin, self).get_queryset()
if not self.include_retired:
queryset = queryset.filter(~Q(retired=True))
return queryset
def get_owner(self):
owner = None
if 'user' in self.kwargs:
owner_id = self.kwargs['user']
owner = UserProfile.objects.get(mnemonic=owner_id)
elif 'org' in self.kwargs:
owner_id = self.kwargs['org']
owner = Organization.objects.get(mnemonic=owner_id)
return owner
class MappingListAllView(BaseAPIView, ListWithHeadersMixin, MappingVersionCsvMixin):
model = MappingVersion
filter_backends = [PublicMappingsSearchFilter,]
permission_classes = (CanEditParentDictionary,)
queryset = MappingVersion.objects.filter(is_active=True)
solr_fields = {
'lastUpdate': {'sortable': True, 'filterable': False, 'facet': False},
'concept': {'sortable': False, 'filterable': True, 'facet': False},
'fromConcept': {'sortable': False, 'filterable': True, 'facet': False},
'toConcept': {'sortable': False, 'filterable': True, 'facet': False},
'retired': {'sortable': False, 'filterable': True, 'facet': True},
'mapType': {'sortable': False, 'filterable': True, 'facet': True},
'source': {'sortable': False, 'filterable': True, 'facet': True},
'owner': {'sortable': False, 'filterable': True, 'facet': True},
'ownerType': {'sortable': False, 'filterable': True, 'facet': True},
'conceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'conceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'conceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
}
include_retired = False
default_filters = {'is_active': True, 'is_latest_version': True}
def get(self, request, *args, **kwargs):
self.include_retired = request.QUERY_PARAMS.get(INCLUDE_RETIRED_PARAM, False)
self.serializer_class = MappingVersionDetailSerializer if self.is_verbose(request) else MappingVersionListSerializer
self.limit = request.QUERY_PARAMS.get(LIMIT_PARAM, 25)
return self.list(request, *args, **kwargs)
def get_queryset(self):
queryset = super(MappingListAllView, self).get_queryset()
if not self.include_retired:
queryset = queryset.filter(~Q(retired=True))
if not self.request.user.is_staff:
queryset = queryset.filter(~Q(public_access=ACCESS_TYPE_NONE))
return queryset[0:self.limit]
| mpl-2.0 | -6,756,803,982,218,912,000 | 8,466,215,770,598,490,000 | 49.264984 | 203 | 0.659282 | false |
nicky-ji/edx-nicky | lms/lib/comment_client/models.py | 27 | 5994 | import logging
from .utils import extract, perform_request, CommentClientRequestError
log = logging.getLogger(__name__)
class Model(object):
accessible_fields = ['id']
updatable_fields = ['id']
initializable_fields = ['id']
base_url = None
default_retrieve_params = {}
metric_tag_fields = []
DEFAULT_ACTIONS_WITH_ID = ['get', 'put', 'delete']
DEFAULT_ACTIONS_WITHOUT_ID = ['get_all', 'post']
DEFAULT_ACTIONS = DEFAULT_ACTIONS_WITH_ID + DEFAULT_ACTIONS_WITHOUT_ID
def __init__(self, *args, **kwargs):
self.attributes = extract(kwargs, self.accessible_fields)
self.retrieved = False
def __getattr__(self, name):
if name == 'id':
return self.attributes.get('id', None)
try:
return self.attributes[name]
except KeyError:
if self.retrieved or self.id is None:
raise AttributeError("Field {0} does not exist".format(name))
self.retrieve()
return self.__getattr__(name)
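# Note (added comment, not in the original file): attribute access is lazy -- when a field
# is missing, the model has an id, and retrieve() has not run yet, __getattr__ fetches the
# object from the API once and then retries the lookup against the populated attributes.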
def __setattr__(self, name, value):
if name == 'attributes' or name not in self.accessible_fields:
super(Model, self).__setattr__(name, value)
else:
self.attributes[name] = value
def __getitem__(self, key):
if key not in self.accessible_fields:
raise KeyError("Field {0} does not exist".format(key))
return self.attributes.get(key)
def __setitem__(self, key, value):
if key not in self.accessible_fields:
raise KeyError("Field {0} does not exist".format(key))
self.attributes.__setitem__(key, value)
def items(self, *args, **kwargs):
return self.attributes.items(*args, **kwargs)
def get(self, *args, **kwargs):
return self.attributes.get(*args, **kwargs)
def to_dict(self):
self.retrieve()
return self.attributes
def retrieve(self, *args, **kwargs):
if not self.retrieved:
self._retrieve(*args, **kwargs)
self.retrieved = True
return self
def _retrieve(self, *args, **kwargs):
url = self.url(action='get', params=self.attributes)
response = perform_request(
'get',
url,
self.default_retrieve_params,
metric_tags=self._metric_tags,
metric_action='model.retrieve'
)
self._update_from_response(response)
@property
def _metric_tags(self):
"""
Returns a list of tags to be used when recording metrics about this model.
Each field named in ``self.metric_tag_fields`` is used as a tag value,
under the key ``<class>.<metric_field>``. The tag model_class is used to
record the class name of the model.
"""
tags = [
u'{}.{}:{}'.format(self.__class__.__name__, attr, self[attr])
for attr in self.metric_tag_fields
if attr in self.attributes
]
tags.append(u'model_class:{}'.format(self.__class__.__name__))
return tags
@classmethod
def find(cls, id):
return cls(id=id)
def _update_from_response(self, response_data):
for k, v in response_data.items():
if k in self.accessible_fields:
self.__setattr__(k, v)
else:
log.warning(
"Unexpected field {field_name} in model {model_name}".format(
field_name=k,
model_name=self.__class__.__name__
)
)
def updatable_attributes(self):
return extract(self.attributes, self.updatable_fields)
def initializable_attributes(self):
return extract(self.attributes, self.initializable_fields)
@classmethod
def before_save(cls, instance):
pass
@classmethod
def after_save(cls, instance):
pass
def save(self):
self.before_save(self)
if self.id: # if we have id already, treat this as an update
url = self.url(action='put', params=self.attributes)
response = perform_request(
'put',
url,
self.updatable_attributes(),
metric_tags=self._metric_tags,
metric_action='model.update'
)
else: # otherwise, treat this as an insert
url = self.url(action='post', params=self.attributes)
response = perform_request(
'post',
url,
self.initializable_attributes(),
metric_tags=self._metric_tags,
metric_action='model.insert'
)
self.retrieved = True
self._update_from_response(response)
self.after_save(self)
def delete(self):
url = self.url(action='delete', params=self.attributes)
response = perform_request('delete', url, metric_tags=self._metric_tags, metric_action='model.delete')
self.retrieved = True
self._update_from_response(response)
@classmethod
def url_with_id(cls, params={}):
return cls.base_url + '/' + str(params['id'])
@classmethod
def url_without_id(cls, params={}):
return cls.base_url
@classmethod
def url(cls, action, params={}):
if cls.base_url is None:
raise CommentClientRequestError("Must provide base_url when using default url function")
if action not in cls.DEFAULT_ACTIONS:
raise ValueError("Invalid action {0}. The supported action must be in {1}".format(action, str(cls.DEFAULT_ACTIONS)))
elif action in cls.DEFAULT_ACTIONS_WITH_ID:
try:
return cls.url_with_id(params)
except KeyError:
raise CommentClientRequestError("Cannot perform action {0} without id".format(action))
else: # action must be in DEFAULT_ACTIONS_WITHOUT_ID now
return cls.url_without_id()
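# Illustrative sketch (added; not part of the original file): a hypothetical subclass showing
# how the class attributes above drive the generic CRUD helpers. The Note name, fields and
# '/api/v1/notes' URL are assumptions for illustration only.
#
#   class Note(Model):
#       accessible_fields = ['id', 'body']
#       updatable_fields = ['id', 'body']
#       base_url = '/api/v1/notes'
#
#   Note.find('42').retrieve()    # GET /api/v1/notes/42, attributes cached on the instance
#   Note(body='hi').save()        # no id yet, so POST to /api/v1/notes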
| agpl-3.0 | 5,484,156,973,930,953,000 | -5,643,960,966,056,598,000 | 32.864407 | 128 | 0.573073 | false |
gymnasium/edx-platform | common/lib/xmodule/xmodule/conditional_module.py | 8 | 15152 | """Conditional module is an xmodule that you can use to disable
some xmodules based on conditions.
"""
import json
import logging
from lazy import lazy
from lxml import etree
from pkg_resources import resource_string
from six import text_type
from opaque_keys.edx.locator import BlockUsageLocator
from web_fragments.fragment import Fragment
from xblock.fields import ReferenceList, Scope, String
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.seq_module import SequenceDescriptor
from xmodule.studio_editable import StudioEditableDescriptor, StudioEditableModule
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW, XModule
log = logging.getLogger('edx.' + __name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class ConditionalFields(object):
has_children = True
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
scope=Scope.settings,
default=_('Conditional')
)
show_tag_list = ReferenceList(
help=_("List of urls of children that are references to external modules"),
scope=Scope.content
)
sources_list = ReferenceList(
display_name=_("Source Components"),
help=_("The component location IDs of all source components that are used to determine whether a learner is "
"shown the content of this conditional module. Copy the component location ID of a component from its "
"Settings dialog in Studio."),
scope=Scope.content
)
conditional_attr = String(
display_name=_("Conditional Attribute"),
help=_("The attribute of the source components that determines whether a learner is shown the content of this "
"conditional module."),
scope=Scope.content,
default='correct',
values=lambda: [{'display_name': xml_attr, 'value': xml_attr}
for xml_attr in ConditionalModule.conditions_map.keys()]
)
conditional_value = String(
display_name=_("Conditional Value"),
help=_("The value that the conditional attribute of the source components must match before a learner is shown "
"the content of this conditional module."),
scope=Scope.content,
default='True'
)
conditional_message = String(
display_name=_("Blocked Content Message"),
help=_("The message that is shown to learners when not all conditions are met to show the content of this "
"conditional module. Include {link} in the text of your message to give learners a direct link to "
"required units. For example, 'You must complete {link} before you can access this unit'."),
scope=Scope.content,
default=_('You must complete {link} before you can access this unit.')
)
class ConditionalModule(ConditionalFields, XModule, StudioEditableModule):
"""
Blocks child module from showing unless certain conditions are met.
Example:
<conditional sources="i4x://.../problem_1; i4x://.../problem_2" completed="True">
<show sources="i4x://.../test_6; i4x://.../Avi_resources"/>
<video url_name="secret_video" />
</conditional>
<conditional> tag attributes:
sources - location id of required modules, separated by ';'
submitted - map to `is_submitted` module method.
(pressing the RESET button makes this function return False.)
attempted - map to `is_attempted` module method
correct - map to `is_correct` module method
poll_answer - map to `poll_answer` module attribute
voted - map to `voted` module attribute
<show> tag attributes:
sources - location id of required modules, separated by ';'
You can add your own rules for the <conditional> tag, like
"completed", "attempted" etc. To do that you must extend the
`ConditionalModule.conditions_map` variable and add the pair:
my_attr: my_property/my_method
After that you can use it:
<conditional my_attr="some value" ...>
...
</conditional>
And my_property/my_method will be called for required modules.
"""
js = {
'js': [
resource_string(__name__, 'js/src/conditional/display.js'),
resource_string(__name__, 'js/src/javascript_loader.js'),
resource_string(__name__, 'js/src/collapsible.js'),
]
}
js_module_name = "Conditional"
css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}
# Map
# key: <tag attribute in xml>
# value: <name of module attribute>
conditions_map = {
'poll_answer': 'poll_answer', # poll_question attr
# problem was submitted (it can be wrong)
# if student will press reset button after that,
# state will be reverted
'submitted': 'is_submitted', # capa_problem attr
# if student attempted problem
'attempted': 'is_attempted', # capa_problem attr
# if problem is full points
'correct': 'is_correct',
'voted': 'voted' # poll_question attr
}
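# Illustrative note (added; not in the original file): as the class docstring above explains,
# a new rule can be wired in by extending this map, e.g. a hypothetical
#   'completed': 'is_completed'
# entry would let <conditional completed="True" ...> gate on each source module's
# is_completed attribute or method.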
@lazy
def required_modules(self):
return [self.system.get_module(descriptor) for
descriptor in self.descriptor.get_required_module_descriptors()]
def is_condition_satisfied(self):
attr_name = self.conditions_map[self.conditional_attr]
if self.conditional_value and self.required_modules:
for module in self.required_modules:
if not hasattr(module, attr_name):
# We don't throw an exception here because it is possible for
# the descriptor of a required module to have a property but
# for the resulting module to be a (flavor of) ErrorModule.
# So just log and return false.
if module is not None:
# We do not want to log when module is None, and it is when requester
# does not have access to the requested required module.
log.warn('Error in conditional module: \
required module {module} has no {module_attr}'.format(module=module, module_attr=attr_name))
return False
attr = getattr(module, attr_name)
if callable(attr):
attr = attr()
if self.conditional_value != str(attr):
break
else:
return True
return False
def get_html(self):
# Calculate html ids of dependencies
self.required_html_ids = [descriptor.location.html_id() for
descriptor in self.descriptor.get_required_module_descriptors()]
return self.system.render_template('conditional_ajax.html', {
'element_id': self.location.html_id(),
'ajax_url': self.system.ajax_url,
'depends': ';'.join(self.required_html_ids)
})
def author_view(self, context):
"""
Renders the Studio preview by rendering each child so that they can all be seen and edited.
"""
fragment = Fragment()
root_xblock = context.get('root_xblock')
is_root = root_xblock and root_xblock.location == self.location
if is_root:
# User has clicked the "View" link. Show a preview of all possible children:
self.render_children(context, fragment, can_reorder=True, can_add=True)
# else: When shown on a unit page, don't show any sort of preview -
# just the status of this block in the validation area.
return fragment
def handle_ajax(self, _dispatch, _data):
"""This is called by courseware.moduleodule_render, to handle
an AJAX call.
"""
if not self.is_condition_satisfied():
context = {'module': self,
'message': self.conditional_message}
html = self.system.render_template('conditional_module.html',
context)
return json.dumps({'html': [html], 'message': bool(self.conditional_message)})
html = [child.render(STUDENT_VIEW).content for child in self.get_display_items()]
return json.dumps({'html': html})
def get_icon_class(self):
new_class = 'other'
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
child_classes = [self.system.get_module(child_descriptor).get_icon_class()
for child_descriptor in self.descriptor.get_children()]
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
def validate(self):
"""
Message for either error or warning validation message/s.
Returns message and type. Priority given to error type message.
"""
return self.descriptor.validate()
class ConditionalDescriptor(ConditionalFields, SequenceDescriptor, StudioEditableDescriptor):
"""Descriptor for conditional xmodule."""
_tag_name = 'conditional'
module_class = ConditionalModule
resources_dir = None
filename_extension = "xml"
has_score = False
show_in_read_only_mode = True
def __init__(self, *args, **kwargs):
"""
Create an instance of the conditional module.
"""
super(ConditionalDescriptor, self).__init__(*args, **kwargs)
# Convert sources xml_attribute to a ReferenceList field type so Location/Locator
# substitution can be done.
if not self.sources_list:
if 'sources' in self.xml_attributes and isinstance(self.xml_attributes['sources'], basestring):
self.sources_list = [
# TODO: it is not clear why we are replacing the run here (which actually is a no-op
# for old-style course locators. However, this is the implementation of
# CourseLocator.make_usage_key_from_deprecated_string, which was previously
# being called in this location.
BlockUsageLocator.from_string(item).replace(run=self.location.course_key.run)
for item in ConditionalDescriptor.parse_sources(self.xml_attributes)
]
@staticmethod
def parse_sources(xml_element):
""" Parse xml_element 'sources' attr and return a list of location strings. """
sources = xml_element.get('sources')
if sources:
return [location.strip() for location in sources.split(';')]
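# Example (added for clarity; not in the original source): an element such as
#   <conditional sources="i4x://org/course/problem/p1; i4x://org/course/problem/p2" ...>
# yields ['i4x://org/course/problem/p1', 'i4x://org/course/problem/p2'] (hypothetical ids).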
def get_required_module_descriptors(self):
"""Returns a list of XModuleDescriptor instances upon
which this module depends.
"""
descriptors = []
for location in self.sources_list:
try:
descriptor = self.system.load_item(location)
descriptors.append(descriptor)
except ItemNotFoundError:
msg = "Invalid module by location."
log.exception(msg)
self.system.error_tracker(msg)
return descriptors
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
show_tag_list = []
definition = {}
for conditional_attr in ConditionalModule.conditions_map.iterkeys():
conditional_value = xml_object.get(conditional_attr)
if conditional_value is not None:
definition.update({
'conditional_attr': conditional_attr,
'conditional_value': str(conditional_value),
})
for child in xml_object:
if child.tag == 'show':
locations = ConditionalDescriptor.parse_sources(child)
for location in locations:
children.append(location)
show_tag_list.append(location)
else:
try:
descriptor = system.process_xml(etree.tostring(child))
children.append(descriptor.scope_ids.usage_id)
except:
msg = "Unable to load child when parsing Conditional."
log.exception(msg)
system.error_tracker(msg)
definition.update({
'show_tag_list': show_tag_list,
'conditional_message': xml_object.get('message', '')
})
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element(self._tag_name)
for child in self.get_children():
if child.location not in self.show_tag_list:
self.runtime.add_block_as_child_node(child, xml_object)
if self.show_tag_list:
show_str = u'<{tag_name} sources="{sources}" />'.format(
tag_name='show', sources=';'.join(text_type(location) for location in self.show_tag_list))
xml_object.append(etree.fromstring(show_str))
# Overwrite the original sources attribute with the value from sources_list, as
# Locations may have been changed to Locators.
stringified_sources_list = map(lambda loc: text_type(loc), self.sources_list)
self.xml_attributes['sources'] = ';'.join(stringified_sources_list)
self.xml_attributes[self.conditional_attr] = self.conditional_value
self.xml_attributes['message'] = self.conditional_message
return xml_object
def validate(self):
validation = super(ConditionalDescriptor, self).validate()
if not self.sources_list:
conditional_validation = StudioValidation(self.location)
conditional_validation.add(
StudioValidationMessage(
StudioValidationMessage.NOT_CONFIGURED,
_(u"This component has no source components configured yet."),
action_class='edit-button',
action_label=_(u"Configure list of sources")
)
)
validation = StudioValidation.copy(validation)
validation.summary = conditional_validation.messages[0]
return validation
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(ConditionalDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
ConditionalDescriptor.due,
ConditionalDescriptor.is_practice_exam,
ConditionalDescriptor.is_proctored_enabled,
ConditionalDescriptor.is_time_limited,
ConditionalDescriptor.default_time_limit_minutes,
ConditionalDescriptor.show_tag_list,
ConditionalDescriptor.exam_review_rules,
])
return non_editable_fields
| agpl-3.0 | -3,463,751,744,343,714,300 | -2,313,188,018,998,508,000 | 38.978892 | 120 | 0.608831 | false |
thaines/rfam | bin/prman_AlfParser.py | 1 | 9166 | import pyparsing as pp
import re
import copy
class prman_AlfParser:
def __init__(self):
self.keywords = ['Job', 'Task', 'RemoteCmd']
def parseFile(self, fileText):
commands = self.__parseCommandStructure(fileText, 0, isStart = True)
#print(commands)
textureCmds, Cmds, frames = self.extractCommandHierarchy(commands)
return [textureCmds, Cmds, frames]
def printCommands(self, cmds, currentIndent = 0):
if isinstance(cmds, list):
for e in cmds:
self.printCommands(e, currentIndent + 1)
print('---------------------')
else:
tabs = ''
for i in range(currentIndent):
tabs += '\t'
print(tabs + repr(cmds))
def __matchBracket(self, str):
if str[0] != '{':
return None
num_open = 0
for i, c in enumerate(str):
if c == '{':
num_open += 1
elif c == '}':
num_open -= 1
if num_open == 0:
return str[1:i]
return None
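# Illustrative example (added; not part of the original parser): __matchBracket returns the
# text between the first brace and its matching closing brace, so
#   self.__matchBracket('{a {b} c} d')  ->  'a {b} c'
# and it returns None when the input does not start with '{' or the braces never balance.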
def leadingSpace(self, text):
return len(text) - len(text.lstrip())
def removingLeadingNewLines(self, text):
return text.lstrip('\n')
def determineCommandLength(self, text):
if text[0] == '\n':
raise ValueError('Determine command length should never take newline as first char!')
text = copy.deepcopy(text)
lines = text.split('\n')
lengths = [len(l) for l in lines]
currentIndent = self.leadingSpace(lines[0])
extent = len(lines[0])
for i, l in enumerate(lines[1:]):
if self.leadingSpace(l) != currentIndent:
extent += lengths[i + 1] + 1
else:
extent += lengths[i + 1] + 1
return extent
return extent
def extractAllArgs(self, text):
currentIndent = 0
parsingBracket = False
parsingSimple = False
args = []
argNames = []
resultText = ''
currentBracketText = ''
i = 0
while i < len(text):
if parsingBracket:
#process indents
if text[i] == '}':
currentIndent -= 1
currentBracketText += text[i]
if currentIndent == 0:
args.append(currentBracketText[1:-1])
currentBracketText = ''
parsingBracket = False
currentIndent = 0
elif text[i] == '{':
currentBracketText += text[i]
currentIndent += 1
else:
currentBracketText += text[i]
elif parsingSimple:
if text[i] == ' ':
args.append(currentBracketText )
currentBracketText = ''
parsingSimple = False
else:
currentBracketText += text[i]
else:
if text[i] == '-':
counter = 1
argName = ''
while True:
if text[i + counter] == ' ':
argNames.append(argName)
if text[i + counter + 1] == '{':
currentIndent = 0
parsingBracket = True
i = i + counter
else:
parsingSimple = True
i = i + counter
break
else:
argName += text[i + counter]
counter += 1
i += 1
return argNames, args, resultText
def parseOptions(self, text):
optsNames, opts, textWithoutOpts = self.extractAllArgs(text)
result = {}
for i in range(len(optsNames)):
result[optsNames[i]] = opts[i]
return result
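# Illustrative example (added; not in the original file): for an option fragment such as
#   '-service {PixarRender} -envkey {prman-21}'   (hypothetical flag names/values)
# parseOptions returns {'service': 'PixarRender', 'envkey': 'prman-21'}.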
def parseJob(self, text):
newJob = self.parseOptions(text)
newJob['type'] = 'job'
return newJob
def parseRemoteCmd(self, text):
#grab the actual command
i = len(text) - 1
actualCommand = ''
while i > 0:
if text[i] == '}':
break
else:
i -= 1
while i > 0:
if text[i] == '{':
actualCommand = text[i] + actualCommand
break
else:
actualCommand = text[i] + actualCommand
i -=1
newCmd = self.parseOptions(text[:i])
newCmd['type'] = 'remoteCommand'
newCmd['command'] = actualCommand[1:-1]
return newCmd
def parseTask(self, text):
#parse Task Name
taskName = ''
start = text.find('{') + 1
for i in range(start, len(text)):
if text[i] == '}':
break
else:
taskName += text[i]
text = text[i+1:]
newTask = self.parseOptions(text)
newTask['type'] = 'task'
newTask['taskName'] = taskName
return newTask
def __parseCommandStructure(self, text, indentLevel, isStart = False):
structure = []
text = copy.deepcopy(text)
if isStart:
text = text[17:]
starts = [text.find(k) for k in self.keywords]
for i in range(len(starts)):
if starts[i] < 0:
starts[i] = 111111111111111111
lowestStartIdx = starts.index(min(starts))
#move back until new line
startIdx = starts[lowestStartIdx]
if startIdx == 111111111111111111:
return None
while startIdx > 0:
if text[startIdx - 1] == '\t':
startIdx -= 1
else:
break
if lowestStartIdx == 0: #Job
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseJob(text[startIdx+3:startIdx+length])
elif lowestStartIdx == 1: #Task
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseTask(text[startIdx+4:startIdx+length])
elif lowestStartIdx == 2: #RemoteCmd
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseRemoteCmd(text[startIdx+9:startIdx+length])
try: #why does hasattr not work here?
#print('Attempting to parse subtasks')
newItem['subtasks'] = self.__parseCommandStructure(self.removingLeadingNewLines(newItem['subtasks']), indentLevel+1)
except:
pass
try:
newItem['cmds'] = self.__parseCommandStructure(self.removingLeadingNewLines(newItem['cmds']), indentLevel+1)
except:
pass
structure.append(newItem)
nextCommands = self.__parseCommandStructure(text[startIdx+length:], indentLevel)
if nextCommands:
for c in nextCommands:
structure.append(c)
return structure
def extractCommandsForFrame(self, task):
frames = []
cmds = {}
for t in task['subtasks']:
subcmds = []
#extract frame index
frameLinearIdx = int(t['taskName'].replace('Frame', ''))
frames.append(frameLinearIdx)
for t_sub in t['subtasks']:
try:
for c in t_sub['cmds']:
subcmds.append(c)
except:
pass
if subcmds:
cmds[str(frameLinearIdx)] = subcmds
return cmds, frames
def extractCommandsForTexture(self, task):
cmds = []
for t in task['subtasks']:
try:
for c in t['cmds']:
cmds.append(c)
except:
pass
return cmds
def extractCommandHierarchy(self, jobs):
textureCommands = []
commands = {}
for j in jobs:
for t in j['subtasks']:
#get all texture conversion tasks
if t['taskName'] == 'Job Textures':
try:
newCommands = self.extractCommandsForTexture(t)
#textureCommands.append(newCommands)
for c in newCommands:
textureCommands.append(c)
except:
pass
#get commands for all frames
else:
newCommands, frames = self.extractCommandsForFrame(t)
commands.update(newCommands)
return textureCommands, commands, frames
def main():
with open('data/blue/shots/spool.alf', 'r') as myfile:
data = myfile.read()
parser = prman_AlfParser()
textureCmds, Cmds, frames = parser.parseFile(data)
print('Frames: ', frames)
if __name__ == "__main__":
main()
| gpl-3.0 | 1,440,855,506,533,615,600 | 3,639,471,218,085,992,400 | 32.452555 | 128 | 0.47818 | false |
gshivani/ansible-modules-extras | cloud/misc/virt.py | 8 | 14024 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Virt management features
Copyright 2007, 2012 Red Hat, Inc
Michael DeHaan <[email protected]>
Seth Vidal <[email protected]>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: virt
short_description: Manages virtual machines supported by libvirt
description:
- Manages virtual machines supported by I(libvirt).
version_added: "0.2"
options:
name:
description:
- name of the guest VM being managed. Note that VM must be previously
defined with xml.
required: true
default: null
aliases: []
state:
description:
- Note that there may be some lag for state requests like C(shutdown)
since these refer only to VM states. After starting a guest, it may not
be immediately accessible.
required: false
choices: [ "running", "shutdown", "destroyed", "paused" ]
default: "no"
command:
description:
- in addition to state management, various non-idempotent commands are available. See examples
required: false
choices: ["create","status", "start", "stop", "pause", "unpause",
"shutdown", "undefine", "destroy", "get_xml", "autostart",
"freemem", "list_vms", "info", "nodeinfo", "virttype", "define"]
uri:
description:
- libvirt connection uri
required: false
defaults: qemu:///system
xml:
description:
- XML document used with the define command
required: false
default: null
requirements:
- "python >= 2.6"
- "libvirt-python"
author:
- "Ansible Core Team"
- '"Michael DeHaan (@mpdehaan)" <[email protected]>'
- '"Seth Vidal (@skvidal)" <[email protected]>'
'''
EXAMPLES = '''
# a playbook task line:
- virt: name=alpha state=running
# /usr/bin/ansible invocations
ansible host -m virt -a "name=alpha command=status"
ansible host -m virt -a "name=alpha command=get_xml"
ansible host -m virt -a "name=alpha command=create uri=lxc:///"
# a playbook example of defining and launching an LXC guest
tasks:
- name: define vm
virt: name=foo
command=define
xml="{{ lookup('template', 'container-template.xml.j2') }}"
uri=lxc:///
- name: start vm
virt: name=foo state=running uri=lxc:///
'''
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2
import sys
try:
import libvirt
except ImportError:
print "failed=True msg='libvirt python module unavailable'"
sys.exit(1)
ALL_COMMANDS = []
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define']
HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
VIRT_STATE_NAME_MAP = {
0 : "running",
1 : "running",
2 : "running",
3 : "paused",
4 : "shutdown",
5 : "shutdown",
6 : "crashed"
}
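# For reference (added comment, not in the original module): the integer keys correspond to
# libvirt's virDomainState codes (0 NOSTATE, 1 RUNNING, 2 BLOCKED, 3 PAUSED, 4 SHUTDOWN,
# 5 SHUTOFF, 6 CRASHED), collapsed here into the coarser names this module reports.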
class VMNotFound(Exception):
pass
class LibvirtConnection(object):
def __init__(self, uri, module):
self.module = module
cmd = "uname -r"
rc, stdout, stderr = self.module.run_command(cmd)
if "xen" in stdout:
conn = libvirt.open(None)
else:
conn = libvirt.open(uri)
if not conn:
raise Exception("hypervisor connection failure")
self.conn = conn
def find_vm(self, vmid):
"""
Extra bonus feature: vmid = -1 returns a list of everything
"""
conn = self.conn
vms = []
# this block of code borrowed from virt-manager:
# get working domain's name
ids = conn.listDomainsID()
for id in ids:
vm = conn.lookupByID(id)
vms.append(vm)
# get defined domain
names = conn.listDefinedDomains()
for name in names:
vm = conn.lookupByName(name)
vms.append(vm)
if vmid == -1:
return vms
for vm in vms:
if vm.name() == vmid:
return vm
raise VMNotFound("virtual machine %s not found" % vmid)
def shutdown(self, vmid):
return self.find_vm(vmid).shutdown()
def pause(self, vmid):
return self.suspend(vmid)
def unpause(self, vmid):
return self.resume(vmid)
def suspend(self, vmid):
return self.find_vm(vmid).suspend()
def resume(self, vmid):
return self.find_vm(vmid).resume()
def create(self, vmid):
return self.find_vm(vmid).create()
def destroy(self, vmid):
return self.find_vm(vmid).destroy()
def undefine(self, vmid):
return self.find_vm(vmid).undefine()
def get_status2(self, vm):
state = vm.info()[0]
return VIRT_STATE_NAME_MAP.get(state,"unknown")
def get_status(self, vmid):
state = self.find_vm(vmid).info()[0]
return VIRT_STATE_NAME_MAP.get(state,"unknown")
def nodeinfo(self):
return self.conn.getInfo()
def get_type(self):
return self.conn.getType()
def get_xml(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.XMLDesc(0)
def get_maxVcpus(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.maxVcpus()
def get_maxMemory(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.maxMemory()
def getFreeMemory(self):
return self.conn.getFreeMemory()
def get_autostart(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.autostart()
def set_autostart(self, vmid, val):
vm = self.conn.lookupByName(vmid)
return vm.setAutostart(val)
def define_from_xml(self, xml):
return self.conn.defineXML(xml)
class Virt(object):
def __init__(self, uri, module):
self.module = module
self.uri = uri
def __get_conn(self):
self.conn = LibvirtConnection(self.uri, self.module)
return self.conn
def get_vm(self, vmid):
self.__get_conn()
return self.conn.find_vm(vmid)
def state(self):
vms = self.list_vms()
state = []
for vm in vms:
state_blurb = self.conn.get_status(vm)
state.append("%s %s" % (vm,state_blurb))
return state
def info(self):
vms = self.list_vms()
info = dict()
for vm in vms:
data = self.conn.find_vm(vm).info()
# libvirt returns maxMem, memory, and cpuTime as long()'s, which
# xmlrpclib tries to convert to regular int's during serialization.
# This throws exceptions, so convert them to strings here and
# assume the other end of the xmlrpc connection can figure things
# out or doesn't care.
info[vm] = {
"state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
"maxMem" : str(data[1]),
"memory" : str(data[2]),
"nrVirtCpu" : data[3],
"cpuTime" : str(data[4]),
}
info[vm]["autostart"] = self.conn.get_autostart(vm)
return info
def nodeinfo(self):
self.__get_conn()
info = dict()
data = self.conn.nodeinfo()
info = {
"cpumodel" : str(data[0]),
"phymemory" : str(data[1]),
"cpus" : str(data[2]),
"cpumhz" : str(data[3]),
"numanodes" : str(data[4]),
"sockets" : str(data[5]),
"cpucores" : str(data[6]),
"cputhreads" : str(data[7])
}
return info
def list_vms(self, state=None):
self.conn = self.__get_conn()
vms = self.conn.find_vm(-1)
results = []
for x in vms:
try:
if state:
vmstate = self.conn.get_status2(x)
if vmstate == state:
results.append(x.name())
else:
results.append(x.name())
except:
pass
return results
def virttype(self):
return self.__get_conn().get_type()
def autostart(self, vmid):
self.conn = self.__get_conn()
return self.conn.set_autostart(vmid, True)
def freemem(self):
self.conn = self.__get_conn()
return self.conn.getFreeMemory()
def shutdown(self, vmid):
""" Make the machine with the given vmid stop running. Whatever that takes. """
self.__get_conn()
self.conn.shutdown(vmid)
return 0
def pause(self, vmid):
""" Pause the machine with the given vmid. """
self.__get_conn()
return self.conn.suspend(vmid)
def unpause(self, vmid):
""" Unpause the machine with the given vmid. """
self.__get_conn()
return self.conn.resume(vmid)
def create(self, vmid):
""" Start the machine via the given vmid """
self.__get_conn()
return self.conn.create(vmid)
def start(self, vmid):
""" Start the machine via the given id/name """
self.__get_conn()
return self.conn.create(vmid)
def destroy(self, vmid):
""" Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
self.__get_conn()
return self.conn.destroy(vmid)
def undefine(self, vmid):
""" Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """
self.__get_conn()
return self.conn.undefine(vmid)
def status(self, vmid):
"""
Return a state suitable for server consumption. Aka, codes.py values, not XM output.
"""
self.__get_conn()
return self.conn.get_status(vmid)
def get_xml(self, vmid):
"""
Receive a Vm id as input
Return an xml describing vm config returned by a libvirt call
"""
self.__get_conn()
return self.conn.get_xml(vmid)
def get_maxVcpus(self, vmid):
"""
Gets the max number of VCPUs on a guest
"""
self.__get_conn()
return self.conn.get_maxVcpus(vmid)
def get_max_memory(self, vmid):
"""
Gets the max memory on a guest
"""
self.__get_conn()
return self.conn.get_maxMemory(vmid)
def define(self, xml):
"""
Define a guest with the given xml
"""
self.__get_conn()
return self.conn.define_from_xml(xml)
def core(module):
state = module.params.get('state', None)
guest = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
v = Virt(uri, module)
res = {}
if state and command=='list_vms':
res = v.list_vms(state=state)
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
if state:
if not guest:
module.fail_json(msg = "state change requires a guest specified")
res['changed'] = False
if state == 'running':
if v.status(guest) == 'paused':
res['changed'] = True
res['msg'] = v.unpause(guest)
elif v.status(guest) != 'running':
res['changed'] = True
res['msg'] = v.start(guest)
elif state == 'shutdown':
if v.status(guest) != 'shutdown':
res['changed'] = True
res['msg'] = v.shutdown(guest)
elif state == 'destroyed':
if v.status(guest) != 'shutdown':
res['changed'] = True
res['msg'] = v.destroy(guest)
elif state == 'paused':
if v.status(guest) == 'running':
res['changed'] = True
res['msg'] = v.pause(guest)
else:
module.fail_json(msg="unexpected state")
return VIRT_SUCCESS, res
if command:
if command in VM_COMMANDS:
if not guest:
module.fail_json(msg = "%s requires 1 argument: guest" % command)
if command == 'define':
if not xml:
module.fail_json(msg = "define requires xml argument")
try:
v.get_vm(guest)
except VMNotFound:
v.define(xml)
res = {'changed': True, 'created': guest}
return VIRT_SUCCESS, res
res = getattr(v, command)(guest)
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
else:
module.fail_json(msg="Command %s not recognized" % basecmd)
module.fail_json(msg="expected state or command parameter to be specified")
def main():
module = AnsibleModule(argument_spec=dict(
name = dict(aliases=['guest']),
state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
command = dict(choices=ALL_COMMANDS),
uri = dict(default='qemu:///system'),
xml = dict(),
))
rc = VIRT_SUCCESS
try:
rc, result = core(module)
except Exception, e:
module.fail_json(msg=str(e))
if rc != 0: # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 6,642,548,976,279,887,000 | 5,484,692,244,837,528,000 | 27.160643 | 116 | 0.558115 | false |
Tribler/decentralized-mortgage-market | market/models/investment.py | 2 | 2937 | from enum import Enum as PyEnum
from base64 import urlsafe_b64encode
from storm.properties import Int, Float, RawStr
from storm.references import ReferenceSet
from protobuf_to_dict import dict_to_protobuf, protobuf_to_dict
from market.community.market.conversion_pb2 import Investment as InvestmentPB
from market.database.types import Enum
from market.models.transfer import Transfer
class InvestmentStatus(PyEnum):
NONE = 0
PENDING = 1
ACCEPTED = 2
REJECTED = 3
FORSALE = 4
class Investment(object):
"""
This class represents an investment of someone in a specific campaign.
"""
__storm_table__ = 'investment'
__storm_primary__ = 'id', 'user_id'
id = Int()
user_id = RawStr()
owner_id = RawStr()
amount = Float()
interest_rate = Float()
campaign_id = Int()
campaign_user_id = RawStr()
status = Enum(InvestmentStatus)
contract_id = RawStr()
transfers = ReferenceSet((id, user_id), (Transfer.investment_id, Transfer.investment_user_id))
def __init__(self, identifier, user_id, amount, interest_rate, campaign_id, campaign_user_id, status, contract_id=''):
self.id = identifier
self.user_id = user_id
self.amount = amount
self.interest_rate = interest_rate
self.campaign_id = campaign_id
self.campaign_user_id = campaign_user_id
self.status = status
self.contract_id = contract_id
def to_dict(self, api_response=False):
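        # api_response=True base64-encodes the binary ids and exposes the status name rather than its numeric value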
return {
'id': self.id,
'user_id': urlsafe_b64encode(self.user_id) if api_response else self.user_id,
'amount': self.amount,
'interest_rate': self.interest_rate,
'campaign_id': self.campaign_id,
'campaign_user_id': urlsafe_b64encode(self.campaign_user_id) if api_response else self.campaign_user_id,
'status': self.status.name if api_response else self.status.value,
'contract_id': urlsafe_b64encode(self.contract_id) if api_response else self.contract_id
}
@staticmethod
def from_dict(investment_dict):
try:
status = InvestmentStatus(investment_dict['status'])
except ValueError:
return None
return Investment(investment_dict['id'],
investment_dict['user_id'],
investment_dict['amount'],
investment_dict['interest_rate'],
investment_dict['campaign_id'],
investment_dict['campaign_user_id'],
status,
investment_dict['contract_id'])
def to_bin(self):
return dict_to_protobuf(InvestmentPB, self.to_dict()).SerializeToString()
@staticmethod
def from_bin(binary):
msg = InvestmentPB()
msg.ParseFromString(binary)
return Investment.from_dict(protobuf_to_dict(msg))
| gpl-3.0 | -8,920,682,484,534,007,000 | -8,750,055,227,349,285,000 | 33.964286 | 122 | 0.618658 | false |
neerajvashistha/pa-dude | lib/python2.7/site-packages/docutils/readers/pep.py | 136 | 1555 | # $Id: pep.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
from docutils.readers import standalone
from docutils.transforms import peps, references, misc, frontmatter
from docutils.parsers import rst
class Reader(standalone.Reader):
supported = ('pep',)
"""Contexts this reader supports."""
settings_spec = (
'PEP Reader Option Defaults',
'The --pep-references and --rfc-references options (for the '
'reStructuredText parser) are on by default.',
())
config_section = 'pep reader'
config_section_dependencies = ('readers', 'standalone reader')
def get_transforms(self):
transforms = standalone.Reader.get_transforms(self)
# We have PEP-specific frontmatter handling.
transforms.remove(frontmatter.DocTitle)
transforms.remove(frontmatter.SectionSubTitle)
transforms.remove(frontmatter.DocInfo)
transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes])
return transforms
settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
inliner_class = rst.states.Inliner
def __init__(self, parser=None, parser_name=None):
"""`parser` should be ``None``."""
if parser is None:
parser = rst.Parser(rfc2822=True, inliner=self.inliner_class())
standalone.Reader.__init__(self, parser, '')
| mit | -13,705,808,763,925,704 | 6,396,309,691,809,998,000 | 31.395833 | 75 | 0.675241 | false |
jokajak/itweb | data/env/lib/python2.6/site-packages/MarkupSafe-0.11-py2.6-linux-x86_64.egg/markupsafe/tests.py | 24 | 2610 | import gc
import unittest
from markupsafe import Markup, escape, escape_silent
class MarkupTestCase(unittest.TestCase):
def test_markup_operations(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == unicode(escape(unsafe)) + unicode(safe)
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
'<em><bad user></em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
} == '<em><bad user></em>'
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_all_set(self):
import markupsafe as markup
for item in markup.__all__:
getattr(markup, item)
def test_escape_silent(self):
assert escape_silent(None) == Markup()
assert escape(None) == Markup(None)
assert escape_silent('<foo>') == Markup(u'<foo>')
class MarkupLeakTestCase(unittest.TestCase):
def test_markup_leaks(self):
counts = set()
for count in xrange(20):
for item in xrange(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MarkupTestCase))
# this test only tests the c extension
if not hasattr(escape, 'func_code'):
suite.addTest(unittest.makeSuite(MarkupLeakTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| gpl-3.0 | 7,616,760,054,638,115,000 | -1,589,423,943,333,524,500 | 31.625 | 82 | 0.558238 | false |
Joergen/olympia | apps/pages/views.py | 15 | 2236 | from collections import defaultdict
from django.conf import settings
from django.shortcuts import render
from devhub.models import ActivityLog
from users.models import UserProfile
def credits(request):
developers = (UserProfile.objects
.exclude(display_name=None)
.filter(groupuser__group__name='Developers Credits')
.order_by('display_name')
.distinct())
past_developers = (UserProfile.objects
.exclude(display_name=None)
.filter(
groupuser__group__name='Past Developers Credits')
.order_by('display_name')
.distinct())
other_contribs = (UserProfile.objects
.exclude(display_name=None)
.filter(
groupuser__group__name='Other Contributors Credits')
.order_by('display_name')
.distinct())
languages = sorted(list(
set(settings.AMO_LANGUAGES + settings.HIDDEN_LANGUAGES) -
set(['en-US'])))
localizers = []
for lang in languages:
users = (UserProfile.objects
.exclude(display_name=None)
.filter(groupuser__group__name='%s Localizers' % lang)
.order_by('display_name')
.distinct())
if users:
localizers.append((lang, users))
total_reviews = (ActivityLog.objects.total_reviews()
.filter(approval_count__gt=10))
reviewers = defaultdict(list)
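    # bucket reviewers by their total approval count: >1000, >500, >100 and >10 reviews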
for total in total_reviews:
cnt = total.get('approval_count', 0)
if cnt > 1000:
reviewers[1000].append(total)
elif cnt > 500:
reviewers[500].append(total)
elif cnt > 100:
reviewers[100].append(total)
elif cnt > 10:
reviewers[10].append(total)
context = {
'developers': developers,
'past_developers': past_developers,
'other_contribs': other_contribs,
'localizers': localizers,
'reviewers': reviewers,
}
return render(request, 'pages/credits.html', context)
| bsd-3-clause | 825,930,350,491,399,000 | 5,720,966,901,825,939,000 | 32.878788 | 78 | 0.541592 | false |
trishnaguha/ansible | lib/ansible/modules/cloud/google/gcp_spanner_instance_facts.py | 4 | 5935 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_instance_facts
description:
- Gather facts for GCP Instance
short_description: Gather facts for GCP Instance
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options: {}
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a instance facts
gcp_spanner_instance_facts:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- A unique identifier for the instance, which cannot be changed after the instance
is created. Values are of the form projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9].
The final segment of the name must be between 6 and 30 characters in length.
returned: success
type: str
config:
description:
- A reference to the instance configuration.
returned: success
type: str
displayName:
description:
- The descriptive name for this instance as it appears in UIs. Must be unique
per project and between 4 and 30 characters in length.
returned: success
type: str
nodeCount:
description:
- The number of nodes allocated to this instance.
returned: success
type: int
labels:
description:
- Cloud Labels are a flexible and lightweight mechanism for organizing cloud
resources into groups that reflect a customer's organizational needs and deployment
strategies. Cloud Labels can be used to filter collections of resources. They
can be used to control how resource metrics are aggregated. And they can be
used as arguments to policy management rules (e.g. route, firewall, load balancing,
etc.).
- 'Label keys must be between 1 and 63 characters long and must conform to the
following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.'
- Label values must be between 0 and 63 characters long and must conform to
the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
- No more than 64 labels can be associated with a given resource.
- See U(https://goo.gl/xmQnxf) for more information on and examples of labels.
- 'If you plan to use labels in your own code, please note that additional characters
may be allowed in the future. And so you are advised to use an internal label
representation, such as JSON, which doesn''t rely upon specific characters
being disallowed. For example, representing labels as the string: name + "_"
+ value would prove problematic if we were to allow "_" in a future release.'
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(
argument_spec=dict(
)
)
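    # fall back to the Spanner admin scope when the caller does not provide any scopes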
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
items = fetch_list(module, collection(module))
if items.get('instances'):
items = items.get('instances')
else:
items = []
return_value = {
'items': items
}
module.exit_json(**return_value)
def collection(module):
return "https://spanner.googleapis.com/v1/projects/{project}/instances".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'spanner')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | -5,907,693,467,391,471,000 | 1,988,811,430,406,751,000 | 33.505814 | 99 | 0.57658 | false |
rainysia/dotfiles | doc/python/test/selenium_localchromeff_remoteIE.py | 1 | 1961 | #!/usr/bin/env python
# coding=utf-8
#chrome localhost
'''
import os
from selenium import webdriver
chromedriver = "/home/softs/selenium/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
driver.get("http://baidu.com")
driver.quit()
'''
#firefox(iceweasel) localhost
'''
import os
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://www.baidu.com')
browser.save_screenshot('screen.png')
browser.quit()
'''
#remote chrome
#remote IE
import os
# For Chinese
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
ie_desired_cap = {'os': 'Windows', 'os_version': '2008', 'browser': 'IE', 'browser_version': '9.0', 'resolution' : '1024x768'}
tommy_remote_url = 'http://192.168.85.123:4444/wd/hub'
derek_remote_url = 'http://192.168.87.72:18181/wd/hub'
# command_executor = 'http://USERNAME:[email protected]:80/wd/hub'
driver = webdriver.Remote(
command_executor=derek_remote_url,
desired_capabilities=ie_desired_cap)
# Baidu search box uses name="wd" (the Google equivalent would be name=q)
driver.get("http://www.baidu.com")
eg_title = "百度" #有中文,需要import sys reload(sys) sys.setdefaultencoding('utf-8')
print driver.title
#print help(driver)
try:
if not eg_title in driver.title:
raise Exception("Unable to load ",eg_title," page!")
elem = driver.find_element_by_name("wd")
elem.send_keys("domain")
elem.submit()
#two ways to wait, explict & implicit
#WebDriverWait.until(condition-that-finds-the-element) #explict
#driver.manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS) #implicit
print driver.title
sleep(10)
print '12345\n'
except Exception, e:
raise e
finally:
#driver.implicitly_wait(10)
#driver.set_script_timeout(10)
driver.quit()
| mit | 2,007,293,535,977,960,000 | 8,968,695,936,949,232,000 | 25.310811 | 126 | 0.717514 | false |
CanalTP/navitia | source/jormungandr/jormungandr/scenarios/tests/journey_compare_tests.py | 1 | 43791 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from copy import deepcopy
from jormungandr.scenarios import journey_filter as jf
from jormungandr.scenarios.utils import DepartureJourneySorter, ArrivalJourneySorter
import navitiacommon.response_pb2 as response_pb2
from jormungandr.scenarios.new_default import sort_journeys
from jormungandr.utils import str_to_time_stamp
import random
import itertools
import functools
def empty_journeys_test():
response = response_pb2.Response()
sort_journeys(response, 'arrival_time', True)
assert not response.journeys
def different_arrival_times_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0758")
journey2.duration = 2 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 2 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0758")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
def different_departure_times_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.departure_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.departure_date_time = str_to_time_stamp("20140422T0758")
journey2.duration = 2 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 2 * 60
sort_journeys(response, 'departure_time', True)
assert response.journeys[0].departure_date_time == str_to_time_stamp("20140422T0758")
assert response.journeys[1].departure_date_time == str_to_time_stamp("20140422T0800")
def different_duration_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 3 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 3 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 3 * 60
assert response.journeys[1].duration == 5 * 60
def different_nb_transfers_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 25 * 60
journey1.nb_transfers = 1
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey1.sections[1].type = response_pb2.TRANSFER
journey1.sections[1].duration = 3 * 60
journey1.sections[2].type = response_pb2.WAITING
journey1.sections[2].duration = 2 * 60
journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[3].duration = 15 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 25 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 25 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 25 * 60
assert response.journeys[1].duration == 25 * 60
assert response.journeys[0].nb_transfers == 0
assert response.journeys[1].nb_transfers == 1
def different_duration_non_pt_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 25 * 60
journey1.nb_transfers = 1
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey1.sections[1].type = response_pb2.TRANSFER
journey1.sections[1].duration = 3 * 60
journey1.sections[2].type = response_pb2.WAITING
journey1.sections[2].duration = 2 * 60
journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[3].duration = 15 * 60
journey1.sections[4].type = response_pb2.STREET_NETWORK
journey1.sections[4].duration = 10 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 25 * 60
journey2.nb_transfers = 1
journey2.sections.add()
journey2.sections.add()
journey2.sections.add()
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 5 * 60
journey2.sections[1].type = response_pb2.TRANSFER
journey2.sections[1].duration = 3 * 60
journey2.sections[2].type = response_pb2.WAITING
journey2.sections[2].duration = 2 * 60
journey2.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[3].duration = 15 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 25 * 60
assert response.journeys[1].duration == 25 * 60
assert response.journeys[0].nb_transfers == 1
assert response.journeys[1].nb_transfers == 1
# We want to have journey2 in first, this is the one with 4 sections
assert len(response.journeys[0].sections) == 4
assert len(response.journeys[1].sections) == 5
def create_dummy_journey():
journey = response_pb2.Journey()
journey.arrival_date_time = str_to_time_stamp("20140422T0800")
journey.duration = 25 * 60
journey.nb_transfers = 1
s = journey.sections.add()
s.type = response_pb2.PUBLIC_TRANSPORT
s.origin.uri = "stop_point_1"
s.destination.uri = "stop_point_2"
s.vehicle_journey.uri = "vj_toto"
s.duration = 5 * 60
s = journey.sections.add()
s.type = response_pb2.TRANSFER
s.duration = 3 * 60
s = journey.sections.add()
s.type = response_pb2.WAITING
s.duration = 2 * 60
s = journey.sections.add()
s.type = response_pb2.PUBLIC_TRANSPORT
s.origin.uri = "stop_point_3"
s.destination.uri = "stop_point_4"
s.duration = 15 * 60
s = journey.sections.add()
s.type = response_pb2.STREET_NETWORK
s.duration = 10 * 60
return journey
def journey_pairs_gen(list_responses):
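    # yield every unordered pair of qualified journeys, pooled across all responses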
return itertools.combinations(jf.get_qualified_journeys(list_responses), 2)
def test_get_qualified_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.tags.append("a_tag")
journey2 = responses[0].journeys.add()
journey2.tags.append("to_delete")
journey3 = responses[0].journeys.add()
journey3.tags.append("another_tag")
journey3.tags.append("to_delete")
for qualified in jf.get_qualified_journeys(responses):
assert qualified.tags[0] == 'a_tag'
def test_num_qualifed_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.tags.append("a_tag")
journey2 = responses[0].journeys.add()
journey2.tags.append("to_delete")
journey3 = responses[0].journeys.add()
journey3.tags.append("another_tag")
assert jf.nb_qualifed_journeys(responses) == 2
def test_similar_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
journey2 = responses[0].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[0].uris.vehicle_journey = 'bob'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert len(list(jf.get_qualified_journeys(responses))) == 1
def test_similar_journeys_test2():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bob'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert len(list(jf.get_qualified_journeys(responses))) == 1
def test_similar_journeys_test3():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bobette'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_different_transfer():
"""
If 2 journeys take the same vjs but with a different number of sections,
one should be filtered
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections.add()
journey1.duration = 42
journey1.sections[-1].uris.vehicle_journey = 'bobette'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bobette'
jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_different_waiting_durations():
"""
If 2 journeys take the same vj, same number of sections but with different waiting durations,
    filter out the one with the smaller waiting duration
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 600
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 600
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 25
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 175
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {})
assert 'to_delete' not in journey2.tags
assert 'to_delete' in journey1.tags
def test_similar_journeys_multi_transfer_and_different_waiting_durations():
"""
    If 2 journeys take the same vj, with the same number of sections and several waiting sections of different durations,
    compute each journey's "min waiting duration"
    and keep the journey whose "min waiting duration" is larger
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 1000
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 10
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 190
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'boby'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 1000
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 20
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 180
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 100
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 100
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'boby'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_with_and_without_waiting_section():
"""
If 2 journeys take the same vj, one with a waiting section and another without,
    filter out the one with a transfer but without a waiting section
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 600
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 600
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_walking_bike():
"""
    If we have 2 direct paths, one walking and one by bike, we should
not filter any journey
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 42
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 42
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Bike
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' not in journey2.tags
def test_similar_journeys_car_park():
"""
We have to consider a journey with
CAR / PARK / WALK to be equal to CAR / PARK
"""
responses = [response_pb2.Response()]
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Car
journey1.sections.add()
journey1.sections[-1].type = response_pb2.PARK
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Car
journey2.sections.add()
journey2.sections[-1].type = response_pb2.PARK
assert jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_similar_journeys_bss_park():
"""
We have to consider a journey with
    WALK / GET A BIKE / BSS to be equal to GET A BIKE / BSS
"""
responses = [response_pb2.Response()]
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey1.sections.add()
journey1.sections[-1].type = response_pb2.BSS_RENT
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Bss
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.BSS_RENT
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Bss
assert jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_similar_journeys_crowfly_rs():
"""
We have to consider a journey with
CROWFLY WALK to be different than CROWFLY Ridesharing
"""
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.CROW_FLY
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.CROW_FLY
journey2.sections[-1].street_network.mode = response_pb2.Ridesharing
assert not jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_departure_sort():
"""
we want to sort by departure hour, then by duration
"""
j1 = response_pb2.Journey()
j1.departure_date_time = str_to_time_stamp('20151005T071000')
j1.arrival_date_time = str_to_time_stamp('20151005T081900')
j1.duration = j1.arrival_date_time - j1.departure_date_time
j1.nb_transfers = 0
j2 = response_pb2.Journey()
j2.departure_date_time = str_to_time_stamp('20151005T072200')
j2.arrival_date_time = str_to_time_stamp('20151005T083500')
j2.duration = j2.arrival_date_time - j2.departure_date_time
j2.nb_transfers = 0
j3 = response_pb2.Journey()
j3.departure_date_time = str_to_time_stamp('20151005T074500')
j3.arrival_date_time = str_to_time_stamp('20151005T091200')
j3.duration = j3.arrival_date_time - j3.departure_date_time
j3.nb_transfers = 0
j4 = response_pb2.Journey()
j4.departure_date_time = str_to_time_stamp('20151005T074500')
j4.arrival_date_time = str_to_time_stamp('20151005T091100')
j4.duration = j4.arrival_date_time - j4.departure_date_time
j4.nb_transfers = 0
j5 = response_pb2.Journey()
j5.departure_date_time = str_to_time_stamp('20151005T074500')
j5.arrival_date_time = str_to_time_stamp('20151005T090800')
j5.duration = j5.arrival_date_time - j5.departure_date_time
j5.nb_transfers = 0
result = [j1, j2, j3, j4, j5]
random.shuffle(result)
comparator = DepartureJourneySorter(True)
result.sort(key=functools.cmp_to_key(comparator))
assert result[0] == j1
assert result[1] == j2
assert result[2] == j5
assert result[3] == j4
assert result[4] == j3
def test_arrival_sort():
"""
we want to sort by arrival hour, then by duration
"""
j1 = response_pb2.Journey()
j1.departure_date_time = str_to_time_stamp('20151005T071000')
j1.arrival_date_time = str_to_time_stamp('20151005T081900')
j1.duration = j1.arrival_date_time - j1.departure_date_time
j1.nb_transfers = 0
j2 = response_pb2.Journey()
j2.departure_date_time = str_to_time_stamp('20151005T072200')
j2.arrival_date_time = str_to_time_stamp('20151005T083500')
j2.duration = j2.arrival_date_time - j2.departure_date_time
j2.nb_transfers = 0
j3 = response_pb2.Journey()
j3.departure_date_time = str_to_time_stamp('20151005T074500')
j3.arrival_date_time = str_to_time_stamp('20151005T091200')
j3.duration = j3.arrival_date_time - j3.departure_date_time
j3.nb_transfers = 0
j4 = response_pb2.Journey()
j4.departure_date_time = str_to_time_stamp('20151005T075000')
j4.arrival_date_time = str_to_time_stamp('20151005T091200')
j4.duration = j4.arrival_date_time - j4.departure_date_time
j4.nb_transfers = 0
j5 = response_pb2.Journey()
j5.departure_date_time = str_to_time_stamp('20151005T075500')
j5.arrival_date_time = str_to_time_stamp('20151005T091200')
j5.duration = j5.arrival_date_time - j5.departure_date_time
j5.nb_transfers = 0
result = [j1, j2, j3, j4, j5]
random.shuffle(result)
comparator = ArrivalJourneySorter(True)
result.sort(key=functools.cmp_to_key(comparator))
assert result[0] == j1
assert result[1] == j2
assert result[2] == j5
assert result[3] == j4
assert result[4] == j3
def test_heavy_journey_walking():
"""
we don't filter any journey with walking
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
def test_heavy_journey_bike():
"""
    in the first case the duration of the biking section is greater than the min value, so we keep the journey;
    in the second case the duration is smaller than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.durations.bike = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
journey.durations.bike = journey.sections[-1].duration = 5
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_filter_wrapper():
"""
Testing that filter_wrapper is fine (see filter_wrapper doc)
"""
class LoveHateFilter(jf.SingleJourneyFilter):
message = 'i_dont_like_you'
def __init__(self, love=True):
self.love = love
def filter_func(self, journey):
return self.love
ref_journey = response_pb2.Journey()
    # first we test with debug mode deactivated (each time with both the OK filter and the KO filter)
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=False, filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=False, filter_obj=LoveHateFilter(love=False))
assert not wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
    # test without passing is_debug (should default to deactivated)
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(filter_obj=LoveHateFilter(love=False))
assert not wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
# test when debug-mode is activated
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=True, filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=True, filter_obj=LoveHateFilter(love=False))
assert wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' in j.tags
def test_heavy_journey_car():
"""
    in the first case the duration of the car section is greater than the min value, so we keep the journey;
    in the second case the duration is smaller than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.durations.car = journey.sections[-1].duration = 25
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
journey.durations.car = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_heavy_journey_taxi():
"""
    in the first case the duration of the taxi section is greater than the min value, so we keep the journey;
    in the second case the duration is smaller than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.durations.taxi = journey.sections[-1].duration = 25
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_taxi=20)
assert f.filter_func(journey)
journey.durations.taxi = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_taxi=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_heavy_journey_bss():
"""
    we should not remove any bss journey since it already competes with the walking journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.BSS_RENT
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.BSS_PUT_BACK
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
journey.durations.bike = 5
journey.durations.walking = 10
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
def test_activate_deactivate_min_bike():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : Bike
B->C : public transport
C->D : Bike
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 7
journey.durations.bike = 12
f = jf.FilterTooShortHeavyJourneys(min_bike=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike'])
assert f.filter_func(journey)
# case 6: request with bike only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.bike = 29
f = jf.FilterTooShortHeavyJourneys(min_bike=17, orig_modes=['bike'], dest_modes=['bike'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
# case 9: request with bike in origin_mode and bike, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.bike = 12
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike'], dest_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_activate_deactivate_min_car():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : car
B->C : public transport
C->D : car
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.sections[-1].duration = 7
journey.durations.car = 12
f = jf.FilterTooShortHeavyJourneys(min_car=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car'])
assert f.filter_func(journey)
# case 6: request with car only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.car = 29
f = jf.FilterTooShortHeavyJourneys(min_car=17, orig_modes=['car'], dest_modes=['car'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car', 'walking'])
assert not f.filter_func(journey)
    # case 9: request with car in origin_mode and car, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.car = 12
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car'], dest_modes=['car', 'walking'])
assert not f.filter_func(journey)
def test_activate_deactivate_min_taxi():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : taxi
B->C : public transport
C->D : taxi
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.sections[-1].duration = 7
journey.durations.taxi = 12
f = jf.FilterTooShortHeavyJourneys(min_taxi=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi'])
assert f.filter_func(journey)
# case 6: request with taxi only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.taxi = 29
f = jf.FilterTooShortHeavyJourneys(min_taxi=17, orig_modes=['taxi'], dest_modes=['taxi'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
    # case 9: request with taxi in origin_mode and taxi, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.taxi = 12
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi'], dest_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
def test_filter_direct_path_mode_car():
# is_dp and not is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["bike"])
assert not f.filter_func(journey)
# is_dp and is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["car"])
assert f.filter_func(journey)
# is_dp and is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["taxi", "surf", "car", "bike"])
assert f.filter_func(journey)
# not is_dp and not is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
f = jf.FilterDirectPathMode(["bike"])
assert f.filter_func(journey)
    # not is_dp and is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
f = jf.FilterDirectPathMode(["car"])
assert f.filter_func(journey)
def test_heavy_journey_ridesharing():
"""
    in the first case the duration of the ridesharing section is greater than the min value, so we keep the journey;
    in the second case the duration is smaller than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Ridesharing
journey.durations.ridesharing = journey.sections[-1].duration = 25
# Ridesharing duration is superior to min_ridesharing value so we have ridesharing section
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing', 'walking'])
assert f.filter_func(journey)
# Ridesharing duration is inferior to min_ridesharing value but there is no walking option
# In this case we have ridesharing section
journey.durations.ridesharing = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing'])
assert f.filter_func(journey)
# Ridesharing duration is inferior to min_ridesharing value and there is also walking option
# In this case we have reject ridesharing section
journey.durations.ridesharing = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing', 'walking'])
assert not f.filter_func(journey)
| agpl-3.0 | 2,605,303,475,678,375,000 | -8,807,574,945,665,231,000 | 34.982744 | 115 | 0.689343 | false |
jatinmistry13/pattern | pattern/web/pdf/pdfdevice.py | 56 | 5319 | #!/usr/bin/env python2
import sys
from utils import mult_matrix, translate_matrix
from utils import enc, bbox2str
from pdffont import PDFUnicodeNotDefined
## PDFDevice
##
class PDFDevice(object):
debug = 0
def __init__(self, rsrcmgr):
self.rsrcmgr = rsrcmgr
self.ctm = None
return
def __repr__(self):
return '<PDFDevice>'
def close(self):
return
def set_ctm(self, ctm):
self.ctm = ctm
return
def begin_tag(self, tag, props=None):
return
def end_tag(self):
return
def do_tag(self, tag, props=None):
return
def begin_page(self, page, ctm):
return
def end_page(self, page):
return
def begin_figure(self, name, bbox, matrix):
return
def end_figure(self, name):
return
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
return
def render_image(self, name, stream):
return
def render_string(self, textstate, seq):
return
## PDFTextDevice
##
class PDFTextDevice(PDFDevice):
def render_string(self, textstate, seq):
matrix = mult_matrix(textstate.matrix, self.ctm)
font = textstate.font
fontsize = textstate.fontsize
scaling = textstate.scaling * .01
charspace = textstate.charspace * scaling
wordspace = textstate.wordspace * scaling
rise = textstate.rise
if font.is_multibyte():
wordspace = 0
dxscale = .001 * fontsize * scaling
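        # numbers in a TJ array are expressed in thousandths of a text-space unit, hence the .001 factor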
if font.is_vertical():
textstate.linematrix = self.render_string_vertical(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
else:
textstate.linematrix = self.render_string_horizontal(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
return
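    # Editorial note (added illustration, not part of the original module): the
    # numeric adjustments in a PDF TJ sequence are expressed in thousandths of a
    # text-space unit, which is why dxscale is .001 * fontsize * scaling; e.g. an
    # entry of 250 at fontsize 10 and scaling 1 moves the pen by 250 * .001 * 10 = 2.5 units.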
def render_string_horizontal(self, seq, matrix, (x,y),
font, fontsize, scaling, charspace, wordspace, rise, dxscale):
needcharspace = False
for obj in seq:
if isinstance(obj, int) or isinstance(obj, float):
x -= obj*dxscale
needcharspace = True
else:
for cid in font.decode(obj):
if needcharspace:
x += charspace
x += self.render_char(translate_matrix(matrix, (x,y)),
font, fontsize, scaling, rise, cid)
if cid == 32 and wordspace:
x += wordspace
needcharspace = True
return (x, y)
def render_string_vertical(self, seq, matrix, (x,y),
font, fontsize, scaling, charspace, wordspace, rise, dxscale):
needcharspace = False
for obj in seq:
if isinstance(obj, int) or isinstance(obj, float):
y -= obj*dxscale
needcharspace = True
else:
for cid in font.decode(obj):
if needcharspace:
y += charspace
y += self.render_char(translate_matrix(matrix, (x,y)),
font, fontsize, scaling, rise, cid)
if cid == 32 and wordspace:
y += wordspace
needcharspace = True
return (x, y)
def render_char(self, matrix, font, fontsize, scaling, rise, cid):
return 0
## TagExtractor
##
class TagExtractor(PDFDevice):
def __init__(self, rsrcmgr, outfp, codec='utf-8', debug=0):
PDFDevice.__init__(self, rsrcmgr)
self.outfp = outfp
self.codec = codec
self.debug = debug
self.pageno = 0
self._stack = []
return
def render_string(self, textstate, seq):
font = textstate.font
text = ''
for obj in seq:
if not isinstance(obj, str): continue
chars = font.decode(obj)
for cid in chars:
try:
char = font.to_unichr(cid)
text += char
except PDFUnicodeNotDefined:
pass
self.outfp.write(enc(text, self.codec))
return
def begin_page(self, page, ctm):
self.outfp.write('<page id="%s" bbox="%s" rotate="%d">' %
(self.pageno, bbox2str(page.mediabox), page.rotate))
return
def end_page(self, page):
self.outfp.write('</page>\n')
self.pageno += 1
return
def begin_tag(self, tag, props=None):
s = ''
if isinstance(props, dict):
s = ''.join( ' %s="%s"' % (enc(k), enc(str(v))) for (k,v)
in sorted(props.iteritems()) )
self.outfp.write('<%s%s>' % (enc(tag.name), s))
self._stack.append(tag)
return
def end_tag(self):
assert self._stack
tag = self._stack.pop(-1)
self.outfp.write('</%s>' % enc(tag.name))
return
def do_tag(self, tag, props=None):
self.begin_tag(tag, props)
self._stack.pop(-1)
return
| bsd-3-clause | -8,214,369,853,768,839,000 | -7,771,881,257,096,914,000 | 29.568966 | 95 | 0.522279 | false |
ami/lob-python | lob/api_requestor.py | 1 | 2714 | import requests
import lob
import json
import resource
from lob import error
from version import VERSION
def _is_file_like(obj):
"""
Checks if an object is file-like enough to be sent to requests.
In particular, file, StringIO and cStringIO objects are file-like.
Refs http://stackoverflow.com/questions/3450857/python-determining-if-an-object-is-file-like
"""
return hasattr(obj, 'read') and hasattr(obj, 'seek')
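# Illustrative doctest-style check (editorial addition, not part of the original file):
# >>> import io
# >>> _is_file_like(io.BytesIO(b"data"))
# True
# >>> _is_file_like("front.pdf")
# False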
class APIRequestor(object):
def __init__(self, key=None):
self.api_key = key or lob.api_key
def parse_response(self, resp):
payload = json.loads(resp.content)
if resp.status_code == 200:
return payload
elif resp.status_code == 401:
raise error.AuthenticationError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
elif resp.status_code in [404, 422]:
raise error.InvalidRequestError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
else:
#pragma: no cover
raise error.APIError(payload['errors'][0]['message'], resp.content, resp.status_code, resp) # pragma: no cover
def request(self, method, url, params=None):
headers = {
'User-Agent': 'Lob/v1 PythonBindings/%s' % VERSION
}
if hasattr(lob, 'api_version'):
headers['Lob-Version'] = lob.api_version
if method == 'get':
return self.parse_response(
requests.get(lob.api_base + url, auth=(self.api_key, ''), params=params, headers=headers)
)
elif method == 'delete':
return self.parse_response(
requests.delete(lob.api_base + url, auth=(self.api_key, ''), headers=headers)
)
elif method == 'post':
data = {}
files = params.pop('files', {})
explodedParams = {}
for k,v in params.iteritems():
if isinstance(v, dict) and not isinstance(v, resource.LobObject):
for k2,v2 in v.iteritems():
explodedParams[k + '[' + k2 + ']'] = v2
else:
explodedParams[k] = v
for k,v in explodedParams.iteritems():
if _is_file_like(v):
files[k] = v
else:
if isinstance(v, resource.LobObject):
data[k] = v.id
else:
data[k] = v
return self.parse_response(
requests.post(lob.api_base + url, auth=(self.api_key, ''), data=data, files=files, headers=headers)
)
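# Editorial illustration (assumed call, not part of the original file): the loop in
# request() flattens one level of nested dicts into Rails-style keys before posting,
# so params = {'to': {'name': 'Alice'}, 'file': open('front.pdf', 'rb')} is sent as
# data={'to[name]': 'Alice'} with files={'file': <the open file object>}.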
| mit | -336,832,162,018,910,900 | -9,001,635,774,844,663,000 | 34.710526 | 122 | 0.542373 | false |
jupierce/openshift-tools | openshift/installer/vendored/openshift-ansible-3.4.40/lookup_plugins/oo_option.py | 37 | 2602 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
oo_option lookup plugin for openshift-ansible
Usage:
- debug:
msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
This returns, by order of priority:
* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> …`
* if it exists, the environment variable named `<key>`
* if none of the above conditions are met, empty string is returned
'''
import os
# pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring
try:
# ansible-2.0
from ansible.plugins.lookup import LookupBase
except ImportError:
# ansible-1.9.x
class LookupBase(object):
def __init__(self, basedir=None, runner=None, **kwargs):
self.runner = runner
self.basedir = self.runner.basedir
def get_basedir(self, variables):
return self.basedir
# Reason: disable too-few-public-methods because the `run` method is the only
# one required by the Ansible API
# Status: permanently disabled
# pylint: disable=too-few-public-methods
class LookupModule(LookupBase):
''' oo_option lookup plugin main class '''
# Reason: disable unused-argument because Ansible is calling us with many
# parameters we are not interested in.
# The lookup plugins of Ansible have this kwargs “catch-all” parameter
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
def __init__(self, basedir=None, **kwargs):
''' Constructor '''
self.basedir = basedir
# Reason: disable unused-argument because Ansible is calling us with many
# parameters we are not interested in.
# The lookup plugins of Ansible have this kwargs “catch-all” parameter
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
def run(self, terms, variables, **kwargs):
''' Main execution path '''
ret = []
for term in terms:
option_name = term.split()[0]
cli_key = 'cli_' + option_name
if 'vars' in variables and cli_key in variables['vars']:
ret.append(variables['vars'][cli_key])
elif option_name in os.environ:
ret.append(os.environ[option_name])
else:
ret.append('')
return ret
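# Editorial illustration (assumed option name, not part of the plugin): for
#   {{ lookup('oo_option', 'deployment_type') }}
# run() appends variables['vars']['cli_deployment_type'] when the play defines it,
# falls back to os.environ['deployment_type'] when that is set, and otherwise
# appends the empty string.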
| apache-2.0 | -5,123,773,338,354,557,000 | -1,492,159,507,415,471,600 | 34.027027 | 143 | 0.645062 | false |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/tabnanny.py | 394 | 11336 | #! /usr/bin/env python
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error, msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print statement.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%r: listing directory" % (file,)
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = open(file)
except IOError, msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print "checking %r ..." % file
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError, msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError, msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag, nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
print "offending line: %r" % (line,)
print nag.get_msg()
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print file
else: print file, badline, repr(line)
return
if verbose:
print "%r: Clean bill of health." % (file,)
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i/tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i/ts + 1)*ts*count[i] =
# trailing + ts * sum (i/ts + 1)*count[i] =
# trailing + ts * sum i/ts*count[i] + count[i] =
# trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i/ts*count[i]) + num_tabs]
# and note that i/ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i/tabsize * count[i]
return trailing + tabsize * (il + self.nt)
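    # Worked example (editorial illustration, not part of the original module):
    # Whitespace("  \t ") has norm ((0, 0, 1), 1) and nt == 1, so
    # indent_level(4) == 1 + 4*(0 + 1) == 5 and indent_level(8) == 1 + 8*1 == 9:
    # the two spaces plus the tab land on the next tab stop, and the single
    # trailing space is added on top.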
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = map(lambda tup: str(tup[0]), w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
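# Editorial worked example (not part of the original module):
# format_witnesses([(1, 5, 4), (8, 13, 12)]) returns "at tab sizes 1, 8",
# i.e. only the offending tab sizes are reported, not the differing indent levels.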
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
| mit | 589,795,122,083,410,700 | 1,521,328,862,164,545,800 | 33.455927 | 81 | 0.554781 | false |
eerwitt/tensorflow | tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils_test.py | 20 | 29102 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of utilities supporting export to SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import time
# pylint: disable=g-import-not-at-top
# TODO(jart): #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.learn.python.learn import export_strategy as export_strategy_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
class SavedModelExportUtilsTest(test.TestCase):
def test_build_standardized_signature_def_regression(self):
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"output-1":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-1")
}
problem_type = constants.ProblemType.LINEAR_REGRESSION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype = types_pb2.DataType.Value("DT_FLOAT")
expected_signature_def.inputs[
signature_constants.REGRESS_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.REGRESS_OUTPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-1:0", dtype=dtype, tensor_shape=shape))
expected_signature_def.method_name = signature_constants.REGRESS_METHOD_NAME
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification(self):
"""Tests classification with one output tensor."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"output-1":
array_ops.placeholder(
dtypes.string, 1, name="output-tensor-1")
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-1:0", dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification2(self):
"""Tests multiple output tensors that include classes and probabilites."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.string, 1, name="output-tensor-classes"),
# Will be used for CLASSIFY_OUTPUT_SCORES.
"probabilities":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-proba"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0", dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-proba:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification3(self):
"""Tests multiple output tensors that include classes and scores."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.string, 1, name="output-tensor-classes"),
"scores":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-scores"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0", dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-scores:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification4(self):
"""Tests classification without classes tensor."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"probabilities":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-proba"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-proba:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification5(self):
"""Tests multiple output tensors that include integer classes and scores.
    Integer classes are dropped because Servo classification can only serve
    string classes, so only the scores are present in the signature.
"""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.int64, 1, name="output-tensor-classes"),
"scores":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-scores"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-scores:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification6(self):
"""Tests multiple output tensors that with integer classes and no scores.
Servo classification cannot serve integer classes, but no scores are
available. So, we fall back to predict signature.
"""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.int64, 1, name="output-tensor-classes"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_int64 = types_pb2.DataType.Value("DT_INT64")
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
expected_signature_def.inputs[
signature_constants.PREDICT_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs["classes"].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0", dtype=dtype_int64,
tensor_shape=shape))
expected_signature_def.outputs["logits"].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-logits:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.PREDICT_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_get_input_alternatives(self):
input_ops = input_fn_utils.InputFnOps("bogus features dict", None,
"bogus default input dict")
input_alternatives, _ = saved_model_export_utils.get_input_alternatives(
input_ops)
self.assertEqual(input_alternatives[
saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY],
"bogus default input dict")
# self.assertEqual(input_alternatives[
# saved_model_export_utils.FEATURES_INPUT_ALTERNATIVE_KEY],
# "bogus features dict")
def test_get_output_alternatives_explicit_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1")
self.assertEqual(provided_output_alternatives, output_alternatives)
def test_get_output_alternatives_wrong_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")
self.assertEqual("Requested default_output_alternative: WRONG, but "
"available output_alternatives are: ['head-1', 'head-2', "
"'head-3']", str(e.exception))
def test_get_output_alternatives_single_no_default(self):
prediction_tensor = constant_op.constant(["bogus"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
{"output": prediction_tensor}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions=prediction_tensor,
output_alternatives=provided_output_alternatives)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({"head-1":
(constants.ProblemType.LINEAR_REGRESSION,
{"output": prediction_tensor})},
output_alternatives)
def test_get_output_alternatives_multi_no_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops)
self.assertEqual("Please specify a default_output_alternative. Available "
"output_alternatives are: ['head-1', 'head-2', 'head-3']",
str(e.exception))
def test_get_output_alternatives_none_provided(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives=None)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual(
{"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"some_output": prediction_tensor})},
output_alternatives)
def test_get_output_alternatives_empty_provided_with_default(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives={})
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")
self.assertEqual("Requested default_output_alternative: WRONG, but "
"available output_alternatives are: []", str(e.exception))
def test_get_output_alternatives_empty_provided_no_default(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives={})
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual(
{"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"some_output": prediction_tensor})},
output_alternatives)
def test_get_output_alternatives_implicit_single(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions=prediction_tensor,
output_alternatives=None)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({
"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"output": prediction_tensor
})
}, output_alternatives)
def test_build_all_signature_defs(self):
input_features = constant_op.constant(["10"])
input_example = constant_op.constant(["11"])
input_ops = input_fn_utils.InputFnOps({
"features": input_features
}, None, {"default input": input_example})
input_alternatives, _ = (
saved_model_export_utils.get_input_alternatives(input_ops))
output_1 = constant_op.constant(["1"])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION, {
"some_output_1": output_1
}),
"head-2": (constants.ProblemType.CLASSIFICATION, {
"some_output_2": output_2
}),
"head-3": (constants.ProblemType.UNSPECIFIED, {
"some_output_3": output_3
}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": constant_op.constant(["4"])},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1"))
signature_defs = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives, "head-1")
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(input_example,
output_1),
"default_input_alternative:head-1":
signature_def_utils.regression_signature_def(input_example,
output_1),
"default_input_alternative:head-2":
signature_def_utils.classification_signature_def(input_example,
output_2, None),
"default_input_alternative:head-3":
signature_def_utils.predict_signature_def({
"input": input_example
}, {"output": output_3}),
# "features_input_alternative:head-1":
# signature_def_utils.regression_signature_def(input_features,
# output_1),
# "features_input_alternative:head-2":
# signature_def_utils.classification_signature_def(input_features,
# output_2, None),
# "features_input_alternative:head-3":
# signature_def_utils.predict_signature_def({
# "input": input_features
# }, {"output": output_3}),
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_legacy_input_fn_not_supported(self):
"""Tests that legacy input_fn returning (features, labels) raises error.
serving_input_fn must return InputFnOps including a default input
alternative.
"""
input_features = constant_op.constant(["10"])
input_ops = ({"features": input_features}, None)
input_alternatives, _ = (
saved_model_export_utils.get_input_alternatives(input_ops))
output_1 = constant_op.constant(["1"])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION, {
"some_output_1": output_1
}),
"head-2": (constants.ProblemType.CLASSIFICATION, {
"some_output_2": output_2
}),
"head-3": (constants.ProblemType.UNSPECIFIED, {
"some_output_3": output_3
}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": constant_op.constant(["4"])},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1"))
with self.assertRaisesRegexp(
ValueError, "A default input_alternative must be provided"):
saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives, "head-1")
def test_get_timestamped_export_dir(self):
export_dir_base = tempfile.mkdtemp() + "export/"
export_dir_1 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_2 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_3 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# Export directories should be named using a timestamp that is seconds
# since epoch. Such a timestamp is 10 digits long.
time_1 = os.path.basename(export_dir_1)
self.assertEqual(10, len(time_1))
time_2 = os.path.basename(export_dir_2)
self.assertEqual(10, len(time_2))
time_3 = os.path.basename(export_dir_3)
self.assertEqual(10, len(time_3))
self.assertTrue(int(time_1) < int(time_2))
self.assertTrue(int(time_2) < int(time_3))
def test_garbage_collect_exports(self):
export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(export_dir_base)
export_dir_1 = _create_test_export_dir(export_dir_base)
export_dir_2 = _create_test_export_dir(export_dir_base)
export_dir_3 = _create_test_export_dir(export_dir_base)
export_dir_4 = _create_test_export_dir(export_dir_base)
self.assertTrue(gfile.Exists(export_dir_1))
self.assertTrue(gfile.Exists(export_dir_2))
self.assertTrue(gfile.Exists(export_dir_3))
self.assertTrue(gfile.Exists(export_dir_4))
# Garbage collect all but the most recent 2 exports,
# where recency is determined based on the timestamp directory names.
saved_model_export_utils.garbage_collect_exports(export_dir_base, 2)
self.assertFalse(gfile.Exists(export_dir_1))
self.assertFalse(gfile.Exists(export_dir_2))
self.assertTrue(gfile.Exists(export_dir_3))
self.assertTrue(gfile.Exists(export_dir_4))
def test_get_most_recent_export(self):
export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(export_dir_base)
_create_test_export_dir(export_dir_base)
_create_test_export_dir(export_dir_base)
_create_test_export_dir(export_dir_base)
export_dir_4 = _create_test_export_dir(export_dir_base)
(most_recent_export_dir, most_recent_export_version) = (
saved_model_export_utils.get_most_recent_export(export_dir_base))
self.assertEqual(compat.as_bytes(export_dir_4),
compat.as_bytes(most_recent_export_dir))
self.assertEqual(compat.as_bytes(export_dir_4),
os.path.join(compat.as_bytes(export_dir_base),
compat.as_bytes(
str(most_recent_export_version))))
def test_make_export_strategy(self):
"""Only tests that an ExportStrategy instance is created."""
def _serving_input_fn():
return array_ops.constant([1]), None
export_strategy = saved_model_export_utils.make_export_strategy(
serving_input_fn=_serving_input_fn,
default_output_alternative_key="default",
assets_extra={"from/path": "to/path"},
as_text=False,
exports_to_keep=5)
self.assertTrue(
isinstance(export_strategy, export_strategy_lib.ExportStrategy))
def test_make_parsing_export_strategy(self):
"""Only tests that an ExportStrategy instance is created."""
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
real_valued_col1 = fc.real_valued_column("real_valued_column1")
bucketized_col1 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
feature_columns = [sparse_col, embedding_col, real_valued_col1,
bucketized_col1]
export_strategy = saved_model_export_utils.make_parsing_export_strategy(
feature_columns=feature_columns)
self.assertTrue(
isinstance(export_strategy, export_strategy_lib.ExportStrategy))
def _create_test_export_dir(export_dir_base):
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
gfile.MkDir(export_dir)
time.sleep(2)
return export_dir
if __name__ == "__main__":
test.main()
| apache-2.0 | -6,080,868,683,663,989,000 | 3,440,757,425,435,577,000 | 41.546784 | 88 | 0.652326 | false |
castedo/celauth | celauth/providers.py | 1 | 4151 |
import urlparse
from openid.consumer import consumer
from openid.extensions import sreg, ax
from celauth import OpenIDCase
from celauth.dj.celauth.openid_store import DjangoOpenIDStore
class OpenIDChoices(object):
def __init__(self, data):
self.data = data
def ids(self, id_prefix=''):
return [id_prefix + x[0] for x in self.data]
def texts(self):
return [x[1] for x in self.data]
def urls_by_id(self, id_prefix=''):
return dict( (id_prefix + x[0], x[2]) for x in self.data )
OPENID_PROVIDERS = OpenIDChoices([
('google', 'Google', 'https://www.google.com/accounts/o8/id'),
('yahoo', 'Yahoo!', 'https://me.yahoo.com/'),
('aol', 'AOL', 'https://openid.aol.com/'),
('stackexchange', 'StackExchange', 'https://openid.stackexchange.com/'),
('launchpad', 'Launchpad', 'https://login.launchpad.net/'),
('intuit', 'Intuit', 'https://openid.intuit.com/openid/xrds'),
])
class TestOpenIDHelper:
def __init__(self, real):
self.case = None
self.real = real
def initial_response(self, request, user_url, return_url):
urlp = urlparse.urlparse(user_url)
if urlp.netloc not in ('example.com', 'example.org', 'example.net'):
return self.real.initial_response(request, user_url, return_url)
if urlp.fragment:
email = urlp.fragment + '@' + urlp.netloc
urlp = list(urlp)
urlp[5] = '' # remove fragment
user_url = urlparse.ParseResult(*urlp).geturl()
else:
email = None
self.case = OpenIDCase(user_url, user_url, email)
return return_url
def make_case(self, request):
if not self.case:
return self.real.make_case(request)
ret = self.case
self.case = None
return ret
EMAIL_AX_TYPE_URI = 'http://axschema.org/contact/email'
class LiveOpenIDHelper:
def _openid_consumer(self, request):
openid_store = DjangoOpenIDStore()
return consumer.Consumer(request.session, openid_store)
def initial_response(self, request, user_url, return_url):
oc = self._openid_consumer(request)
openid_request = oc.begin(user_url)
if openid_request.endpoint.supportsType(ax.AXMessage.ns_uri):
ax_request = ax.FetchRequest()
ax_request.add(ax.AttrInfo(EMAIL_AX_TYPE_URI,
alias='email',
required=True,
))
openid_request.addExtension(ax_request)
else:
sreg_request = sreg.SRegRequest(required=['email'],
optional=[],
)
openid_request.addExtension(sreg_request)
realm = request.build_absolute_uri('/')
if openid_request.shouldSendRedirect():
return openid_request.redirectURL(realm, return_url)
else:
return openid_request.htmlMarkup(realm, return_url)
def make_case(self, request):
oc = self._openid_consumer(request)
current_url = request.build_absolute_uri()
query_params = dict(request.REQUEST.items())
response = oc.complete(query_params, current_url)
if response.status == consumer.CANCEL:
return "OpenID sign in cancelled"
if response.status == consumer.SUCCESS:
email = None
sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
if sreg_response:
email = sreg_response.get('email', None)
ax_response = ax.FetchResponse.fromSuccessResponse(response)
if ax_response:
email = ax_response.getSingle(EMAIL_AX_TYPE_URI, email)
return OpenIDCase(response.identity_url, response.getDisplayIdentifier(), email)
return response.message or "Internal openid library error" #should throw exception
facade = LiveOpenIDHelper()
def enable_test_openids():
global facade
facade = TestOpenIDHelper(facade)
| mit | -6,447,631,360,530,992,000 | -1,279,784,137,343,222,500 | 36.736364 | 92 | 0.589737 | false |
gisce/OCB | addons/google_base_account/google_base_account.py | 53 | 1297 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_users(osv.osv):
_inherit = "res.users"
_columns = {
'gmail_user': fields.char('Username', size=64,),
'gmail_password': fields.char('Password', size=64),
}
res_users()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,758,795,939,808,952,600 | 6,377,792,928,059,953,000 | 38.30303 | 78 | 0.607556 | false |
hassanabidpk/django | tests/queries/models.py | 91 | 17678 | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
@python_2_unicode_compatible
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey(
'self',
models.SET_NULL,
blank=True, null=True,
related_name='children',
)
category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note, models.CASCADE)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo, models.CASCADE)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author, models.CASCADE)
note = models.ForeignKey(Note, models.CASCADE)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item, models.CASCADE)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y', models.CASCADE)
class Y(models.Model):
x1 = models.ForeignKey(X, models.CASCADE, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY', models.CASCADE)
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX, models.CASCADE)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self', models.CASCADE)
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super(CustomManager, self).get_queryset()
return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, models.CASCADE, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, models.CASCADE, primary_key=True)
parent = models.ForeignKey(Member, models.CASCADE, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk, models.CASCADE)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity, models.CASCADE)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA, models.CASCADE)
b = models.ForeignKey(LeafB, models.CASCADE)
@python_2_unicode_compatible
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, models.SET_NULL, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, 'type checking should happen without calling model __iter__'
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
@python_2_unicode_compatible
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.CASCADE)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
@python_2_unicode_compatible
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True)
objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True)
childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory, models.CASCADE)
def __str__(self):
return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory, models.CASCADE)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel')
second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel')
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC, models.CASCADE)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, models.SET_NULL, null=True)
d = models.ForeignKey(ModelD, models.CASCADE)
@python_2_unicode_compatible
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, models.SET_NULL, to_field='name')
responsibility = models.ForeignKey('Responsibility', models.SET_NULL, to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, models.SET_NULL, null=True)
b = models.ForeignKey(FK2, models.SET_NULL, null=True)
c = models.ForeignKey(FK3, models.SET_NULL, null=True)
@python_2_unicode_compatible
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter', models.CASCADE)
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph', models.CASCADE)
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
order = models.ForeignKey(Order, models.SET_NULL, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
@python_2_unicode_compatible
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, models.SET_NULL, related_name='owner')
creator = models.ForeignKey(BaseUser, models.SET_NULL, related_name='creator')
def __str__(self):
return self.title
@python_2_unicode_compatible
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, models.SET_NULL, related_name='user')
def __str__(self):
return self.staff
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent, models.CASCADE)
class Person(models.Model):
name = models.CharField(max_length=128)
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company, models.CASCADE)
employee = models.ForeignKey(Person, models.CASCADE)
title = models.CharField(max_length=128)
# Bug #22429
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School, models.CASCADE)
class Classroom(models.Model):
school = models.ForeignKey(School, models.CASCADE)
students = models.ManyToManyField(Student, related_name='classroom')
class Ticket23605AParent(models.Model):
pass
class Ticket23605A(Ticket23605AParent):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
# db_table names have capital letters to ensure they are quoted in queries.
class Individual(models.Model):
alive = models.BooleanField()
class Meta:
db_table = 'Individual'
class RelatedIndividual(models.Model):
related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual')
class Meta:
db_table = 'RelatedIndividual'
| bsd-3-clause | -336,598,163,804,070,460 | -8,080,760,563,706,493,000 | 22.95393 | 103 | 0.688935 | false |
shahar-stratoscale/nova | nova/tests/objects/test_instance_group.py | 8 | 13653 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.objects import instance_group
from nova import test
from nova.tests.objects import test_objects
from nova.tests import utils as tests_utils
class _TestInstanceGroupObjects(test.TestCase):
def setUp(self):
super(_TestInstanceGroupObjects, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
metadata=None, members=None):
return db.instance_group_create(context, values, policies=policies,
metadata=metadata, members=members)
def test_get_by_uuid(self):
values = self._get_default_values()
metadata = {'key11': 'value1',
'key12': 'value2'}
policies = ['policy1', 'policy2']
members = ['instance_id1', 'instance_id2']
db_result = self._create_instance_group(self.context, values,
metadata=metadata,
policies=policies,
members=members)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.metadetails, metadata)
self.assertEqual(obj_result.members, members)
self.assertEqual(obj_result.policies, policies)
def test_refresh(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.name, 'fake_name')
values = {'name': 'new_name', 'user_id': 'new_user',
'project_id': 'new_project'}
db.instance_group_update(self.context, db_result['uuid'],
values)
obj_result.refresh()
self.assertEqual(obj_result.name, 'new_name')
self.assertEqual(set([]), obj_result.obj_what_changed())
def test_save_simple(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.name, 'fake_name')
obj_result.name = 'new_name'
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['name'], 'new_name')
def test_save_policies(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
policies = ['policy1', 'policy2']
obj_result.policies = policies
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['policies'], policies)
def test_save_members(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
members = ['instance1', 'instance2']
obj_result.members = members
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['members'], members)
def test_save_metadata(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
metadata = {'foo': 'bar'}
obj_result.metadetails = metadata
obj_result.save()
metadata1 = db.instance_group_metadata_get(self.context,
db_result['uuid'])
for key, value in metadata.iteritems():
            self.assertEqual(value, metadata1[key])
def test_create(self):
group1 = instance_group.InstanceGroup()
group1.uuid = 'fake-uuid'
group1.name = 'fake-name'
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.uuid, group2.uuid)
self.assertEqual(group1.name, group2.name)
result = db.instance_group_get(self.context, group1.uuid)
self.assertEqual(group1.id, result.id)
self.assertEqual(group1.uuid, result.uuid)
self.assertEqual(group1.name, result.name)
def test_create_with_policies(self):
group1 = instance_group.InstanceGroup()
group1.policies = ['policy1', 'policy2']
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.policies, group2.policies)
def test_create_with_members(self):
group1 = instance_group.InstanceGroup()
group1.members = ['instance1', 'instance2']
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.members, group2.members)
def test_create_with_metadata(self):
group1 = instance_group.InstanceGroup()
metadata = {'foo': 'bar'}
group1.metadetails = metadata
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
for key, value in metadata.iteritems():
self.assertEqual(value, group2.metadetails[key])
def test_recreate_fails(self):
group = instance_group.InstanceGroup()
group.create(self.context)
self.assertRaises(exception.ObjectActionError, group.create,
self.context)
def test_destroy(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
group = instance_group.InstanceGroup()
group.id = result.id
group.uuid = result.uuid
group.destroy(self.context)
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_get, self.context, result['uuid'])
def _populate_instances(self):
instances = [(str(uuid.uuid4()), 'f1', 'p1'),
(str(uuid.uuid4()), 'f2', 'p1'),
(str(uuid.uuid4()), 'f3', 'p2'),
(str(uuid.uuid4()), 'f4', 'p2')]
for instance in instances:
values = self._get_default_values()
values['uuid'] = instance[0]
values['name'] = instance[1]
values['project_id'] = instance[2]
self._create_instance_group(self.context, values)
return instances
def test_list_all(self):
self._populate_instances()
inst_list = instance_group.InstanceGroupList.get_all(self.context)
groups = db.instance_group_get_all(self.context)
self.assertEqual(len(groups), len(inst_list.objects))
self.assertEqual(len(groups), 4)
for i in range(0, len(groups)):
self.assertIsInstance(inst_list.objects[i],
instance_group.InstanceGroup)
self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])
def test_list_by_project_id(self):
self._populate_instances()
project_ids = ['p1', 'p2']
for id in project_ids:
il = instance_group.InstanceGroupList.get_by_project_id(
self.context, id)
groups = db.instance_group_get_all_by_project_id(self.context, id)
self.assertEqual(len(groups), len(il.objects))
self.assertEqual(len(groups), 2)
for i in range(0, len(groups)):
self.assertIsInstance(il.objects[i],
instance_group.InstanceGroup)
self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
self.assertEqual(il.objects[i].name, groups[i]['name'])
self.assertEqual(il.objects[i].project_id, id)
def test_get_by_name(self):
self._populate_instances()
ctxt = context.RequestContext('fake_user', 'p1')
ig = instance_group.InstanceGroup.get_by_name(ctxt, 'f1')
self.assertEqual('f1', ig.name)
def test_get_by_hint(self):
instances = self._populate_instances()
for instance in instances:
ctxt = context.RequestContext('fake_user', instance[2])
ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[1])
self.assertEqual(instance[1], ig.name)
ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[0])
self.assertEqual(instance[0], ig.uuid)
def test_add_members(self):
instance_ids = ['fakeid1', 'fakeid2']
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
members = instance_group.InstanceGroup.add_members(self.context,
group.uuid, instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
for instance in instance_ids:
self.assertIn(instance, members)
self.assertIn(instance, group.members)
def test_get_hosts(self):
instance1 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance1.host = 'hostA'
instance1.save()
instance2 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance2.host = 'hostB'
instance2.save()
instance3 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance3.host = 'hostB'
instance3.save()
instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
instance_group.InstanceGroup.add_members(self.context, group.uuid,
instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
hosts = group.get_hosts(self.context)
self.assertEqual(2, len(hosts))
self.assertIn('hostA', hosts)
self.assertIn('hostB', hosts)
hosts = group.get_hosts(self.context, exclude=[instance1.uuid])
self.assertEqual(1, len(hosts))
self.assertIn('hostB', hosts)
def test_get_hosts_with_some_none(self):
instance1 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance1.host = None
instance1.save()
instance2 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance2.host = 'hostB'
instance2.save()
instance_ids = [instance1.uuid, instance2.uuid]
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
instance_group.InstanceGroup.add_members(self.context, group.uuid,
instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
hosts = group.get_hosts(self.context)
self.assertEqual(1, len(hosts))
self.assertIn('hostB', hosts)
class TestInstanceGroupObject(test_objects._LocalTest,
_TestInstanceGroupObjects):
pass
class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
_TestInstanceGroupObjects):
pass
| apache-2.0 | 4,845,410,840,674,765,000 | 1,635,952,936,598,508,300 | 43.327922 | 78 | 0.590273 | false |
mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/site/notify/SiteActivationNotification.py | 1 | 1716 | ##
##
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 09/10/14 #3623 randerso Manually created, do not regenerate
#
##
class SiteActivationNotification(object):
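    """Plain data container for a site activation notification as used by
    dynamicserialize; each field is exposed through simple getter/setter
    pairs and __str__ renders a short one-line summary."""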
def __init__(self):
self.type = None
self.status = None
self.primarySite = None
self.modifiedSite = None
self.runMode = None
self.serverName = None
self.pluginName = None
def getType(self):
return self.type
def setType(self, type):
self.type = type
def getStatus(self):
return self.status
def setStatus(self, status):
self.status = status
    def getPrimarySite(self):
        return self.primarySite
    def setPrimarySite(self, primarySite):
        self.primarySite = primarySite
def getModifiedSite(self):
return self.modifiedSite
def setModifiedSite(self, modifiedSite):
self.modifiedSite = modifiedSite
def getRunMode(self):
return self.runMode
def setRunMode(self, runMode):
self.runMode = runMode
def getServerName(self):
return self.serverName
def setServerName(self, serverName):
self.serverName = serverName
def getPluginName(self):
return self.pluginName
def setPluginName(self, pluginName):
self.pluginName = pluginName
def __str__(self):
return self.pluginName.upper() + ":" \
+ self.status + ":" \
+ self.type + " " \
+ self.modifiedSite.upper() + " on " \
+ self.serverName + ":" \
+ self.runMode
| bsd-3-clause | -4,577,089,599,990,768,600 | -2,331,040,853,705,898,500 | 23.169014 | 85 | 0.556527 | false |
maheshp/novatest | nova/virt/baremetal/base.py | 10 | 2335 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import baremetal_states
class NodeDriver(object):
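    """Abstract base class for baremetal node drivers: subclasses supply the
    image caching, bootloader and node activation/deactivation hooks, all of
    which raise NotImplementedError here."""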
def __init__(self, virtapi):
self.virtapi = virtapi
def cache_images(self, context, node, instance, **kwargs):
raise NotImplementedError()
def destroy_images(self, context, node, instance):
raise NotImplementedError()
def activate_bootloader(self, context, node, instance):
raise NotImplementedError()
def deactivate_bootloader(self, context, node, instance):
raise NotImplementedError()
def activate_node(self, context, node, instance):
"""For operations after power on."""
raise NotImplementedError()
def deactivate_node(self, context, node, instance):
"""For operations before power off."""
raise NotImplementedError()
def get_console_output(self, node, instance):
raise NotImplementedError()
class PowerManager(object):
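    """Minimal base power manager: it only records the requested power state
    and always reports the node as powered on."""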
def __init__(self, **kwargs):
self.state = baremetal_states.DELETED
pass
def activate_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def reboot_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def deactivate_node(self):
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
"""Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
def start_console(self):
pass
def stop_console(self):
pass
| apache-2.0 | 4,562,471,515,739,270,700 | 3,879,945,938,193,929,700 | 28.935897 | 78 | 0.675803 | false |
kustodian/ansible-modules-core | commands/shell.py | 60 | 2743 | # There is actually no actual shell module source, when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# See the command source and the comment "#USE_SHELL".
DOCUMENTATION = '''
---
module: shell
short_description: Execute commands in nodes.
description:
- The M(shell) module takes the command name followed by a list of space-delimited arguments.
It is almost exactly like the M(command) module but runs
the command through a shell (C(/bin/sh)) on the remote node.
version_added: "0.2"
options:
free_form:
description:
- The shell module takes a free form command to run, as a string. There's not an actual
option named "free form". See the examples!
required: true
default: null
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
required: false
default: null
version_added: "0.6"
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
required: false
default: True
version_added: "1.8"
notes:
- If you want to execute a command securely and predictably, it may be
better to use the M(command) module instead. Best practices when writing
playbooks will follow the trend of using M(command) unless M(shell) is
explicitly required. When running ad-hoc commands, use your best
judgement.
- To sanitize any variables passed to the shell module, you should use
"{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons.
requirements: [ ]
author: Michael DeHaan
'''
EXAMPLES = '''
# Execute the command in remote shell; stdout goes to the specified
# file on the remote.
- shell: somescript.sh >> somelog.txt
# Change the working directory to somedir/ before executing the command.
- shell: somescript.sh >> somelog.txt chdir=somedir/
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# somedir/somelog.txt doesn't exist.
- shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
creates: somelog.txt
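# Illustrative addition (not one of the original examples): as the notes above
# suggest, pass any interpolated variables through the quote filter. The
# 'logfile' variable here is a hypothetical playbook variable.
- shell: cat {{ logfile | quote }} >> somelog.txt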
'''
| gpl-3.0 | 480,783,537,258,215,360 | 8,097,683,639,238,538,000 | 34.166667 | 114 | 0.698141 | false |
reybalgs/PyRecipe-4-U | models/recipemodel.py | 1 | 3188 | ###############################################################################
#
# recipemodel.py
#
# Provides the class model for a recipe. The class model is passed around in
# the application proper.
#
###############################################################################
import simplejson as json
class RecipeModel():
def export_recipe(self):
"""
        Serialize the current recipe object in the JSON-encoded
        recipe (.rcpe) format.
        Note that nothing is written to disk here; the JSON-encoded
        string is simply returned to the caller.
"""
# Dump the object into a JSON-formatted string
json_recipe = json.dumps({"name":self.name,"course":self.course,
"serving_size":self.servingSize,"ingredients":self.ingredients,
"instructions":self.instructions,"images":self.images},
separators=(',',':'))
# Return the string
return json_recipe
def import_recipe(self, raw_json):
"""
        Parse a JSON-encoded .rcpe document and copy its fields onto this
        recipe. The string passed in is the contents of the JSON file
        itself, not a path to the file.
"""
# Put the decoded JSON string into a "raw" recipe object
raw_recipe = json.loads(raw_json)
print raw_recipe # print it for now
self.name = raw_recipe['name']
self.course = raw_recipe['course']
self.servingSize = raw_recipe['serving_size']
self.ingredients = raw_recipe['ingredients']
self.instructions = raw_recipe['instructions']
self.images = raw_recipe['images']
def print_recipe_information(self):
"""
A useful debugging function that prints the entirety of the recipe
"""
# Print basic information
print '\nName: ' + self.name
print 'Course: ' + self.course
print 'Serving Size: ' + str(self.servingSize)
# Print the ingredients
print '\nIngredients:'
if len(self.ingredients) == 0:
print 'No ingredients.'
else:
for ingredient in self.ingredients:
print(ingredient['name'] + str(ingredient['quantity']) +
ingredient['unit'])
# Print the instructions
print '\nInstructions:'
if len(self.instructions) == 0:
print 'No instructions.'
else:
for instruction in self.instructions:
print instruction
# Print the filepaths of the images
print '\nImage paths:'
if len(self.images) == 0:
print 'No images.'
else:
for filePath in self.images:
print filePath
def get_recipe(self, recipe):
"""
Assigns a given recipe to this recipe.
"""
self.name = recipe.name
self.course = recipe.course
self.servingSize = recipe.servingSize
self.ingredients = recipe.ingredients
self.instructions = recipe.instructions
def __init__(self):
self.name = 'noname'
self.course = 'none'
self.servingSize = 0
self.ingredients = []
self.instructions = []
self.images = []
| gpl-3.0 | -6,903,091,642,760,869,000 | -1,861,048,428,134,950,700 | 31.865979 | 79 | 0.553011 | false |
Entropy512/libsigrokdecode | decoders/eeprom93xx/__init__.py | 7 | 1168 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2017 Kevin Redon <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
This decoder stacks on top of the 'microwire' PD and decodes the 93xx EEPROM
specific instructions.
The implemented instructions come from the STMicroelectronics M93Cx6 EEPROM
datasheet. They are compatible with the Atmel AT93Cxx EEPROMs, which use
slightly different names for the same instructions.
Warning: Other EEPROMs using Microwire might have different operation codes
and instructions.
'''
from .pd import Decoder
| gpl-3.0 | 6,675,622,560,535,775,000 | 5,265,116,700,028,831,000 | 35.5 | 76 | 0.767123 | false |
rbuffat/pyidf | tests/test_controllerwatercoil.py | 1 | 2641 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.controllers import ControllerWaterCoil
log = logging.getLogger(__name__)
class TestControllerWaterCoil(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_controllerwatercoil(self):
pyidf.validation_level = ValidationLevel.error
obj = ControllerWaterCoil()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_control_variable = "Temperature"
obj.control_variable = var_control_variable
# alpha
var_action = "Normal"
obj.action = var_action
# alpha
var_actuator_variable = "Flow"
obj.actuator_variable = var_actuator_variable
# node
var_sensor_node_name = "node|Sensor Node Name"
obj.sensor_node_name = var_sensor_node_name
# node
var_actuator_node_name = "node|Actuator Node Name"
obj.actuator_node_name = var_actuator_node_name
# real
var_controller_convergence_tolerance = 7.7
obj.controller_convergence_tolerance = var_controller_convergence_tolerance
# real
var_maximum_actuated_flow = 8.8
obj.maximum_actuated_flow = var_maximum_actuated_flow
# real
var_minimum_actuated_flow = 9.9
obj.minimum_actuated_flow = var_minimum_actuated_flow
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.controllerwatercoils[0].name, var_name)
self.assertEqual(idf2.controllerwatercoils[0].control_variable, var_control_variable)
self.assertEqual(idf2.controllerwatercoils[0].action, var_action)
self.assertEqual(idf2.controllerwatercoils[0].actuator_variable, var_actuator_variable)
self.assertEqual(idf2.controllerwatercoils[0].sensor_node_name, var_sensor_node_name)
self.assertEqual(idf2.controllerwatercoils[0].actuator_node_name, var_actuator_node_name)
self.assertAlmostEqual(idf2.controllerwatercoils[0].controller_convergence_tolerance, var_controller_convergence_tolerance)
self.assertAlmostEqual(idf2.controllerwatercoils[0].maximum_actuated_flow, var_maximum_actuated_flow)
self.assertAlmostEqual(idf2.controllerwatercoils[0].minimum_actuated_flow, var_minimum_actuated_flow) | apache-2.0 | -3,881,033,094,186,534,400 | 229,012,760,234,302,100 | 36.742857 | 131 | 0.677395 | false |
zabracks/sshuttle | src/ssnet.py | 7 | 18201 | import struct
import socket
import errno
import select
import os
if not globals().get('skip_imports'):
from helpers import log, debug1, debug2, debug3, Fatal
MAX_CHANNEL = 65535
# these don't exist in the socket module in python 2.3!
SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2
HDR_LEN = 8
CMD_EXIT = 0x4200
CMD_PING = 0x4201
CMD_PONG = 0x4202
CMD_TCP_CONNECT = 0x4203
CMD_TCP_STOP_SENDING = 0x4204
CMD_TCP_EOF = 0x4205
CMD_TCP_DATA = 0x4206
CMD_ROUTES = 0x4207
CMD_HOST_REQ = 0x4208
CMD_HOST_LIST = 0x4209
CMD_DNS_REQ = 0x420a
CMD_DNS_RESPONSE = 0x420b
CMD_UDP_OPEN = 0x420c
CMD_UDP_DATA = 0x420d
CMD_UDP_CLOSE = 0x420e
cmd_to_name = {
CMD_EXIT: 'EXIT',
CMD_PING: 'PING',
CMD_PONG: 'PONG',
CMD_TCP_CONNECT: 'TCP_CONNECT',
CMD_TCP_STOP_SENDING: 'TCP_STOP_SENDING',
CMD_TCP_EOF: 'TCP_EOF',
CMD_TCP_DATA: 'TCP_DATA',
CMD_ROUTES: 'ROUTES',
CMD_HOST_REQ: 'HOST_REQ',
CMD_HOST_LIST: 'HOST_LIST',
CMD_DNS_REQ: 'DNS_REQ',
CMD_DNS_RESPONSE: 'DNS_RESPONSE',
CMD_UDP_OPEN: 'UDP_OPEN',
CMD_UDP_DATA: 'UDP_DATA',
CMD_UDP_CLOSE: 'UDP_CLOSE',
}
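# Every mux packet is an 8-byte header followed by the payload: the header is
# struct-packed as '!ccHHH' -> 'S', 'S', channel, cmd, datalen, where cmd is
# one of the CMD_* values above (see HDR_LEN, Mux.send and Mux.handle).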
NET_ERRS = [errno.ECONNREFUSED, errno.ETIMEDOUT,
errno.EHOSTUNREACH, errno.ENETUNREACH,
errno.EHOSTDOWN, errno.ENETDOWN]
def _add(l, elem):
if not elem in l:
l.append(elem)
def _fds(l):
out = []
for i in l:
try:
out.append(i.fileno())
except AttributeError:
out.append(i)
out.sort()
return out
def _nb_clean(func, *args):
try:
return func(*args)
except OSError, e:
if e.errno not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
else:
debug3('%s: err was: %s\n' % (func.__name__, e))
return None
def _try_peername(sock):
try:
pn = sock.getpeername()
if pn:
return '%s:%s' % (pn[0], pn[1])
except socket.error, e:
if e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK):
raise
return 'unknown'
_swcount = 0
class SockWrapper:
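    """Buffered, non-blocking wrapper around a (read, write) socket pair: it
    tracks an in-progress outbound connect, per-direction shutdown state and
    a read buffer that copy_to() drains into another wrapper."""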
def __init__(self, rsock, wsock, connect_to=None, peername=None):
global _swcount
_swcount += 1
debug3('creating new SockWrapper (%d now exist)\n' % _swcount)
self.exc = None
self.rsock = rsock
self.wsock = wsock
self.shut_read = self.shut_write = False
self.buf = []
self.connect_to = connect_to
self.peername = peername or _try_peername(self.rsock)
self.try_connect()
def __del__(self):
global _swcount
_swcount -= 1
debug1('%r: deleting (%d remain)\n' % (self, _swcount))
if self.exc:
debug1('%r: error was: %s\n' % (self, self.exc))
def __repr__(self):
if self.rsock == self.wsock:
fds = '#%d' % self.rsock.fileno()
else:
fds = '#%d,%d' % (self.rsock.fileno(), self.wsock.fileno())
return 'SW%s:%s' % (fds, self.peername)
def seterr(self, e):
if not self.exc:
self.exc = e
self.nowrite()
self.noread()
def try_connect(self):
if self.connect_to and self.shut_write:
self.noread()
self.connect_to = None
if not self.connect_to:
return # already connected
self.rsock.setblocking(False)
debug3('%r: trying connect to %r\n' % (self, self.connect_to))
try:
self.rsock.connect(self.connect_to)
# connected successfully (Linux)
self.connect_to = None
except socket.error, e:
debug3('%r: connect result: %s\n' % (self, e))
if e.args[0] == errno.EINVAL:
# this is what happens when you call connect() on a socket
# that is now connected but returned EINPROGRESS last time,
# on BSD, on python pre-2.5.1. We need to use getsockopt()
# to get the "real" error. Later pythons do this
# automatically, so this code won't run.
realerr = self.rsock.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
e = socket.error(realerr, os.strerror(realerr))
debug3('%r: fixed connect result: %s\n' % (self, e))
if e.args[0] in [errno.EINPROGRESS, errno.EALREADY]:
pass # not connected yet
elif e.args[0] == 0:
# connected successfully (weird Linux bug?)
# Sometimes Linux seems to return EINVAL when it isn't
# invalid. This *may* be caused by a race condition
# between connect() and getsockopt(SO_ERROR) (ie. it
# finishes connecting in between the two, so there is no
# longer an error). However, I'm not sure of that.
#
# I did get at least one report that the problem went away
# when we added this, however.
self.connect_to = None
elif e.args[0] == errno.EISCONN:
# connected successfully (BSD)
self.connect_to = None
elif e.args[0] in NET_ERRS + [errno.EACCES, errno.EPERM]:
# a "normal" kind of error
self.connect_to = None
self.seterr(e)
else:
raise # error we've never heard of?! barf completely.
def noread(self):
if not self.shut_read:
debug2('%r: done reading\n' % self)
self.shut_read = True
# self.rsock.shutdown(SHUT_RD) # doesn't do anything anyway
def nowrite(self):
if not self.shut_write:
debug2('%r: done writing\n' % self)
self.shut_write = True
try:
self.wsock.shutdown(SHUT_WR)
except socket.error, e:
self.seterr('nowrite: %s' % e)
def too_full(self):
return False # fullness is determined by the socket's select() state
def uwrite(self, buf):
if self.connect_to:
return 0 # still connecting
self.wsock.setblocking(False)
try:
return _nb_clean(os.write, self.wsock.fileno(), buf)
except OSError, e:
if e.errno == errno.EPIPE:
debug1('%r: uwrite: got EPIPE\n' % self)
self.nowrite()
return 0
else:
# unexpected error... stream is dead
self.seterr('uwrite: %s' % e)
return 0
def write(self, buf):
assert(buf)
return self.uwrite(buf)
def uread(self):
if self.connect_to:
return None # still connecting
if self.shut_read:
return
self.rsock.setblocking(False)
try:
return _nb_clean(os.read, self.rsock.fileno(), 65536)
except OSError, e:
self.seterr('uread: %s' % e)
return '' # unexpected error... we'll call it EOF
def fill(self):
if self.buf:
return
rb = self.uread()
if rb:
self.buf.append(rb)
if rb == '': # empty string means EOF; None means temporarily empty
self.noread()
def copy_to(self, outwrap):
if self.buf and self.buf[0]:
wrote = outwrap.write(self.buf[0])
self.buf[0] = self.buf[0][wrote:]
while self.buf and not self.buf[0]:
self.buf.pop(0)
if not self.buf and self.shut_read:
outwrap.nowrite()
class Handler:
def __init__(self, socks=None, callback=None):
self.ok = True
self.socks = socks or []
if callback:
self.callback = callback
def pre_select(self, r, w, x):
for i in self.socks:
_add(r, i)
def callback(self):
log('--no callback defined-- %r\n' % self)
(r, w, x) = select.select(self.socks, [], [], 0)
for s in r:
v = s.recv(4096)
if not v:
log('--closed-- %r\n' % self)
self.socks = []
self.ok = False
class Proxy(Handler):
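    """Handler that shuttles bytes between two SockWrappers in both
    directions, finishing once both sides have reached EOF and all buffered
    data has been flushed."""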
def __init__(self, wrap1, wrap2):
Handler.__init__(self, [wrap1.rsock, wrap1.wsock,
wrap2.rsock, wrap2.wsock])
self.wrap1 = wrap1
self.wrap2 = wrap2
def pre_select(self, r, w, x):
if self.wrap1.shut_write:
self.wrap2.noread()
if self.wrap2.shut_write:
self.wrap1.noread()
if self.wrap1.connect_to:
_add(w, self.wrap1.rsock)
elif self.wrap1.buf:
if not self.wrap2.too_full():
_add(w, self.wrap2.wsock)
elif not self.wrap1.shut_read:
_add(r, self.wrap1.rsock)
if self.wrap2.connect_to:
_add(w, self.wrap2.rsock)
elif self.wrap2.buf:
if not self.wrap1.too_full():
_add(w, self.wrap1.wsock)
elif not self.wrap2.shut_read:
_add(r, self.wrap2.rsock)
def callback(self):
self.wrap1.try_connect()
self.wrap2.try_connect()
self.wrap1.fill()
self.wrap2.fill()
self.wrap1.copy_to(self.wrap2)
self.wrap2.copy_to(self.wrap1)
if self.wrap1.buf and self.wrap2.shut_write:
self.wrap1.buf = []
self.wrap1.noread()
if self.wrap2.buf and self.wrap1.shut_write:
self.wrap2.buf = []
self.wrap2.noread()
if (self.wrap1.shut_read and self.wrap2.shut_read and
not self.wrap1.buf and not self.wrap2.buf):
self.ok = False
self.wrap1.nowrite()
self.wrap2.nowrite()
class Mux(Handler):
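    """Multiplexes many logical channels over a single (rsock, wsock) pair
    using the framed CMD_* protocol above; per-channel callbacks are
    registered in self.channels."""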
def __init__(self, rsock, wsock):
Handler.__init__(self, [rsock, wsock])
self.rsock = rsock
self.wsock = wsock
self.new_channel = self.got_dns_req = self.got_routes = None
self.got_udp_open = self.got_udp_data = self.got_udp_close = None
self.got_host_req = self.got_host_list = None
self.channels = {}
self.chani = 0
self.want = 0
self.inbuf = ''
self.outbuf = []
self.fullness = 0
self.too_full = False
self.send(0, CMD_PING, 'chicken')
def next_channel(self):
# channel 0 is special, so we never allocate it
for timeout in xrange(1024):
self.chani += 1
if self.chani > MAX_CHANNEL:
self.chani = 1
if not self.channels.get(self.chani):
return self.chani
def amount_queued(self):
total = 0
for b in self.outbuf:
total += len(b)
return total
def check_fullness(self):
if self.fullness > 32768:
if not self.too_full:
self.send(0, CMD_PING, 'rttest')
self.too_full = True
#ob = []
# for b in self.outbuf:
# (s1,s2,c) = struct.unpack('!ccH', b[:4])
# ob.append(c)
#log('outbuf: %d %r\n' % (self.amount_queued(), ob))
def send(self, channel, cmd, data):
data = str(data)
assert(len(data) <= 65535)
p = struct.pack('!ccHHH', 'S', 'S', channel, cmd, len(data)) + data
self.outbuf.append(p)
debug2(' > channel=%d cmd=%s len=%d (fullness=%d)\n'
% (channel, cmd_to_name.get(cmd, hex(cmd)),
len(data), self.fullness))
self.fullness += len(data)
def got_packet(self, channel, cmd, data):
debug2('< channel=%d cmd=%s len=%d\n'
% (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
if cmd == CMD_PING:
self.send(0, CMD_PONG, data)
elif cmd == CMD_PONG:
debug2('received PING response\n')
self.too_full = False
self.fullness = 0
elif cmd == CMD_EXIT:
self.ok = False
elif cmd == CMD_TCP_CONNECT:
assert(not self.channels.get(channel))
if self.new_channel:
self.new_channel(channel, data)
elif cmd == CMD_DNS_REQ:
assert(not self.channels.get(channel))
if self.got_dns_req:
self.got_dns_req(channel, data)
elif cmd == CMD_UDP_OPEN:
assert(not self.channels.get(channel))
if self.got_udp_open:
self.got_udp_open(channel, data)
elif cmd == CMD_ROUTES:
if self.got_routes:
self.got_routes(data)
else:
raise Exception('got CMD_ROUTES without got_routes?')
elif cmd == CMD_HOST_REQ:
if self.got_host_req:
self.got_host_req(data)
else:
raise Exception('got CMD_HOST_REQ without got_host_req?')
elif cmd == CMD_HOST_LIST:
if self.got_host_list:
self.got_host_list(data)
else:
raise Exception('got CMD_HOST_LIST without got_host_list?')
else:
callback = self.channels.get(channel)
if not callback:
log('warning: closed channel %d got cmd=%s len=%d\n'
% (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
else:
callback(cmd, data)
def flush(self):
self.wsock.setblocking(False)
if self.outbuf and self.outbuf[0]:
wrote = _nb_clean(os.write, self.wsock.fileno(), self.outbuf[0])
debug2('mux wrote: %r/%d\n' % (wrote, len(self.outbuf[0])))
if wrote:
self.outbuf[0] = self.outbuf[0][wrote:]
while self.outbuf and not self.outbuf[0]:
self.outbuf[0:1] = []
def fill(self):
self.rsock.setblocking(False)
try:
b = _nb_clean(os.read, self.rsock.fileno(), 32768)
except OSError, e:
raise Fatal('other end: %r' % e)
#log('<<< %r\n' % b)
if b == '': # EOF
self.ok = False
if b:
self.inbuf += b
def handle(self):
self.fill()
# log('inbuf is: (%d,%d) %r\n'
# % (self.want, len(self.inbuf), self.inbuf))
while 1:
if len(self.inbuf) >= (self.want or HDR_LEN):
(s1, s2, channel, cmd, datalen) = \
struct.unpack('!ccHHH', self.inbuf[:HDR_LEN])
assert(s1 == 'S')
assert(s2 == 'S')
self.want = datalen + HDR_LEN
if self.want and len(self.inbuf) >= self.want:
data = self.inbuf[HDR_LEN:self.want]
self.inbuf = self.inbuf[self.want:]
self.want = 0
self.got_packet(channel, cmd, data)
else:
break
def pre_select(self, r, w, x):
_add(r, self.rsock)
if self.outbuf:
_add(w, self.wsock)
def callback(self):
(r, w, x) = select.select([self.rsock], [self.wsock], [], 0)
if self.rsock in r:
self.handle()
if self.outbuf and self.wsock in w:
self.flush()
class MuxWrapper(SockWrapper):
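    """SockWrapper lookalike representing a single channel of a Mux: reads
    and writes become CMD_TCP_* messages instead of touching a real
    socket."""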
def __init__(self, mux, channel):
SockWrapper.__init__(self, mux.rsock, mux.wsock)
self.mux = mux
self.channel = channel
self.mux.channels[channel] = self.got_packet
self.socks = []
debug2('new channel: %d\n' % channel)
def __del__(self):
self.nowrite()
SockWrapper.__del__(self)
def __repr__(self):
return 'SW%r:Mux#%d' % (self.peername, self.channel)
def noread(self):
if not self.shut_read:
self.shut_read = True
self.mux.send(self.channel, CMD_TCP_STOP_SENDING, '')
self.maybe_close()
def nowrite(self):
if not self.shut_write:
self.shut_write = True
self.mux.send(self.channel, CMD_TCP_EOF, '')
self.maybe_close()
def maybe_close(self):
if self.shut_read and self.shut_write:
# remove the mux's reference to us. The python garbage collector
# will then be able to reap our object.
self.mux.channels[self.channel] = None
def too_full(self):
return self.mux.too_full
def uwrite(self, buf):
if self.mux.too_full:
return 0 # too much already enqueued
if len(buf) > 2048:
buf = buf[:2048]
self.mux.send(self.channel, CMD_TCP_DATA, buf)
return len(buf)
def uread(self):
if self.shut_read:
return '' # EOF
else:
return None # no data available right now
def got_packet(self, cmd, data):
if cmd == CMD_TCP_EOF:
self.noread()
elif cmd == CMD_TCP_STOP_SENDING:
self.nowrite()
elif cmd == CMD_TCP_DATA:
self.buf.append(data)
else:
raise Exception('unknown command %d (%d bytes)'
% (cmd, len(data)))
def connect_dst(family, ip, port):
debug2('Connecting to %s:%d\n' % (ip, port))
outsock = socket.socket(family)
outsock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
return SockWrapper(outsock, outsock,
connect_to=(ip, port),
peername = '%s:%d' % (ip, port))
def runonce(handlers, mux):
r = []
w = []
x = []
to_remove = filter(lambda s: not s.ok, handlers)
for h in to_remove:
handlers.remove(h)
for s in handlers:
s.pre_select(r, w, x)
debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)\n'
% (len(handlers), _fds(r), _fds(w), _fds(x),
mux.fullness, mux.too_full))
(r, w, x) = select.select(r, w, x)
debug2(' Ready: %d r=%r w=%r x=%r\n'
% (len(handlers), _fds(r), _fds(w), _fds(x)))
ready = r + w + x
did = {}
for h in handlers:
for s in h.socks:
if s in ready:
h.callback()
did[s] = 1
for s in ready:
if not s in did:
raise Fatal('socket %r was not used by any handler' % s)
| lgpl-2.1 | 8,393,661,544,165,015,000 | 7,107,963,210,000,616,000 | 30.544194 | 77 | 0.518708 | false |
goanpeca/mongokit | tests/test_versioned.py | 3 | 15067 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from mongokit import *
class VersionedTestCase(unittest.TestCase):
def setUp(self):
self.connection = Connection()
self.col = self.connection['test']['mongokit']
def tearDown(self):
self.connection['test'].drop_collection('mongokit')
self.connection['test'].drop_collection('versioned_mongokit')
self.connection['test'].drop_collection('versioned_mongokit2')
self.connection['versioned_test'].drop_collection('versioned_mongokit')
def test_save_versioning(self):
class MyDoc(Document):
structure = {
"bla" : unicode,
}
self.connection.register([MyDoc])
doc = self.col.MyDoc()
doc['bla'] = u"bli"
doc.save()
assert "_revision" not in doc
doc.delete()
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
docs = list(self.col.find())
assert len(docs) == 1
ver_doc = list(self.connection.test.versioned_mongokit.find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'}
assert versioned_doc['_revision'] == 1
assert versioned_doc.get_last_revision_id() == 1
assert versioned_doc.get_revision(1) == {'foo':'bla', "_revision":1, "_id":"mydoc"}
versioned_doc['foo'] = u'bar'
versioned_doc.save()
ver_doc = list(self.connection.test.versioned_mongokit.find())
assert len(ver_doc) == 2
assert ver_doc[0]['id'] == 'mydoc'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'}
assert ver_doc[1]['id'] == 'mydoc'
assert ver_doc[1]['revision'] == 2
assert ver_doc[1]['doc'] == {u'_revision': 2, u'foo': u'bar', u'_id': u'mydoc'}
assert versioned_doc['_revision'] == 2
assert versioned_doc.get_last_revision_id() == 2
assert versioned_doc['foo'] == 'bar'
assert versioned_doc.get_revision(2) == {'foo':'bar', "_revision":2, "_id":"mydoc"}, versioned_doc.get_revision(2)
old_doc = versioned_doc.get_revision(1)
print old_doc, type(old_doc)
old_doc.save()
assert old_doc['_revision'] == 3
versioned_doc = self.connection.test.mongokit.MyVersionedDoc.get_from_id(versioned_doc['_id'])
assert len(list(versioned_doc.get_revisions())) == 3, len(list(versioned_doc.get_revisions()))
def test_save_without_versionning(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save(versioning=False)
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 0
assert self.col.find().count() == 1
def test_save_versioning_without_id(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['foo'] = u'bla'
versioned_doc.save()
ver_doc = list(self.connection.test.versioned_mongokit.find())
assert len(ver_doc) == 1
assert 'doc' in ver_doc[0]
assert 'revision' in ver_doc[0], ver_doc[0]
ver_doc = list(self.col.find())
assert len(ver_doc) == 1
assert 'doc' not in ver_doc[0]
assert '_revision' in ver_doc[0]
def _test_bad_versioning(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
self.assertRaises(ValidationError, MyVersionedDoc)
def test_delete_versioning(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 1
versioned_doc['foo'] = u'bar'
versioned_doc.save()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2
versioned_doc.delete(versioning=True)
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 0
assert self.col.MyVersionedDoc.find().count() == 0
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 1
versioned_doc['foo'] = u'bar'
versioned_doc.save()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2
versioned_doc.delete()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2
assert self.col.MyVersionedDoc.find().count() == 0
def test_remove_versioning(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
versioned_doc2 = self.col.MyVersionedDoc()
versioned_doc2['_id'] = "mydoc2"
versioned_doc2['foo'] = u'bla'
versioned_doc2.save()
versioned_doc3 = self.col.MyVersionedDoc()
versioned_doc3['_id'] = "mydoc3"
versioned_doc3['foo'] = u'bla'
versioned_doc3.save()
versioned_doc['foo'] = u'bar'
versioned_doc.save()
versioned_doc2['foo'] = u'bar'
versioned_doc2.save()
versioned_doc3['foo'] = u'bar'
versioned_doc3.save()
count = self.col.MyVersionedDoc.versioning_collection.find().count()
assert count == 6, count
count = self.col.MyVersionedDoc.collection.find().count()
assert count == 3, count
versioned_doc.remove({'foo':'bar'}, versioning=True)
count = self.col.MyVersionedDoc.versioning_collection.find().count()
assert count == 0, count
count = self.col.MyVersionedDoc.collection.find().count()
assert count == 0, count
def _test_versioning_with_dynamic_db(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
ver_doc = list(self.connection.test.versioned_mongokit.find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'}
ver_mongokit2 = list(CONNECTION['versioned_test']['versioned_mongokit'].find())
assert len(ver_mongokit2) == 0, len(ver_mongokit2)
versioned_doc2 = MyVersionedDoc(versioning_db_name="versioned_test")
versioned_doc2['_id'] = "mydoc2"
versioned_doc2['foo'] = u'bla'
versioned_doc2.save()
ver_mongokit = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_mongokit) == 1, len(ver_mongokit)
ver_doc = list(CONNECTION['versioned_test']['versioned_mongokit'].find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc2'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc2'}
versioned_doc['foo'] = u'bar'
versioned_doc.save()
ver_doc = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_doc) == 2
ver_doc = list(CONNECTION['versioned_test']['versioned_mongokit'].find())
assert len(ver_doc) == 1
def _test_versioning_with_dynamic_collection(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
versioning_collection_name = "versioned_mongokit"
versioned_doc = MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
ver_doc = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'}
ver_mongokit2 = list(CONNECTION['test']['versioned_mongokit2'].find())
assert len(ver_mongokit2) == 0
versioned_doc2 = MyVersionedDoc(versioning_collection_name="versioned_mongokit2")
versioned_doc2['_id'] = "mydoc2"
versioned_doc2['foo'] = u'bla'
versioned_doc2.save()
ver_mongokit = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_mongokit) == 1, len(ver_mongokit)
ver_doc = list(CONNECTION['test']['versioned_mongokit2'].find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc2'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc2'}
versioned_doc['foo'] = u'bar'
versioned_doc.save()
ver_doc = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_doc) == 2
ver_doc = list(CONNECTION['test']['versioned_mongokit2'].find())
assert len(ver_doc) == 1
def test_versioning_without_versioning_collection_name(self):
test_passed = False
try:
class Group(VersionedDocument):
use_autorefs = True
structure = {
'name':unicode,
'members':[User], #users
}
except:
test_passed = True
assert test_passed
def test_resave_versioned_doc_with_objectId(self):
"""
1. Create a simple VersionedDocument using the defaults, thus using the
built-in objectID.
2. save to the database
3. change the VersionedDocument contents (leave _id unchanged)
4. resave to the database
4a. the save action will search for the get_last_revision_id
4b. add +1 to the _revision attribute
4c. save the revised document, save the old document in the
versioned_* collection
4a BREAKS!
self['_revision'] = self.get_last_revision_id()
File "...\mongokit\versioned_document.py", line 100, in get_last_revision_id
{'id':self['_id']}).sort('revision', -1).next()
File "...\mongokit\cursor.py", line 44, in next
raise StopIteration
"""
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['foo'] = u'bla'
versioned_doc.save()
docs = list(self.col.find())
assert len(docs) == 1
versioned_doc['foo'] = u'Some Other bla'
versioned_doc.save()
print(versioned_doc)
def test_resave_versioned_doc_with_UUID(self):
"""
Simple versioning test, a bit different than the test_save_versionning
"""
class MyVersionedUUIDDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
def save(self, versioning=True, uuid=True, *args, **kwargs):
""" Ensure that the save is performed using uuid=True """
return super(MyVersionedUUIDDoc, self).save(versioning, uuid, *args, **kwargs)
self.connection.register([MyVersionedUUIDDoc])
versioned_doc = self.col.MyVersionedUUIDDoc()
versioned_doc['foo'] = u'bla'
versioned_doc.save()
docs = list(self.col.find())
assert len(docs) == 1
versioned_doc['foo'] = u'Some Other bla'
versioned_doc.save()
# search for the versioned_doc in the database and compare id's
ver_doc = list(self.connection.test.mongokit.find())
assert len(ver_doc) == 1
assert ver_doc[0]['_revision'] == 2
assert ver_doc[0]['foo'] == u'Some Other bla'
assert ver_doc[0]['_id'][:18] == u'MyVersionedUUIDDoc'
assert ver_doc[0]['_id'] == versioned_doc['_id']
| bsd-3-clause | -3,109,706,624,421,959,000 | -5,011,598,584,055,493,000 | 37.932817 | 122 | 0.595208 | false |
collective/eden | modules/s3db/doc.py | 2 | 32300 | # -*- coding: utf-8 -*-
""" Sahana Eden Document Library
@copyright: 2011-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3DocumentLibrary",
"S3DocSitRepModel",
"doc_image_represent",
"doc_document_list_layout",
)
import os
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3DocumentLibrary(S3Model):
names = ("doc_entity",
"doc_document",
"doc_document_id",
"doc_image",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
person_comment = self.pr_person_comment
person_id = self.pr_person_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
# Shortcuts
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
folder = current.request.folder
super_link = self.super_link
# ---------------------------------------------------------------------
# Document-referencing entities
#
entity_types = Storage(asset_asset=T("Asset"),
cms_post=T("Post"),
cr_shelter=T("Shelter"),
deploy_mission=T("Mission"),
doc_sitrep=T("Situation Report"),
event_incident=T("Incident"),
event_incident_report=T("Incident Report"),
hms_hospital=T("Hospital"),
hrm_human_resource=T("Human Resource"),
inv_adj=T("Stock Adjustment"),
inv_warehouse=T("Warehouse"),
# @ToDo: Deprecate
irs_ireport=T("Incident Report"),
pr_group=T("Team"),
project_project=T("Project"),
project_activity=T("Project Activity"),
project_framework=T("Project Framework"),
project_task=T("Task"),
org_office=T("Office"),
org_facility=T("Facility"),
org_group=T("Organization Group"),
# @ToDo: Deprecate
stats_people=T("People"),
vulnerability_document=T("Vulnerability Document"),
vulnerability_risk=T("Risk"),
vulnerability_evac_route=T("Evacuation Route"),
)
tablename = "doc_entity"
self.super_entity(tablename, "doc_id", entity_types)
# Components
doc_id = "doc_id"
self.add_components(tablename,
doc_document = doc_id,
doc_image = doc_id,
)
# ---------------------------------------------------------------------
# Documents
#
tablename = "doc_document"
define_table(tablename,
# Instance
self.stats_source_superlink,
# Component not instance
super_link(doc_id, "doc_entity"),
# @ToDo: Remove since Site Instances are doc entities?
super_link("site_id", "org_site"),
Field("file", "upload",
autodelete = True,
represent = self.doc_file_represent,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
),
Field("mime_type",
readable = False,
writable = False,
),
Field("name", length=128,
# Allow Name to be added onvalidation
requires = IS_EMPTY_OR(IS_LENGTH(128)),
label = T("Name")
),
Field("url",
label = T("URL"),
represent = lambda url: \
url and A(url, _href=url) or NONE,
requires = IS_EMPTY_OR(IS_URL()),
),
Field("has_been_indexed", "boolean",
default = False,
readable = False,
writable = False,
),
person_id(
# Enable when-required
label = T("Author"),
readable = False,
writable = False,
comment = person_comment(T("Author"),
T("The Author of this Document (optional)"))
),
organisation_id(# Enable when-required
readable = False,
writable = False,
),
s3_date(label = T("Date Published"),
),
# @ToDo: Move location to link table
location_id(# Enable when-required
readable = False,
writable = False,
),
s3_comments(),
Field("checksum",
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Reference Document"),
title_display = T("Document Details"),
title_list = T("Documents"),
title_update = T("Edit Document"),
label_list_button = T("List Documents"),
label_delete_button = T("Delete Document"),
msg_record_created = T("Document added"),
msg_record_modified = T("Document updated"),
msg_record_deleted = T("Document deleted"),
msg_list_empty = T("No Documents found")
)
# Search Method
# Resource Configuration
if current.deployment_settings.get_base_solr_url():
onaccept = self.document_onaccept
ondelete = self.document_ondelete
else:
onaccept = None
ondelete = None
configure(tablename,
context = {"organisation": "organisation_id",
"person": "person_id",
"site": "site_id",
},
deduplicate = self.document_duplicate,
list_layout = doc_document_list_layout,
onaccept = onaccept,
ondelete = ondelete,
onvalidation = self.document_onvalidation,
super_entity = "stats_source",
)
# Reusable field
represent = doc_DocumentRepresent(lookup = tablename,
fields = ["name", "file", "url"],
labels = "%(name)s",
show_link = True)
document_id = S3ReusableField("document_id", "reference %s" % tablename,
label = T("Document"),
ondelete = "CASCADE",
represent = represent,
requires = IS_ONE_OF(db,
"doc_document.id",
represent),
)
# ---------------------------------------------------------------------
# Images
#
# @ToDo: Field to determine which is the default image to use for
# e.g. a Map popup (like the profile picture)
# readable/writable=False except in the cases where-needed
#
doc_image_type_opts = {1: T("Photograph"),
2: T("Map"),
3: T("Document Scan"),
99: T("other")
}
tablename = "doc_image"
define_table(tablename,
# Component not instance
super_link(doc_id, "doc_entity"),
super_link("pe_id", "pr_pentity"), # @ToDo: Remove & make Persons doc entities instead?
super_link("site_id", "org_site"), # @ToDo: Remove since Site Instances are doc entities?
Field("file", "upload", autodelete=True,
represent = doc_image_represent,
requires = IS_EMPTY_OR(
IS_IMAGE(extensions=(s3.IMAGE_EXTENSIONS)),
                                    # Distinguish from prepop
null = "",
),
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads",
"images"),
widget = S3ImageCropWidget((600, 600)),
),
Field("mime_type",
readable = False,
writable = False,
),
Field("name", length=128,
label = T("Name"),
# Allow Name to be added onvalidation
requires = IS_EMPTY_OR(IS_LENGTH(128)),
),
Field("url",
label = T("URL"),
requires = IS_EMPTY_OR(IS_URL()),
),
Field("type", "integer",
default = 1,
label = T("Image Type"),
represent = lambda opt: \
doc_image_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(doc_image_type_opts,
zero=None),
),
person_id(label = T("Author"),
),
organisation_id(),
s3_date(label = T("Date Taken"),
),
# @ToDo: Move location to link table
location_id(),
s3_comments(),
Field("checksum",
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Photo"),
title_display = T("Photo Details"),
title_list = T("Photos"),
title_update = T("Edit Photo"),
label_list_button = T("List Photos"),
label_delete_button = T("Delete Photo"),
msg_record_created = T("Photo added"),
msg_record_modified = T("Photo updated"),
msg_record_deleted = T("Photo deleted"),
msg_list_empty = T("No Photos found"))
# Resource Configuration
configure(tablename,
deduplicate = self.document_duplicate,
onvalidation = lambda form: \
self.document_onvalidation(form, document=False)
)
# ---------------------------------------------------------------------
# Pass model-global names to response.s3
#
return dict(doc_document_id = document_id,
)
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults if the module is disabled """
document_id = S3ReusableField("document_id", "integer",
readable=False, writable=False)
return dict(doc_document_id = document_id,
)
# -------------------------------------------------------------------------
@staticmethod
def doc_file_represent(file):
""" File representation """
if file:
try:
# Read the filename from the file
filename = current.db.doc_document.file.retrieve(file)[0]
except IOError:
return current.T("File not found")
else:
return A(filename,
_href=URL(c="default", f="download", args=[file]))
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def document_duplicate(item):
""" Import item de-duplication """
data = item.data
query = None
file = data.get("file")
if file:
table = item.table
query = (table.file == file)
else:
url = data.get("url")
if url:
table = item.table
query = (table.url == url)
if query:
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
# -------------------------------------------------------------------------
@staticmethod
def document_onvalidation(form, document=True):
""" Form validation for both, documents and images """
form_vars = form.vars
doc = form_vars.file
if doc is None:
# If this is a prepop, then file not in form
# Interactive forms with empty doc has this as "" not None
return
if not document:
encoded_file = form_vars.get("imagecrop-data", None)
if encoded_file:
# S3ImageCropWidget
import base64
import uuid
metadata, encoded_file = encoded_file.split(",")
filename, datatype, enctype = metadata.split(";")
f = Storage()
f.filename = uuid.uuid4().hex + filename
import cStringIO
f.file = cStringIO.StringIO(base64.decodestring(encoded_file))
form_vars.file = f
if not form_vars.name:
form_vars.name = filename
if not hasattr(doc, "file") and not doc and not form_vars.url:
if document:
msg = current.T("Either file upload or document URL required.")
else:
msg = current.T("Either file upload or image URL required.")
form.errors.file = msg
form.errors.url = msg
if hasattr(doc, "file"):
name = form_vars.name
if not name:
# Use the filename
form_vars.name = doc.filename
else:
id = current.request.post_vars.id
if id:
if document:
tablename = "doc_document"
else:
tablename = "doc_image"
db = current.db
table = db[tablename]
record = db(table.id == id).select(table.file,
limitby=(0, 1)).first()
if record:
name = form_vars.name
if not name:
# Use the filename
form_vars.name = table.file.retrieve(record.file)[0]
# Do a checksum on the file to see if it's a duplicate
#import cgi
#if isinstance(doc, cgi.FieldStorage) and doc.filename:
# f = doc.file
# form_vars.checksum = doc_checksum(f.read())
# f.seek(0)
# if not form_vars.name:
# form_vars.name = doc.filename
#if form_vars.checksum is not None:
# # Duplicate allowed if original version is deleted
# query = ((table.checksum == form_vars.checksum) & \
# (table.deleted == False))
# result = db(query).select(table.name,
# limitby=(0, 1)).first()
# if result:
# doc_name = result.name
# form.errors["file"] = "%s %s" % \
# (T("This file already exists on the server as"), doc_name)
# -------------------------------------------------------------------------
@staticmethod
def document_onaccept(form):
"""
Build a full-text index
"""
form_vars = form.vars
doc = form_vars.file
table = current.db.doc_document
document = json.dumps(dict(filename=doc,
name=table.file.retrieve(doc)[0],
id=form_vars.id,
))
current.s3task.async("document_create_index",
args = [document])
# -------------------------------------------------------------------------
@staticmethod
def document_ondelete(row):
"""
Remove the full-text index
"""
db = current.db
table = db.doc_document
record = db(table.id == row.id).select(table.file,
limitby=(0, 1)).first()
document = json.dumps(dict(filename=record.file,
id=row.id,
))
current.s3task.async("document_delete_index",
args = [document])
# =============================================================================
def doc_image_represent(filename):
"""
Represent an image as a clickable thumbnail
@param filename: name of the image file
"""
if not filename:
return current.messages["NONE"]
return DIV(A(IMG(_src=URL(c="default", f="download",
args=filename),
_height=40),
_class="zoom",
_href=URL(c="default", f="download",
args=filename)))
# @todo: implement/activate the JavaScript for this:
#import uuid
#anchor = "zoom-media-image-%s" % uuid.uuid4()
#return DIV(A(IMG(_src=URL(c="default", f="download",
#args=filename),
#_height=40),
#_class="zoom",
#_href="#%s" % anchor),
#DIV(IMG(_src=URL(c="default", f="download",
#args=filename),
#_width=600),
#_id="%s" % anchor,
#_class="hide"))
# =============================================================================
def doc_checksum(docstr):
""" Calculate a checksum for a file """
import hashlib
converted = hashlib.sha1(docstr).hexdigest()
return converted
# =============================================================================
def doc_document_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Documents, e.g. on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["doc_document.id"]
item_class = "thumbnail"
raw = record._row
title = record["doc_document.name"]
file = raw["doc_document.file"] or ""
url = raw["doc_document.url"] or ""
date = record["doc_document.date"]
comments = raw["doc_document.comments"] or ""
if file:
try:
doc_name = current.s3db.doc_document.file.retrieve(file)[0]
except (IOError, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[file])
body = P(I(_class="icon-paperclip"),
" ",
SPAN(A(doc_name,
_href=doc_url,
)
),
" ",
_class="card_1_line",
)
elif url:
body = P(I(_class="icon-globe"),
" ",
SPAN(A(url,
_href=url,
)),
" ",
_class="card_1_line",
)
else:
# Shouldn't happen!
body = P(_class="card_1_line")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.doc_document
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="doc", f="document",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.T("Edit Document"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(I(_class="icon"),
SPAN(" %s" % title,
_class="card-title"),
edit_bar,
_class="card-header",
),
DIV(DIV(DIV(body,
P(SPAN(comments),
" ",
_class="card_manylines",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
class doc_DocumentRepresent(S3Represent):
""" Representation of Documents """
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link.
@param k: the key (doc_document.id)
@param v: the representation of the key
@param row: the row with this key
"""
if row:
try:
filename = row["doc_document.file"]
url = row["doc_document.url"]
except AttributeError:
return v
else:
if filename:
url = URL(c="default", f="download", args=filename)
return A(v, _href=url)
elif url:
return A(v, _href=url)
return v
# =============================================================================
class S3DocSitRepModel(S3Model):
"""
Situation Reports
"""
names = ("doc_sitrep",
"doc_sitrep_id",
)
def model(self):
        T = current.T
        db = current.db
# ---------------------------------------------------------------------
# Situation Reports
# - can be aggregated by OU
#
tablename = "doc_sitrep"
self.define_table(tablename,
self.super_link("doc_id", "doc_entity"),
Field("name", length=128,
label = T("Name"),
),
Field("description", "text",
label = T("Description"),
represent = lambda body: XML(body),
widget = s3_richtext_widget,
),
self.org_organisation_id(),
self.gis_location_id(
widget = S3LocationSelector(show_map = False),
),
s3_date(default = "now",
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Situation Report"),
title_display = T("Situation Report Details"),
title_list = T("Situation Reports"),
title_update = T("Edit Situation Report"),
title_upload = T("Import Situation Reports"),
label_list_button = T("List Situation Reports"),
label_delete_button = T("Delete Situation Report"),
msg_record_created = T("Situation Report added"),
msg_record_modified = T("Situation Report updated"),
msg_record_deleted = T("Situation Report deleted"),
msg_list_empty = T("No Situation Reports currently registered"))
crud_form = S3SQLCustomForm("name",
"description",
"organisation_id",
"location_id",
"date",
S3SQLInlineComponent(
"document",
name = "document",
label = T("Attachments"),
fields = [("", "file")],
),
"comments",
)
if current.deployment_settings.get_org_branches():
org_filter = S3HierarchyFilter("organisation_id",
leafonly = False,
)
else:
org_filter = S3OptionsFilter("organisation_id",
#filter = True,
#header = "",
)
filter_widgets = [org_filter,
S3LocationFilter(),
S3DateFilter("date"),
]
self.configure(tablename,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = ["date",
"event_sitrep.incident_id",
"location_id$L1",
"location_id$L2",
"location_id$L3",
"organisation_id",
"name",
(T("Attachments"), "document.file"),
"comments",
],
super_entity = "doc_entity",
)
# Components
self.add_components(tablename,
event_sitrep = {"name": "event_sitrep",
"joinby": "sitrep_id",
},
event_incident = {"link": "event_sitrep",
"joinby": "sitrep_id",
"key": "incident_id",
"actuate": "hide",
"multiple": "False",
#"autocomplete": "name",
"autodelete": False,
},
)
represent = S3Represent(lookup=tablename)
sitrep_id = S3ReusableField("sitrep_id", "reference %s" % tablename,
label = T("Situation Report"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "doc_sitrep.id",
represent,
orderby="doc_sitrep.name",
sort=True)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(doc_sitrep_id = sitrep_id,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(doc_sitrep_id = lambda **attr: dummy("sitrep_id"),
)
# END =========================================================================
| mit | 6,805,888,070,437,306,000 | -2,560,829,168,547,677,700 | 38.827374 | 111 | 0.393963 | false |
Jonekee/chromium.src | tools/telemetry/telemetry/user_story/shared_user_story_state.py | 15 | 2183 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class SharedUserStoryState(object):
"""A class that manages the test state across multiple user stories.
It's styled on unittest.TestCase for handling test setup & teardown logic.
"""
def __init__(self, test, options, user_story_set):
""" This method is styled on unittest.TestCase.setUpClass.
Override to do any action before running user stories that
share this same state.
Args:
test: a page_test.PageTest instance.
options: a BrowserFinderOptions instance that contains command line
options.
user_story_set: a user_story_set.UserStorySet instance.
"""
pass
@property
def platform(self):
""" Override to return the platform which user stories that share this same
state will be run on.
"""
raise NotImplementedError()
def WillRunUserStory(self, user_story):
""" Override to do any action before running each one of all user stories
that share this same state.
This method is styled on unittest.TestCase.setUp.
"""
raise NotImplementedError()
def DidRunUserStory(self, results):
""" Override to do any action after running each of all user stories that
share this same state.
This method is styled on unittest.TestCase.tearDown.
"""
raise NotImplementedError()
def GetTestExpectationAndSkipValue(self, expectations):
""" Return test expectation and skip value instance in case expectation
is 'skip'. This is run after WillRunUserStory and before RunUserStory.
"""
raise NotImplementedError()
def RunUserStory(self, results):
""" Override to do any action before running each one of all user stories
that share this same state.
This method is styled on unittest.TestCase.run.
"""
raise NotImplementedError()
def TearDownState(self, results):
""" Override to do any action after running multiple user stories that
share this same state.
This method is styled on unittest.TestCase.tearDownClass.
"""
raise NotImplementedError()
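# Illustrative sketch only (not part of the original module): a minimal
# subclass showing how the hooks above are typically overridden. The platform
# object and the way a user story is executed here are assumptions made purely
# for demonstration.
class _ExampleSharedState(SharedUserStoryState):
  def __init__(self, test, options, user_story_set):
    super(_ExampleSharedState, self).__init__(test, options, user_story_set)
    self._platform = None  # would normally come from a browser/device finder
    self._current_user_story = None
  @property
  def platform(self):
    return self._platform
  def WillRunUserStory(self, user_story):
    # Per-story setup, e.g. resetting caches or navigating to a start page.
    self._current_user_story = user_story
  def GetTestExpectationAndSkipValue(self, expectations):
    # No expectations handled in this sketch: always run the story.
    return 'pass', None
  def RunUserStory(self, results):
    # Assumed API: delegate the actual work to the story object itself.
    self._current_user_story.Run(self)
  def DidRunUserStory(self, results):
    self._current_user_story = None
  def TearDownState(self, results):
    pass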
| bsd-3-clause | -4,381,534,517,438,860,300 | 3,716,362,873,711,121,000 | 33.650794 | 79 | 0.717361 | false |
openstack/sahara | sahara/service/api/v2/data_sources.py | 4 | 1194 | # Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara import conductor as c
from sahara import context
conductor = c.API
def get_data_sources(**kwargs):
return conductor.data_source_get_all(context.ctx(),
regex_search=True, **kwargs)
def get_data_source(id):
return conductor.data_source_get(context.ctx(), id)
def delete_data_source(id):
conductor.data_source_destroy(context.ctx(), id)
def register_data_source(values):
return conductor.data_source_create(context.ctx(), values)
def data_source_update(id, values):
return conductor.data_source_update(context.ctx(), id, values)
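# Illustrative usage sketch (an assumption, not part of the original module):
# these helpers are thin wrappers around the conductor API and are typically
# invoked from the REST layer, e.g.
#
#   ds = register_data_source({'name': 'input-data',
#                              'type': 'swift',
#                              'url': 'swift://container/object'})
#   data_source_update(ds.id, {'description': 'updated description'})
#   delete_data_source(ds.id)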
| apache-2.0 | -3,119,659,131,828,179,000 | -5,376,280,360,652,087,000 | 28.121951 | 69 | 0.71608 | false |
evansd/django | django/template/base.py | 15 | 38221 | """
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
'<html></html>'
"""
import logging
import re
from inspect import getcallargs, getfullargspec
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.safestring import SafeData, mark_safe
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import gettext_lazy, pgettext_lazy
from .exceptions import TemplateSyntaxError
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
TOKEN_MAPPING = {
TOKEN_TEXT: 'Text',
TOKEN_VAR: 'Var',
TOKEN_BLOCK: 'Block',
TOKEN_COMMENT: 'Comment',
}
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
(re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
logger = logging.getLogger('django.template')
class VariableDoesNotExist(Exception):
def __init__(self, msg, params=()):
self.msg = msg
self.params = params
def __str__(self):
return self.msg % self.params
class Origin:
def __init__(self, name, template_name=None, loader=None):
self.name = name
self.template_name = template_name
self.loader = loader
def __str__(self):
return self.name
def __eq__(self, other):
if not isinstance(other, Origin):
return False
return (
self.name == other.name and
self.loader == other.loader
)
@property
def loader_name(self):
if self.loader:
return '%s.%s' % (
self.loader.__module__, self.loader.__class__.__name__,
)
class Template:
def __init__(self, template_string, origin=None, name=None, engine=None):
# If Template is instantiated directly rather than from an Engine and
# exactly one Django template engine is configured, use that engine.
# This is required to preserve backwards-compatibility for direct use
# e.g. Template('...').render(Context({...}))
if engine is None:
from .engine import Engine
engine = Engine.get_default()
if origin is None:
origin = Origin(UNKNOWN_SOURCE)
self.name = name
self.origin = origin
self.engine = engine
self.source = template_string
self.nodelist = self.compile_nodelist()
def __iter__(self):
for node in self.nodelist:
yield from node
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
with context.render_context.push_state(self):
if context.template is None:
with context.bind_template(self):
context.template_name = self.name
return self._render(context)
else:
return self._render(context)
def compile_nodelist(self):
"""
Parse and compile the template source into a nodelist. If debug
        is True and an exception occurs during parsing, the exception is
        annotated with contextual line information where it occurred in the
template source.
"""
if self.engine.debug:
lexer = DebugLexer(self.source)
else:
lexer = Lexer(self.source)
tokens = lexer.tokenize()
parser = Parser(
tokens, self.engine.template_libraries, self.engine.template_builtins,
self.origin,
)
try:
return parser.parse()
except Exception as e:
if self.engine.debug:
e.template_debug = self.get_exception_info(e, e.token)
raise
def get_exception_info(self, exception, token):
"""
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
"""
start, end = token.position
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
before = escape(self.source[upto:start])
during = escape(self.source[start:end])
after = escape(self.source[end:next])
source_lines.append((num, escape(self.source[upto:next])))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases exc_value.args can be empty or an invalid
# string.
try:
message = str(exception.args[0])
except (IndexError, UnicodeDecodeError):
message = '(Could not get exception message)'
return {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': self.origin.name,
'start': start,
'end': end,
}
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
class Token:
def __init__(self, token_type, contents, position=None, lineno=None):
"""
A token representing a string from the template.
token_type
One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT.
contents
The token source string.
position
An optional tuple containing the start and end index of the token
in the template source. This is used for traceback information
when debug is on.
lineno
The line number the token appears on in the template source.
This is used for traceback information and gettext files.
"""
self.token_type, self.contents = token_type, contents
self.lineno = lineno
self.position = position
def __str__(self):
token_name = TOKEN_MAPPING[self.token_type]
return ('<%s token: "%s...">' %
(token_name, self.contents[:20].replace('\n', '')))
def split_contents(self):
split = []
bits = iter(smart_split(self.contents))
for bit in bits:
# Handle translation-marked template pieces
if bit.startswith(('_("', "_('")):
sentinel = bit[2] + ')'
trans_bit = [bit]
while not bit.endswith(sentinel):
bit = next(bits)
trans_bit.append(bit)
bit = ' '.join(trans_bit)
split.append(bit)
        return split
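# Illustrative examples (not part of the original module) of how
# split_contents() behaves; the tag names used here are made up:
#   Token(TOKEN_BLOCK, 'trans "Hello world" as greeting').split_contents()
#       -> ['trans', '"Hello world"', 'as', 'greeting']
#   Token(TOKEN_BLOCK, 'mytag _("A label") arg').split_contents()
#       -> ['mytag', '_("A label")', 'arg']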
class Lexer:
def __init__(self, template_string):
self.template_string = template_string
self.verbatim = False
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
lineno = 1
result = []
for bit in tag_re.split(self.template_string):
if bit:
result.append(self.create_token(bit, None, lineno, in_tag))
in_tag = not in_tag
lineno += bit.count('\n')
return result
def create_token(self, token_string, position, lineno, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag and token_string.startswith(BLOCK_TAG_START):
# The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
# We could do len(BLOCK_TAG_START) to be more "correct", but we've
# hard-coded the 2s here for performance. And it's not like
# the TAG_START values are going to change anytime, anyway.
block_content = token_string[2:-2].strip()
if self.verbatim and block_content == self.verbatim:
self.verbatim = False
if in_tag and not self.verbatim:
if token_string.startswith(VARIABLE_TAG_START):
token = Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
elif token_string.startswith(BLOCK_TAG_START):
if block_content[:9] in ('verbatim', 'verbatim '):
self.verbatim = 'end%s' % block_content
token = Token(TOKEN_BLOCK, block_content, position, lineno)
elif token_string.startswith(COMMENT_TAG_START):
content = ''
if token_string.find(TRANSLATOR_COMMENT_MARK):
content = token_string[2:-2].strip()
token = Token(TOKEN_COMMENT, content, position, lineno)
else:
token = Token(TOKEN_TEXT, token_string, position, lineno)
return token
class DebugLexer(Lexer):
def tokenize(self):
"""
        Split a template string into tokens and annotate each token with its
start and end position in the source. This is slower than the default
lexer so only use it when debug is True.
"""
lineno = 1
result = []
upto = 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
token_string = self.template_string[upto:start]
result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False))
lineno += token_string.count('\n')
upto = start
token_string = self.template_string[start:end]
result.append(self.create_token(token_string, (start, end), lineno, in_tag=True))
lineno += token_string.count('\n')
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False))
return result
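# Illustrative example (not part of the original module): tokenizing a small
# template yields alternating block/var/text tokens, e.g.
#   tokens = Lexer('{% if x %}{{ y }}{% endif %}!').tokenize()
#   [str(t) for t in tokens]
#       -> ['<Block token: "if x...">', '<Var token: "y...">',
#           '<Block token: "endif...">', '<Text token: "!...">']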
class Parser:
def __init__(self, tokens, libraries=None, builtins=None, origin=None):
self.tokens = tokens
self.tags = {}
self.filters = {}
self.command_stack = []
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.libraries = libraries
for builtin in builtins:
self.add_library(builtin)
self.origin = origin
def parse(self, parse_until=None):
"""
        Iterate through the parser tokens and compile each one into a node.
If parse_until is provided, parsing will stop once one of the
specified tokens has been reached. This is formatted as a list of
tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
reached, raise an exception with the unclosed block tag details.
"""
if parse_until is None:
parse_until = []
nodelist = NodeList()
while self.tokens:
token = self.next_token()
# Use the raw values here for TOKEN_* for a tiny performance boost.
if token.token_type == 0: # TOKEN_TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type == 1: # TOKEN_VAR
if not token.contents:
raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
try:
filter_expression = self.compile_filter(token.contents)
except TemplateSyntaxError as e:
raise self.error(token, e)
var_node = VariableNode(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token.token_type == 2: # TOKEN_BLOCK
try:
command = token.contents.split()[0]
except IndexError:
raise self.error(token, 'Empty block tag on line %d' % token.lineno)
if command in parse_until:
# A matching token has been reached. Return control to
# the caller. Put the token back on the token list so the
# caller knows where it terminated.
self.prepend_token(token)
return nodelist
# Add the token to the command stack. This is used for error
# messages if further parsing fails due to an unclosed block
# tag.
self.command_stack.append((command, token))
# Get the tag callback function from the ones registered with
# the parser.
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
# Compile the callback into a node object and add it to
# the node list.
try:
compiled_result = compile_func(self, token)
except Exception as e:
raise self.error(token, e)
self.extend_nodelist(nodelist, compiled_result, token)
# Compile success. Remove the token from the command stack.
self.command_stack.pop()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def skip_past(self, endtag):
while self.tokens:
token = self.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == endtag:
return
self.unclosed_block_tag([endtag])
def extend_nodelist(self, nodelist, node, token):
# Check that non-text nodes don't appear before an extends tag.
if node.must_be_first and nodelist.contains_nontext:
raise self.error(
token, '%r must be the first tag in the template.' % node,
)
if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
nodelist.contains_nontext = True
# Set origin and token here since we can't modify the node __init__()
# method.
node.token = token
node.origin = self.origin
nodelist.append(node)
def error(self, token, e):
"""
Return an exception annotated with the originating token. Since the
parser can be called recursively, check if a token is already set. This
ensures the innermost token is highlighted if an exception occurs,
e.g. a compile error within the body of an if statement.
"""
if not isinstance(e, Exception):
e = TemplateSyntaxError(e)
if not hasattr(e, 'token'):
e.token = token
return e
def invalid_block_tag(self, token, command, parse_until=None):
if parse_until:
raise self.error(
token,
"Invalid block tag on line %d: '%s', expected %s. Did you "
"forget to register or load this tag?" % (
token.lineno,
command,
get_text_list(["'%s'" % p for p in parse_until], 'or'),
),
)
raise self.error(
token,
"Invalid block tag on line %d: '%s'. Did you forget to register "
"or load this tag?" % (token.lineno, command)
)
def unclosed_block_tag(self, parse_until):
command, token = self.command_stack.pop()
msg = "Unclosed tag on line %d: '%s'. Looking for one of: %s." % (
token.lineno,
command,
', '.join(parse_until),
)
raise self.error(token, msg)
def next_token(self):
return self.tokens.pop(0)
def prepend_token(self, token):
self.tokens.insert(0, token)
def delete_first_token(self):
del self.tokens[0]
def add_library(self, lib):
self.tags.update(lib.tags)
self.filters.update(lib.filters)
def compile_filter(self, token):
"""
Convenient wrapper for FilterExpression
"""
return FilterExpression(token, self)
def find_filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': r'\w\.',
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.VERBOSE)
class FilterExpression:
"""
Parse a variable token and its optional filters (all as a single string),
and return a list of tuples of the filter name and arguments.
Sample::
>>> token = 'variable|default:"Default value"|date:"Y-m-d"'
>>> p = Parser('')
>>> fe = FilterExpression(token, p)
>>> len(fe.filters)
2
>>> fe.var
<Variable: 'variable'>
"""
def __init__(self, token, parser):
self.token = token
matches = filter_re.finditer(token)
var_obj = None
filters = []
upto = 0
for match in matches:
start = match.start()
if upto != start:
raise TemplateSyntaxError("Could not parse some characters: "
"%s|%s|%s" %
(token[:upto], token[upto:start],
token[start:]))
if var_obj is None:
var, constant = match.group("var", "constant")
if constant:
try:
var_obj = Variable(constant).resolve({})
except VariableDoesNotExist:
var_obj = None
elif var is None:
raise TemplateSyntaxError("Could not find variable at "
"start of %s." % token)
else:
var_obj = Variable(var)
else:
filter_name = match.group("filter_name")
args = []
constant_arg, var_arg = match.group("constant_arg", "var_arg")
if constant_arg:
args.append((False, Variable(constant_arg).resolve({})))
elif var_arg:
args.append((True, Variable(var_arg)))
filter_func = parser.find_filter(filter_name)
self.args_check(filter_name, filter_func, args)
filters.append((filter_func, args))
upto = match.end()
if upto != len(token):
raise TemplateSyntaxError("Could not parse the remainder: '%s' "
"from '%s'" % (token[upto:], token))
self.filters = filters
self.var = var_obj
def resolve(self, context, ignore_failures=False):
if isinstance(self.var, Variable):
try:
obj = self.var.resolve(context)
except VariableDoesNotExist:
if ignore_failures:
obj = None
else:
string_if_invalid = context.template.engine.string_if_invalid
if string_if_invalid:
if '%s' in string_if_invalid:
return string_if_invalid % self.var
else:
return string_if_invalid
else:
obj = string_if_invalid
else:
obj = self.var
for func, args in self.filters:
arg_vals = []
for lookup, arg in args:
if not lookup:
arg_vals.append(mark_safe(arg))
else:
arg_vals.append(arg.resolve(context))
if getattr(func, 'expects_localtime', False):
obj = template_localtime(obj, context.use_tz)
if getattr(func, 'needs_autoescape', False):
new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
else:
new_obj = func(obj, *arg_vals)
if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
obj = mark_safe(new_obj)
else:
obj = new_obj
return obj
def args_check(name, func, provided):
provided = list(provided)
# First argument, filter input, is implied.
plen = len(provided) + 1
# Check to see if a decorator is providing the real function.
func = getattr(func, '_decorated_function', func)
args, _, _, defaults, _, _, _ = getfullargspec(func)
alen = len(args)
dlen = len(defaults or [])
# Not enough OR Too many
if plen < (alen - dlen) or plen > alen:
raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
(name, alen - dlen, plen))
return True
args_check = staticmethod(args_check)
def __str__(self):
return self.token
class Variable:
"""
A template variable, resolvable against a given context. The variable may
be a hard-coded string (if it begins and ends with single or double quote
marks)::
>>> c = {'article': {'section':'News'}}
>>> Variable('article.section').resolve(c)
'News'
>>> Variable('article').resolve(c)
{'section': 'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = 'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
def __init__(self, var):
self.var = var
self.literal = None
self.lookups = None
self.translate = False
self.message_context = None
if not isinstance(var, str):
raise TypeError(
"Variable must be a string or number, got %s" % type(var))
try:
# First try to treat this variable as a number.
#
# Note that this could cause an OverflowError here that we're not
# catching. Since this should only happen at compile time, that's
# probably OK.
self.literal = float(var)
# So it's a float... is it an int? If the original value contained a
# dot or an "e" then it was a float, not an int.
if '.' not in var and 'e' not in var.lower():
self.literal = int(self.literal)
# "2." is invalid
if var.endswith('.'):
raise ValueError
except ValueError:
# A ValueError means that the variable isn't a number.
if var.startswith('_(') and var.endswith(')'):
# The result of the lookup should be translated at rendering
# time.
self.translate = True
var = var[2:-1]
# If it's wrapped with quotes (single or double), then
# we're also dealing with a literal.
try:
self.literal = mark_safe(unescape_string_literal(var))
except ValueError:
# Otherwise we'll set self.lookups so that resolve() knows we're
# dealing with a bonafide variable
if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
raise TemplateSyntaxError("Variables and attributes may "
"not begin with underscores: '%s'" %
var)
self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
def resolve(self, context):
"""Resolve this variable against a given context."""
if self.lookups is not None:
# We're dealing with a variable that needs to be resolved
value = self._resolve_lookup(context)
else:
# We're dealing with a literal, so it's already been "resolved"
value = self.literal
if self.translate:
is_safe = isinstance(value, SafeData)
msgid = value.replace('%', '%%')
msgid = mark_safe(msgid) if is_safe else msgid
if self.message_context:
return pgettext_lazy(self.message_context, msgid)
else:
return gettext_lazy(msgid)
return value
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.var)
def __str__(self):
return self.var
def _resolve_lookup(self, context):
"""
Perform resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if not isinstance(current, BaseContext) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = context.template.engine.string_if_invalid
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
try:
getcallargs(current)
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
except Exception as e:
template_name = getattr(context, 'template_name', None) or 'unknown'
logger.debug(
"Exception while resolving variable '%s' in template '%s'.",
bit,
template_name,
exc_info=True,
)
if getattr(e, 'silent_variable_failure', False):
current = context.template.engine.string_if_invalid
else:
raise
return current
class Node:
# Set this to True for nodes that must be first in the template (although
    # they can be preceded by text nodes).
must_be_first = False
child_nodelists = ('nodelist',)
token = None
def render(self, context):
"""
Return the node rendered as a string.
"""
pass
def render_annotated(self, context):
"""
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
"""
try:
return self.render(context)
except Exception as e:
if context.template.engine.debug and not hasattr(e, 'template_debug'):
e.template_debug = context.render_context.template.get_exception_info(e, self.token)
raise
def __iter__(self):
yield self
def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes
class NodeList(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
bits = []
for node in self:
if isinstance(node, Node):
bit = node.render_annotated(context)
else:
bit = node
bits.append(str(bit))
return mark_safe(''.join(bits))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
class TextNode(Node):
def __init__(self, s):
self.s = s
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.s[:25])
def render(self, context):
return self.s
def render_value_in_context(value, context):
"""
Convert any value to a string to become part of a rendered template. This
means escaping, if required, and conversion to a string. If value is a
string, it's expected to already be translated.
"""
value = template_localtime(value, use_tz=context.use_tz)
value = localize(value, use_l10n=context.use_l10n)
if context.autoescape:
if not issubclass(type(value), str):
value = str(value)
return conditional_escape(value)
else:
return str(value)
class VariableNode(Node):
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ''
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
Parse token keyword arguments and return a dictionary of the arguments
retrieved from the ``bits`` token list.
`bits` is a list containing the remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments are removed from this
list.
`support_legacy` - if True, the legacy format ``1 as foo`` is accepted.
Otherwise, only the standard ``foo=1`` format is allowed.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so return the dictionary as soon as an invalid argument format
is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match.group(1):
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
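# Illustrative sketch (an assumption, not part of the original module): a
# custom tag's compile function would typically use token_kwargs() like this,
# where ``register`` is a django.template.Library instance and the tag name
# and Node subclass are hypothetical:
#
#   @register.tag('with_extra')
#   def do_with_extra(parser, token):
#       bits = token.split_contents()[1:]
#       extra_context = token_kwargs(bits, parser, support_legacy=False)
#       nodelist = parser.parse(('endwith_extra',))
#       parser.delete_first_token()
#       return WithExtraNode(nodelist, extra_context)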
| bsd-3-clause | -3,231,207,475,730,031,600 | 4,202,816,132,482,942,000 | 35.400952 | 106 | 0.557652 | false |
sloria/sphinx-issues | test_sphinx_issues.py | 1 | 4598 | from tempfile import mkdtemp
from shutil import rmtree
try:
from unittest.mock import Mock
except ImportError:
    from mock import Mock
from sphinx.application import Sphinx
from sphinx_issues import (
issue_role,
user_role,
pr_role,
cve_role,
commit_role,
setup as issues_setup,
)
import pytest
@pytest.yield_fixture(
params=[
# Parametrize config
{"issues_github_path": "marshmallow-code/marshmallow"},
{
"issues_uri": "https://github.com/marshmallow-code/marshmallow/issues/{issue}",
"issues_pr_uri": "https://github.com/marshmallow-code/marshmallow/pull/{pr}",
"issues_commit_uri": "https://github.com/marshmallow-code/marshmallow/commit/{commit}",
},
]
)
def app(request):
src, doctree, confdir, outdir = [mkdtemp() for _ in range(4)]
Sphinx._log = lambda self, message, wfile, nonl=False: None
app = Sphinx(
srcdir=src, confdir=None, outdir=outdir, doctreedir=doctree, buildername="html"
)
issues_setup(app)
# Stitch together as the sphinx app init() usually does w/ real conf files
app.config._raw_config = request.param
try:
app.config.init_values()
except TypeError:
app.config.init_values(lambda x: x)
yield app
[rmtree(x) for x in (src, doctree, confdir, outdir)]
@pytest.fixture()
def inliner(app):
return Mock(document=Mock(settings=Mock(env=Mock(app=app))))
@pytest.mark.parametrize(
("role", "role_name", "text", "expected_text", "expected_url"),
[
(
issue_role,
"issue",
"42",
"#42",
"https://github.com/marshmallow-code/marshmallow/issues/42",
),
(
pr_role,
"pr",
"42",
"#42",
"https://github.com/marshmallow-code/marshmallow/pull/42",
),
(user_role, "user", "sloria", "@sloria", "https://github.com/sloria"),
(
user_role,
"user",
"Steven Loria <sloria>",
"Steven Loria",
"https://github.com/sloria",
),
(
cve_role,
"cve",
"CVE-2018-17175",
"CVE-2018-17175",
"https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175",
),
(
commit_role,
"commit",
"123abc456def",
"123abc4",
"https://github.com/marshmallow-code/marshmallow/commit/123abc456def",
),
# External issue
(
issue_role,
"issue",
"sloria/webargs#42",
"sloria/webargs#42",
"https://github.com/sloria/webargs/issues/42",
),
# External PR
(
pr_role,
"pr",
"sloria/webargs#42",
"sloria/webargs#42",
"https://github.com/sloria/webargs/pull/42",
),
# External commit
(
commit_role,
"commit",
"sloria/webargs@abc123def456",
"sloria/webargs@abc123d",
"https://github.com/sloria/webargs/commit/abc123def456",
),
],
)
def test_roles(inliner, role, role_name, text, expected_text, expected_url):
result = role(role_name, rawtext="", text=text, lineno=None, inliner=inliner)
link = result[0][0]
assert link.astext() == expected_text
assert link.attributes["refuri"] == expected_url
def test_issue_role_multiple(inliner):
result = issue_role(
name=None, rawtext="", text="42,43", inliner=inliner, lineno=None
)
link1 = result[0][0]
assert link1.astext() == "#42"
issue_url = "https://github.com/marshmallow-code/marshmallow/issues/"
assert link1.attributes["refuri"] == issue_url + "42"
sep = result[0][1]
assert sep.astext() == ", "
link2 = result[0][2]
assert link2.astext() == "#43"
assert link2.attributes["refuri"] == issue_url + "43"
def test_issue_role_multiple_with_external(inliner):
result = issue_role(
"issue", rawtext="", text="42,sloria/konch#43", inliner=inliner, lineno=None
)
link1 = result[0][0]
assert link1.astext() == "#42"
issue_url = "https://github.com/marshmallow-code/marshmallow/issues/42"
assert link1.attributes["refuri"] == issue_url
sep = result[0][1]
assert sep.astext() == ", "
link2 = result[0][2]
assert link2.astext() == "sloria/konch#43"
assert link2.attributes["refuri"] == "https://github.com/sloria/konch/issues/43"
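# Illustrative reference (not part of the original tests): in a Sphinx document
# the roles exercised above are written as, e.g.
#   :issue:`42` :pr:`sloria/webargs#42` :user:`Steven Loria <sloria>`
#   :cve:`CVE-2018-17175` :commit:`123abc456def`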
| mit | -2,553,931,016,830,603,300 | 1,780,098,423,446,355,700 | 28.101266 | 99 | 0.562853 | false |
hbhdytf/mac2 | build/lib.linux-x86_64-2.7/swift/common/middleware/account_quotas.py | 39 | 5676 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``account_quotas`` is a middleware which blocks write requests (PUT, POST) if a
given account quota (in bytes) is exceeded while DELETE requests are still
allowed.
``account_quotas`` uses the ``x-account-meta-quota-bytes`` metadata entry to
store the quota. Write requests to this metadata entry are only permitted for
resellers. There is no quota limit if ``x-account-meta-quota-bytes`` is not
set.
The ``account_quotas`` middleware should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware.
For example::
[pipeline:main]
pipeline = catch_errors cache tempauth account_quotas proxy-server
[filter:account_quotas]
use = egg:swift#account_quotas
To set the quota on an account::
swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \
post -m quota-bytes:10000
Remove the quota::
swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \
post -m quota-bytes:
The same limitations apply for the account quotas as for the container quotas.
For example, when uploading an object without a content-length header the proxy
server doesn't know the final size of the currently uploaded object and the
upload will be allowed if the current account size is within the quota.
Due to eventual consistency, further uploads might be possible until the
account size has been updated.
"""
from swift.common.constraints import check_copy_from_header
from swift.common.swob import HTTPForbidden, HTTPBadRequest, \
HTTPRequestEntityTooLarge, wsgify
from swift.common.utils import register_swift_info
from swift.proxy.controllers.base import get_account_info, get_object_info
class AccountQuotaMiddleware(object):
"""Account quota middleware
See above for a full description.
"""
def __init__(self, app, *args, **kwargs):
self.app = app
@wsgify
def __call__(self, request):
if request.method not in ("POST", "PUT", "COPY"):
return self.app
try:
ver, account, container, obj = request.split_path(
2, 4, rest_with_last=True)
except ValueError:
return self.app
if not container:
# account request, so we pay attention to the quotas
new_quota = request.headers.get(
'X-Account-Meta-Quota-Bytes')
remove_quota = request.headers.get(
'X-Remove-Account-Meta-Quota-Bytes')
else:
# container or object request; even if the quota headers are set
# in the request, they're meaningless
new_quota = remove_quota = None
if remove_quota:
new_quota = 0 # X-Remove dominates if both are present
if request.environ.get('reseller_request') is True:
if new_quota and not new_quota.isdigit():
return HTTPBadRequest()
return self.app
# deny quota set for non-reseller
if new_quota is not None:
return HTTPForbidden()
if request.method == "POST" or not obj:
return self.app
if request.method == 'COPY':
copy_from = container + '/' + obj
else:
if 'x-copy-from' in request.headers:
src_cont, src_obj = check_copy_from_header(request)
copy_from = "%s/%s" % (src_cont, src_obj)
else:
copy_from = None
content_length = (request.content_length or 0)
account_info = get_account_info(request.environ, self.app)
if not account_info or not account_info['bytes']:
return self.app
try:
quota = int(account_info['meta'].get('quota-bytes', -1))
except ValueError:
return self.app
if quota < 0:
return self.app
if copy_from:
path = '/' + ver + '/' + account + '/' + copy_from
object_info = get_object_info(request.environ, self.app, path)
if not object_info or not object_info['length']:
content_length = 0
else:
content_length = int(object_info['length'])
new_size = int(account_info['bytes']) + content_length
if quota < new_size:
resp = HTTPRequestEntityTooLarge(body='Upload exceeds quota.')
if 'swift.authorize' in request.environ:
orig_authorize = request.environ['swift.authorize']
def reject_authorize(*args, **kwargs):
aresp = orig_authorize(*args, **kwargs)
if aresp:
return aresp
return resp
request.environ['swift.authorize'] = reject_authorize
else:
return resp
return self.app
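# Illustrative sketch (not part of the middleware): how the size check above
# plays out for a copy request; all numbers are hypothetical.
#
#   quota = 1000              # from x-account-meta-quota-bytes
#   account bytes used = 900
#   copy of an object whose recorded 'length' is 200
#   new_size = 900 + 200 = 1100 > quota
#   -> the request gets a 413 ("Upload exceeds quota."), unless the
#      swift.authorize callback denies it first, in which case that
#      authorization error wins.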
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
register_swift_info('account_quotas')
def account_quota_filter(app):
return AccountQuotaMiddleware(app)
return account_quota_filter
| apache-2.0 | -6,797,932,485,761,597,000 | 6,685,033,599,507,502,000 | 34.037037 | 79 | 0.633192 | false |
macosforge/ccs-calendarserver | txdav/caldav/datastore/scheduling/ischedule/remoteservers.py | 1 | 6936 | ##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.filepath import CachingFilePath as FilePath
from twext.python.log import Logger
from twistedcaldav.config import config, fullServerPath
from twistedcaldav import xmlutil
"""
XML-based iSchedule configuration file handling for remote servers. Servers
that are local (podded) are handled by the localservers.py module.
"""
__all__ = [
"IScheduleServers",
]
log = Logger()
class IScheduleServers(object):
_fileInfo = None
_xmlFile = None
_servers = None
_domainMap = None
def __init__(self):
if IScheduleServers._servers is None:
self._loadConfig()
def _loadConfig(self):
if config.Scheduling.iSchedule.RemoteServers:
if IScheduleServers._servers is None:
IScheduleServers._xmlFile = FilePath(
fullServerPath(
config.ConfigRoot,
config.Scheduling.iSchedule.RemoteServers,
)
)
if IScheduleServers._xmlFile.exists():
IScheduleServers._xmlFile.restat()
fileInfo = (IScheduleServers._xmlFile.getmtime(), IScheduleServers._xmlFile.getsize())
if fileInfo != IScheduleServers._fileInfo:
parser = IScheduleServersParser(IScheduleServers._xmlFile)
IScheduleServers._servers = parser.servers
self._mapDomains()
IScheduleServers._fileInfo = fileInfo
else:
IScheduleServers._servers = ()
IScheduleServers._domainMap = {}
else:
IScheduleServers._servers = ()
IScheduleServers._domainMap = {}
def _mapDomains(self):
IScheduleServers._domainMap = {}
for server in IScheduleServers._servers:
for domain in server.domains:
IScheduleServers._domainMap[domain] = server
def mapDomain(self, domain):
"""
Map a calendar user address domain to a suitable server that can
handle server-to-server requests for that user.
"""
return IScheduleServers._domainMap.get(domain)
ELEMENT_SERVERS = "servers"
ELEMENT_SERVER = "server"
ELEMENT_URI = "uri"
ELEMENT_AUTHENTICATION = "authentication"
ATTRIBUTE_TYPE = "type"
ATTRIBUTE_BASICAUTH = "basic"
ELEMENT_USER = "user"
ELEMENT_PASSWORD = "password"
ELEMENT_ALLOW_REQUESTS_FROM = "allow-requests-from"
ELEMENT_ALLOW_REQUESTS_TO = "allow-requests-to"
ELEMENT_DOMAINS = "domains"
ELEMENT_DOMAIN = "domain"
ELEMENT_CLIENT_HOSTS = "hosts"
ELEMENT_HOST = "host"
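# Illustrative sketch of the XML this module consumes (host names and
# credentials are made up; only the element names above are authoritative):
#
#   <servers>
#     <server>
#       <uri>https://calendar.example.org:8443/ischedule</uri>
#       <authentication type="basic">
#         <user>ischedule</user>
#         <password>secret</password>
#       </authentication>
#       <allow-requests-from/>
#       <allow-requests-to/>
#       <domains>
#         <domain>example.org</domain>
#       </domains>
#       <hosts>
#         <host>calendar.example.org</host>
#       </hosts>
#     </server>
#   </servers>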
class IScheduleServersParser(object):
"""
Server-to-server configuration file parser.
"""
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.xmlFile)
    def __init__(self, xmlFile):
        self.servers = []
        # Keep a reference to the file so __repr__ above can report it.
        self.xmlFile = xmlFile
        # Read in XML
        _ignore_etree, servers_node = xmlutil.readXML(xmlFile.path, ELEMENT_SERVERS)
self._parseXML(servers_node)
def _parseXML(self, node):
"""
Parse the XML root node from the server-to-server configuration document.
@param node: the L{Node} to parse.
"""
for child in node:
if child.tag == ELEMENT_SERVER:
self.servers.append(IScheduleServerRecord())
self.servers[-1].parseXML(child)
class IScheduleServerRecord (object):
"""
Contains server-to-server details.
"""
def __init__(self, uri=None, rewriteCUAddresses=True, moreHeaders=[], podding=False):
"""
        @param uri: the iSchedule URI of the remote server.
        @param rewriteCUAddresses: whether calendar user addresses should be rewritten.
        @param moreHeaders: additional headers to send with each request.
        @param podding: whether this record refers to a podded server.
"""
self.uri = ""
self.authentication = None
self.allow_from = False
self.allow_to = True
self.domains = []
self.client_hosts = []
self.rewriteCUAddresses = rewriteCUAddresses
self.moreHeaders = moreHeaders
self._podding = podding
if uri:
self.uri = uri
self._parseDetails()
def details(self):
return (self.ssl, self.host, self.port, self.path,)
def podding(self):
return self._podding
def redirect(self, location):
"""
Permanent redirect for the lifetime of this record.
"""
self.uri = location
self._parseDetails()
def parseXML(self, node):
for child in node:
if child.tag == ELEMENT_URI:
self.uri = child.text
elif child.tag == ELEMENT_AUTHENTICATION:
self._parseAuthentication(child)
elif child.tag == ELEMENT_ALLOW_REQUESTS_FROM:
self.allow_from = True
elif child.tag == ELEMENT_ALLOW_REQUESTS_TO:
self.allow_to = True
elif child.tag == ELEMENT_DOMAINS:
self._parseList(child, ELEMENT_DOMAIN, self.domains)
elif child.tag == ELEMENT_CLIENT_HOSTS:
self._parseList(child, ELEMENT_HOST, self.client_hosts)
else:
raise RuntimeError("[{}] Unknown attribute: {}".format(self.__class__, child.tag,))
self._parseDetails()
def _parseList(self, node, element_name, appendto):
for child in node:
if child.tag == element_name:
appendto.append(child.text)
    def _parseAuthentication(self, node):
        if node.get(ATTRIBUTE_TYPE) != ATTRIBUTE_BASICAUTH:
            return
        # Default to None so missing <user>/<password> elements do not raise
        # an UnboundLocalError below.
        user = password = None
        for child in node:
            if child.tag == ELEMENT_USER:
                user = child.text
            elif child.tag == ELEMENT_PASSWORD:
                password = child.text
        self.authentication = ("basic", user, password,)
    def _parseDetails(self):
        # Extract scheme, host, port and path
        if self.uri.startswith("http://"):
            self.ssl = False
            rest = self.uri[7:]
        elif self.uri.startswith("https://"):
            self.ssl = True
            rest = self.uri[8:]
        else:
            # Only http/https URIs are meaningful here; fail early instead of
            # hitting an UnboundLocalError on 'rest' below.
            raise ValueError("Unsupported iSchedule URI: {}".format(self.uri))
        splits = rest.split("/", 1)
hostport = splits[0].split(":")
self.host = hostport[0]
if len(hostport) > 1:
self.port = int(hostport[1])
else:
self.port = {False: 80, True: 443}[self.ssl]
self.path = "/"
if len(splits) > 1:
self.path += splits[1]
| apache-2.0 | 5,781,105,912,511,312,000 | -3,744,721,110,517,818,000 | 30.527273 | 115 | 0.598039 | false |
rocky/python3-trepan | test/unit/test-cmdfns.py | 1 | 2471 | #!/usr/bin/env python3
'Unit test for trepan.processor.command.cmdfns'
import unittest
from trepan.processor import cmdfns as Mcmdfns
class TestCommandHelper(unittest.TestCase):
def setUp(self):
self.errors = []
return
def errmsg(self, msg):
self.errors.append(msg)
return
def test_get_an_int(self):
self.assertEqual(0, Mcmdfns.get_an_int(self.errmsg, '0', 'foo', 0))
self.assertEqual(0, len(self.errors))
self.assertEqual(6, Mcmdfns.get_an_int(self.errmsg, '6*1', 'foo', 5))
self.assertEqual(0, len(self.errors))
self.assertEqual(None, Mcmdfns.get_an_int(self.errmsg, '0',
'0 is too small', 5))
self.assertEqual(1, len(self.errors))
self.assertEqual(None, Mcmdfns.get_an_int(self.errmsg, '4+a',
'4+a is invalid', 5))
self.assertEqual('4+a is invalid', self.errors[-1])
return
def test_get_int(self):
self.assertEqual(1, Mcmdfns.get_int(self.errmsg, '1', 5))
self.assertEqual(3, Mcmdfns.get_int(self.errmsg, '1+2', 5))
self.assertEqual(5, Mcmdfns.get_int(self.errmsg, None, 5))
self.assertEqual(1, Mcmdfns.get_int(self.errmsg, None))
self.assertRaises(ValueError, Mcmdfns.get_int,
*(self.errmsg, 'Foo', 5))
return
def test_get_onoff(self):
for arg in ('1', 'on'):
self.assertEqual(True, Mcmdfns.get_onoff(self.errmsg, arg))
pass
for arg in ('0', 'off'):
self.assertEqual(False, Mcmdfns.get_onoff(self.errmsg, arg))
pass
for result in (True, False):
self.assertEqual(result, Mcmdfns.get_onoff(self.errmsg, None,
result))
pass
self.assertRaises(ValueError, Mcmdfns.get_onoff, *(self.errmsg,
'Foo'))
return
def test_want_different_line(self):
for cmd, default, expected in [
('s+', False, True),
('s-', True, False),
('s', False, False),
('n', True, True) ]:
self.assertEqual(expected,
Mcmdfns.want_different_line(cmd, default),
cmd)
pass
return
pass
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,379,965,173,949,179,000 | -2,059,627,134,446,633,500 | 34.811594 | 77 | 0.522461 | false |
BrandonY/python-docs-samples | appengine/standard/multitenancy/datastore_test.py | 9 | 1124 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webtest
import datastore
def test_datastore(testbed):
app = webtest.TestApp(datastore.app)
response = app.get('/datastore')
assert response.status_int == 200
assert 'Global: 1' in response.body
response = app.get('/datastore/a')
assert response.status_int == 200
assert 'Global: 2' in response.body
assert 'a: 1' in response.body
response = app.get('/datastore/b')
assert response.status_int == 200
assert 'Global: 3' in response.body
assert 'b: 1' in response.body
| apache-2.0 | -2,177,413,003,655,234,300 | -6,507,573,845,816,532,000 | 31.114286 | 74 | 0.719751 | false |
matthiasrichter/AliceO2 | Analysis/Scripts/update_ccdb.py | 3 | 6042 | #!/usr/bin/env python3
# Copyright 2019-2020 CERN and copyright holders of ALICE O2.
# See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
# All rights not expressly granted are reserved.
#
# This software is distributed under the terms of the GNU General Public
# License v3 (GPL Version 3), copied verbatim in the file "COPYING".
#
# In applying this license CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""
Script to update the CCDB with timestamp non-overlapping objects.
If an object is found in the range specified, the object is split into two.
If the requested range was overlapping three objects are uploaded on CCDB:
1) latest object with requested timestamp validity
2) old object with validity [old_lower_validity-requested_lower_bound]
3) old object with validity [requested_upper_bound, old_upper_validity]
Author: Nicolo' Jacazio on 2020-06-22
TODO add support for 3 files update
"""
import subprocess
from datetime import datetime
import matplotlib.pyplot as plt
import argparse
def convert_timestamp(ts):
"""
Converts the timestamp in milliseconds in human readable format
"""
return datetime.utcfromtimestamp(ts/1000).strftime('%Y-%m-%d %H:%M:%S')
def get_ccdb_obj(path, timestamp, dest="/tmp/", verbose=0):
"""
Gets the ccdb object from 'path' and 'timestamp' and downloads it into 'dest'
"""
if verbose:
print("Getting obj", path, "with timestamp",
timestamp, convert_timestamp(timestamp))
cmd = f"o2-ccdb-downloadccdbfile --path {path} --dest {dest} --timestamp {timestamp}"
subprocess.run(cmd.split())
def get_ccdb_obj_validity(path, dest="/tmp/", verbose=0):
"""
Gets the timestamp validity for an object downloaded from CCDB.
Returns a list with the initial and end timestamps.
"""
cmd = f"o2-ccdb-inspectccdbfile {dest}{path}/snapshot.root"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
output = output.decode("utf-8").split("\n")
error = error.decode("utf-8").split("\n") if error is not None else error
if verbose:
print("out:")
print(*output, "\n")
print("err:")
print(error)
result = list(filter(lambda x: x.startswith('Valid-'), output))
ValidFrom = result[0].split()
ValidUntil = result[1].split()
return [int(ValidFrom[-1]), int(ValidUntil[-1])]
def upload_ccdb_obj(path, timestamp_from, timestamp_until, dest="/tmp/", meta=""):
"""
Uploads a new object to CCDB in the 'path' using the validity timestamp specified
"""
print("Uploading obj", path, "with timestamp", [timestamp_from, timestamp_until],
convert_timestamp(timestamp_from), convert_timestamp(timestamp_until))
key = path.split("/")[-1]
cmd = f"o2-ccdb-upload -f {dest}{path}/snapshot.root "
cmd += f"--key {key} --path {path} "
cmd += f"--starttimestamp {timestamp_from} --endtimestamp {timestamp_until} --meta \"{meta}\""
subprocess.run(cmd.split())
def main(path, timestamp_from, timestamp_until, verbose=0, show=False):
"""
Used to upload a new object to CCDB in 'path' valid from 'timestamp_from' to 'timestamp_until'
Gets the object from CCDB specified in 'path' and for 'timestamp_from-1'
Gets the object from CCDB specified in 'path' and for 'timestamp_until+1'
If required plots the situation before and after the update
"""
get_ccdb_obj(path, timestamp_from-1)
val_before = get_ccdb_obj_validity(path, verbose=verbose)
get_ccdb_obj(path, timestamp_until+1)
val_after = get_ccdb_obj_validity(path, verbose=verbose)
overlap_before = val_before[1] > timestamp_from
overlap_after = val_after[0] < timestamp_until
if verbose:
if overlap_before:
print("Previous objects overalps")
if overlap_after:
print("Next objects overalps")
trimmed_before = val_before if not overlap_before else [
val_before[0], timestamp_from - 1]
trimmed_after = val_after if not overlap_after else [
timestamp_until+1, val_after[1]]
if show:
fig, ax = plt.subplots()
fig
def bef_af(v, y):
return [v[0] - 1] + v + [v[1] + 1], [0, y, y, 0]
if True:
ax.plot(*bef_af(val_before, 0.95), label='before')
ax.plot(*bef_af(val_after, 1.05), label='after')
if False:
ax.plot(*bef_af(trimmed_before, 0.9), label='trimmed before')
ax.plot(*bef_af(trimmed_after, 1.1), label='trimmed after')
ax.plot(*bef_af([timestamp_from, timestamp_until], 1), label='object')
xlim = 10000000
plt.xlim([timestamp_from-xlim, timestamp_until+xlim])
plt.ylim(0, 2)
plt.xlabel('Timestamp')
plt.ylabel('Validity')
plt.legend()
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Uploads timestamp non overlapping objects to CCDB."
"Basic example: `./update_ccdb.py qc/TOF/TOFTaskCompressed/hDiagnostic 1588956517161 1588986517161 --show --verbose`")
parser.add_argument('path', metavar='path_to_object', type=str,
help='Path of the object in the CCDB repository')
parser.add_argument('timestamp_from', metavar='from_timestamp', type=int,
help='Timestamp of start for the new object to use')
parser.add_argument('timestamp_until', metavar='until_timestamp', type=int,
help='Timestamp of stop for the new object to use')
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--show', '-s', action='count', default=0)
args = parser.parse_args()
main(path=args.path,
timestamp_from=args.timestamp_from,
timestamp_until=args.timestamp_until,
verbose=args.verbose,
show=args.show)
| gpl-3.0 | -588,976,026,275,363,300 | -3,622,150,140,864,950,300 | 40.668966 | 126 | 0.659219 | false |
Aravinthu/odoo | addons/website_event_sale/models/sale_order.py | 16 | 4747 | # -*- coding: utf-8 -*-
from odoo import api, models, _
from odoo.exceptions import UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.multi
def _cart_find_product_line(self, product_id=None, line_id=None, **kwargs):
self.ensure_one()
lines = super(SaleOrder, self)._cart_find_product_line(product_id, line_id)
if line_id:
return lines
domain = [('id', 'in', lines.ids)]
if self.env.context.get("event_ticket_id"):
domain.append(('event_ticket_id', '=', self.env.context.get("event_ticket_id")))
return self.env['sale.order.line'].sudo().search(domain)
@api.multi
def _website_product_id_change(self, order_id, product_id, qty=0):
order = self.env['sale.order'].sudo().browse(order_id)
if self._context.get('pricelist') != order.pricelist_id.id:
self = self.with_context(pricelist=order.pricelist_id.id)
values = super(SaleOrder, self)._website_product_id_change(order_id, product_id, qty=qty)
event_ticket_id = None
if self.env.context.get("event_ticket_id"):
event_ticket_id = self.env.context.get("event_ticket_id")
else:
product = self.env['product.product'].browse(product_id)
if product.event_ticket_ids:
event_ticket_id = product.event_ticket_ids[0].id
if event_ticket_id:
ticket = self.env['event.event.ticket'].browse(event_ticket_id)
if product_id != ticket.product_id.id:
raise UserError(_("The ticket doesn't match with this product."))
values['product_id'] = ticket.product_id.id
values['event_id'] = ticket.event_id.id
values['event_ticket_id'] = ticket.id
values['price_unit'] = ticket.price_reduce or ticket.price
values['name'] = "%s\n%s" % (ticket.event_id.display_name, ticket.name)
# avoid writing related values that end up locking the product record
values.pop('event_ok', None)
return values
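    # Illustrative call sketch (identifiers are hypothetical): selecting a
    # ticket on the website ends up calling this method with the ticket id in
    # the context, e.g.
    #   order.with_context(event_ticket_id=ticket.id)._website_product_id_change(
    #       order.id, ticket.product_id.id, qty=2)
    # which fills event_id/event_ticket_id on the order line values and prices
    # the line with the ticket's reduced price (falling back to its list price).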
@api.multi
def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
OrderLine = self.env['sale.order.line']
if line_id:
line = OrderLine.browse(line_id)
ticket = line.event_ticket_id
old_qty = int(line.product_uom_qty)
if ticket.id:
self = self.with_context(event_ticket_id=ticket.id, fixed_price=1)
else:
line = None
ticket = self.env['event.event.ticket'].search([('product_id', '=', product_id)], limit=1)
old_qty = 0
        # Without parentheses, "add_qty or 0 + old_qty" ignores old_qty whenever
        # add_qty is truthy; the intent is to add the delta on top of the
        # quantity already in the cart.
        new_qty = set_qty if set_qty else ((add_qty or 0) + old_qty)
# case: buying tickets for a sold out ticket
values = {}
if ticket and ticket.seats_availability == 'limited' and ticket.seats_available <= 0:
values['warning'] = _('Sorry, The %(ticket)s tickets for the %(event)s event are sold out.') % {
'ticket': ticket.name,
'event': ticket.event_id.name}
new_qty, set_qty, add_qty = 0, 0, 0
# case: buying tickets, too much attendees
elif ticket and ticket.seats_availability == 'limited' and new_qty > ticket.seats_available:
values['warning'] = _('Sorry, only %(remaining_seats)d seats are still available for the %(ticket)s ticket for the %(event)s event.') % {
'remaining_seats': ticket.seats_available,
'ticket': ticket.name,
'event': ticket.event_id.name}
new_qty, set_qty, add_qty = ticket.seats_available, ticket.seats_available, 0
values.update(super(SaleOrder, self)._cart_update(product_id, line_id, add_qty, set_qty, **kwargs))
# removing attendees
if ticket and new_qty < old_qty:
attendees = self.env['event.registration'].search([
('state', '!=', 'cancel'),
('sale_order_id', 'in', self.ids), # To avoid break on multi record set
('event_ticket_id', '=', ticket.id),
], offset=new_qty, limit=(old_qty - new_qty), order='create_date asc')
attendees.button_reg_cancel()
# adding attendees
elif ticket and new_qty > old_qty:
line = OrderLine.browse(values['line_id'])
line._update_registrations(confirm=False, cancel_to_draft=True, registration_data=kwargs.get('registration_data', []))
# add in return values the registrations, to display them on website (or not)
values['attendee_ids'] = self.env['event.registration'].search([('sale_order_line_id', '=', line.id), ('state', '!=', 'cancel')]).ids
return values
| agpl-3.0 | 2,521,970,127,472,919,600 | 2,855,835,957,624,465,000 | 47.438776 | 149 | 0.589846 | false |
CGATOxford/bioconda-recipes | recipes/topas/topas.py | 38 | 2648 | #!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'TOPAS.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explictly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
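# Small sketch of how jvm_opts() splits a command line (values are examples):
#   jvm_opts(['-Xmx2g', '-Dsnappy.disable=true', 'in.fasta'])
#   -> (['-Xmx2g'], ['-Dsnappy.disable=true'], ['in.fasta'])
# With no -Xm* option and _JAVA_OPTIONS unset, mem_opts falls back to
# default_jvm_mem_opts (['-Xms512m', '-Xmx1g']).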
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
if '--jar_dir' in sys.argv[1:]:
print(jar_path)
else:
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
| mit | -591,669,864,279,615,900 | 3,402,130,568,174,691,000 | 28.422222 | 175 | 0.63142 | false |
fabianrost84/cython | Cython/Plex/Errors.py | 33 | 1169 | #=======================================================================
#
# Python Lexical Analyser
#
# Exception classes
#
#=======================================================================
class PlexError(Exception):
message = ""
class PlexTypeError(PlexError, TypeError):
pass
class PlexValueError(PlexError, ValueError):
pass
class InvalidRegex(PlexError):
pass
class InvalidToken(PlexError):
def __init__(self, token_number, message):
PlexError.__init__(self, "Token number %d: %s" % (token_number, message))
class InvalidScanner(PlexError):
pass
class AmbiguousAction(PlexError):
message = "Two tokens with different actions can match the same string"
def __init__(self):
pass
class UnrecognizedInput(PlexError):
scanner = None
position = None
state_name = None
def __init__(self, scanner, state_name):
self.scanner = scanner
self.position = scanner.get_position()
self.state_name = state_name
def __str__(self):
return ("'%s', line %d, char %d: Token not recognised in state %r" % (
self.position + (self.state_name,)))
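# Illustrative sketch (scanner setup omitted): Plex raises these exceptions
# rather than returning error codes, so callers typically do something like:
#
#   try:
#       token = scanner.read()
#   except UnrecognizedInput as e:
#       print(e)  # "'file', line 3, char 7: Token not recognised in state ..."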
| apache-2.0 | -8,382,247,765,242,799,000 | 5,483,563,033,979,246,000 | 20.648148 | 81 | 0.562019 | false |
analurandis/Tur | backend/venv/Lib/site-packages/sphinx/builders/qthelp.py | 11 | 10819 | # -*- coding: utf-8 -*-
"""
sphinx.builders.qthelp
~~~~~~~~~~~~~~~~~~~~~~
Build input files for the Qt collection generator.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import codecs
import posixpath
from os import path
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import force_decode
from sphinx.util.pycompat import htmlescape
_idpattern = re.compile(
r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')
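# The pattern above is meant to recognise index entries such as (examples):
#   "Builder (class in sphinx.builders)"      -> id "sphinx.builders"
#   "run() (sphinx.builders.Builder method)"  -> id "sphinx.builders.Builder",
#     descr "method"; keyword_item() later appends the short name, giving
#     "sphinx.builders.Builder.run".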
# Qt Help Collection Project (.qhcp).
# Is the input file for the help collection generator.
# It contains references to compressed help files which should be
# included in the collection.
# It may contain various other information for customizing Qt Assistant.
collection_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QHelpCollectionProject version="1.0">
<assistant>
<title>%(title)s</title>
<homePage>%(homepage)s</homePage>
<startPage>%(startpage)s</startPage>
</assistant>
<docFiles>
<generate>
<file>
<input>%(outname)s.qhp</input>
<output>%(outname)s.qch</output>
</file>
</generate>
<register>
<file>%(outname)s.qch</file>
</register>
</docFiles>
</QHelpCollectionProject>
'''
# Qt Help Project (.qhp)
# This is the input file for the help generator.
# It contains the table of contents, indices and references to the
# actual documentation files (*.html).
# In addition it defines a unique namespace for the documentation.
project_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QtHelpProject version="1.0">
<namespace>%(namespace)s</namespace>
<virtualFolder>doc</virtualFolder>
<customFilter name="%(project)s %(version)s">
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
</customFilter>
<filterSection>
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
<toc>
<section title="%(title)s" ref="%(masterdoc)s.html">
%(sections)s
</section>
</toc>
<keywords>
%(keywords)s
</keywords>
<files>
%(files)s
</files>
</filterSection>
</QtHelpProject>
'''
section_template = '<section title="%(title)s" ref="%(ref)s"/>'
file_template = ' '*12 + '<file>%(filename)s</file>'
class QtHelpBuilder(StandaloneHTMLBuilder):
"""
Builder that also outputs Qt help project, contents and index files.
"""
name = 'qthelp'
# don't copy the reST source
copysource = False
supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
'image/jpeg']
# don't add links
add_permalinks = False
# don't add sidebar etc.
embedded = True
def init(self):
StandaloneHTMLBuilder.init(self)
# the output files for HTML help must be .html only
self.out_suffix = '.html'
#self.config.html_style = 'traditional.css'
def handle_finish(self):
self.build_qhp(self.outdir, self.config.qthelp_basename)
def build_qhp(self, outdir, outname):
self.info('writing project file...')
# sections
tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
prune_toctrees=False)
istoctree = lambda node: (
isinstance(node, addnodes.compact_paragraph)
and node.has_key('toctree'))
sections = []
for node in tocdoc.traverse(istoctree):
sections.extend(self.write_toc(node))
for indexname, indexcls, content, collapse in self.domain_indices:
item = section_template % {'title': indexcls.localname,
'ref': '%s.html' % indexname}
sections.append(' ' * 4 * 4 + item)
# sections may be unicode strings or byte strings, we have to make sure
# they are all unicode strings before joining them
new_sections = []
for section in sections:
if not isinstance(section, unicode):
new_sections.append(force_decode(section, None))
else:
new_sections.append(section)
sections = u'\n'.join(new_sections)
# keywords
keywords = []
index = self.env.create_index(self, group_entries=False)
for (key, group) in index:
for title, (refs, subitems) in group:
keywords.extend(self.build_keywords(title, refs, subitems))
keywords = u'\n'.join(keywords)
# files
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
projectfiles = []
staticdir = path.join(outdir, '_static')
imagesdir = path.join(outdir, '_images')
for root, dirs, files in os.walk(outdir):
resourcedir = root.startswith(staticdir) or \
root.startswith(imagesdir)
for fn in files:
if (resourcedir and not fn.endswith('.js')) or \
fn.endswith('.html'):
filename = path.join(root, fn)[olen:]
projectfiles.append(file_template %
{'filename': htmlescape(filename)})
projectfiles = '\n'.join(projectfiles)
# it seems that the "namespace" may not contain non-alphanumeric
# characters, and more than one successive dot, or leading/trailing
# dots, are also forbidden
nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)
nspace = re.sub('[^a-zA-Z0-9.]', '', nspace)
nspace = re.sub(r'\.+', '.', nspace).strip('.')
nspace = nspace.lower()
# write the project file
f = codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8')
try:
f.write(project_template % {
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_title),
'version': htmlescape(self.config.version),
'project': htmlescape(self.config.project),
'namespace': htmlescape(nspace),
'masterdoc': htmlescape(self.config.master_doc),
'sections': sections,
'keywords': keywords,
'files': projectfiles})
finally:
f.close()
homepage = 'qthelp://' + posixpath.join(
nspace, 'doc', self.get_target_uri(self.config.master_doc))
startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')
self.info('writing collection project file...')
f = codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8')
try:
f.write(collection_template % {
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_short_title),
'homepage': htmlescape(homepage),
'startpage': htmlescape(startpage)})
finally:
f.close()
def isdocnode(self, node):
if not isinstance(node, nodes.list_item):
return False
if len(node.children) != 2:
return False
if not isinstance(node.children[0], addnodes.compact_paragraph):
return False
if not isinstance(node.children[0][0], nodes.reference):
return False
if not isinstance(node.children[1], nodes.bullet_list):
return False
return True
def write_toc(self, node, indentlevel=4):
# XXX this should return a Unicode string, not a bytestring
parts = []
if self.isdocnode(node):
refnode = node.children[0][0]
link = refnode['refuri']
title = htmlescape(refnode.astext()).replace('"', '"')
item = '<section title="%(title)s" ref="%(ref)s">' % \
{'title': title, 'ref': link}
parts.append(' '*4*indentlevel + item)
for subnode in node.children[1]:
parts.extend(self.write_toc(subnode, indentlevel+1))
parts.append(' '*4*indentlevel + '</section>')
elif isinstance(node, nodes.list_item):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
elif isinstance(node, nodes.reference):
link = node['refuri']
title = htmlescape(node.astext()).replace('"','"')
item = section_template % {'title': title, 'ref': link}
item = u' ' * 4 * indentlevel + item
parts.append(item.encode('ascii', 'xmlcharrefreplace'))
elif isinstance(node, nodes.bullet_list):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
elif isinstance(node, addnodes.compact_paragraph):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
return parts
def keyword_item(self, name, ref):
matchobj = _idpattern.match(name)
if matchobj:
groupdict = matchobj.groupdict()
shortname = groupdict['title']
id = groupdict.get('id')
#descr = groupdict.get('descr')
if shortname.endswith('()'):
shortname = shortname[:-2]
id = '%s.%s' % (id, shortname)
else:
id = None
if id:
item = ' '*12 + '<keyword name="%s" id="%s" ref="%s"/>' % (
name, id, ref[1])
else:
item = ' '*12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1])
item.encode('ascii', 'xmlcharrefreplace')
return item
def build_keywords(self, title, refs, subitems):
keywords = []
title = htmlescape(title)
# if len(refs) == 0: # XXX
# write_param('See Also', title)
if len(refs) == 1:
keywords.append(self.keyword_item(title, refs[0]))
elif len(refs) > 1:
for i, ref in enumerate(refs): # XXX
# item = (' '*12 +
# '<keyword name="%s [%d]" ref="%s"/>' % (
# title, i, ref))
# item.encode('ascii', 'xmlcharrefreplace')
# keywords.append(item)
keywords.append(self.keyword_item(title, ref))
if subitems:
for subitem in subitems:
keywords.extend(self.build_keywords(subitem[0], subitem[1], []))
return keywords
| mit | 5,959,118,224,913,985,000 | -1,935,735,167,212,759,000 | 35.550676 | 80 | 0.558092 | false |
jaingaurav/Diamond | src/diamond/handler/test/teststatsdhandler.py | 20 | 3122 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import unittest
from test import run_only
from mock import patch
import configobj
from diamond.handler.stats_d import StatsdHandler
from diamond.metric import Metric
def run_only_if_statsd_is_available(func):
try:
import statsd
except ImportError:
statsd = None
pred = lambda: statsd is not None
return run_only(func, pred)
class TestStatsdHandler(unittest.TestCase):
@run_only_if_statsd_is_available
@patch('statsd.StatsClient')
def test_single_gauge(self, mock_client):
config = configobj.ConfigObj()
config['host'] = 'localhost'
config['port'] = '9999'
config['batch'] = 1
metric = Metric('servers.com.example.www.cpu.total.idle',
123, raw_value=123, timestamp=1234567,
host='will-be-ignored', metric_type='GAUGE')
expected_data = ('servers.com.example.www.cpu.total.idle', 123)
handler = StatsdHandler(config)
handler.process(metric)
handler.connection.gauge.assert_called_with(*expected_data)
handler.connection.send.assert_called_with()
@run_only_if_statsd_is_available
@patch('statsd.StatsClient')
def test_single_counter(self, mock_client):
config = configobj.ConfigObj()
config['host'] = 'localhost'
config['port'] = '9999'
config['batch'] = 1
metric = Metric('servers.com.example.www.cpu.total.idle',
5, raw_value=123, timestamp=1234567,
host='will-be-ignored', metric_type='COUNTER')
expected_data = ('servers.com.example.www.cpu.total.idle', 123)
handler = StatsdHandler(config)
handler.process(metric)
handler.connection.incr.assert_called_with(*expected_data)
handler.connection.send.assert_called_with()
@run_only_if_statsd_is_available
@patch('statsd.StatsClient')
def test_multiple_counter(self, mock_client):
config = configobj.ConfigObj()
config['host'] = 'localhost'
config['port'] = '9999'
config['batch'] = 1
metric1 = Metric('servers.com.example.www.cpu.total.idle',
5, raw_value=123, timestamp=1234567,
host='will-be-ignored', metric_type='COUNTER')
metric2 = Metric('servers.com.example.www.cpu.total.idle',
7, raw_value=128, timestamp=1234567,
host='will-be-ignored', metric_type='COUNTER')
expected_data1 = ('servers.com.example.www.cpu.total.idle', 123)
expected_data2 = ('servers.com.example.www.cpu.total.idle', 5)
handler = StatsdHandler(config)
handler.process(metric1)
handler.connection.incr.assert_called_with(*expected_data1)
handler.connection.send.assert_called_with()
handler.process(metric2)
handler.connection.incr.assert_called_with(*expected_data2)
handler.connection.send.assert_called_with()
| mit | 3,437,116,541,470,589,000 | 3,795,237,019,357,207,000 | 32.934783 | 74 | 0.609865 | false |
ckjoshi9/Auto-Mate-for-Tinder | Django App/tinderapp/src/Pixel.py | 2 | 1491 | from __future__ import division
from colorsys import *
class Pixel:
    def __init__(self, x, y, red, green, blue):
        # Store coordinates and region in underscore-prefixed attributes so
        # they do not collide with the properties below (assigning to the
        # property names directly would either fail or recurse forever).
        self._x = x
        self._y = y
        self.red = red
        self.green = green
        self.blue = blue
        self._region = None
    @property
    def region(self):
        return self._region
    @region.setter
    def region(self, value):
        self._region = value
    @property
    def x(self):
        return self._x
    @property
    def y(self):
        return self._y
def in_region(self):
if self.region == None:
return False
else:
return True
def is_skin(self):
r = self.red
g = self.green
b = self.blue
rgbClassifier = ((r > 95) and (g > 40 and g < 100) and (b > 20) and ((max(r, g, b) - min(r, g, b)) > 15) and (abs(r-g) > 15) and (r > g) and (r > b))
normalizedRGBClassifier = False
if r != 0 and g != 0 and b != 0:
normR = (r/(r + g + b))
normG = (g/(r + g + b))
normB = (b/(r + g + b))
normalizedRGBClassifier = (((normR/normG) > 1.185) and (((r * b)/(pow(r + g + b, 2))) > 0.107) and (((r * g)/(pow(r + g + b,2))) > 0.112))
hsv = rgb_to_hsv(r, g, b)
hsvClassifier = (hsv[0] > 0 and hsv[0] < 35 and hsv[1] > 0.23 and hsv[1] < 0.68)
return (rgbClassifier or normalizedRGBClassifier or hsvClassifier)
def intensity(self):
return (self.red + self.green + self.blue)/3
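# Minimal usage sketch (the RGB values are arbitrary examples):
#   p = Pixel(10, 20, 220, 170, 140)
#   p.is_skin()     # heuristic skin-tone test combining the RGB, normalised
#                   # RGB and HSV classifiers above
#   p.intensity()   # (220 + 170 + 140) / 3 = 176.66...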
| mit | 4,133,386,481,876,117,500 | -1,258,871,448,836,626,200 | 28.82 | 157 | 0.501677 | false |
pquentin/libcloud | libcloud/storage/drivers/s3.py | 3 | 42505 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hmac
import time
from hashlib import sha1
import libcloud.utils.py3
try:
if libcloud.utils.py3.DEFAULT_LXML:
from lxml.etree import Element, SubElement
else:
from xml.etree.ElementTree import Element, SubElement
except ImportError:
from xml.etree.ElementTree import Element, SubElement
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlquote
from libcloud.utils.py3 import b
from libcloud.utils.py3 import tostring
from libcloud.utils.xml import fixxpath, findtext
from libcloud.utils.files import read_in_chunks
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.base import ConnectionUserAndKey, RawResponse
from libcloud.common.aws import AWSBaseResponse, AWSDriver, \
AWSTokenConnection, SignedAWSConnection
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
# How long before the token expires
EXPIRATION_SECONDS = 15 * 60
S3_US_STANDARD_HOST = 's3.amazonaws.com'
S3_US_EAST2_HOST = 's3-us-east-2.amazonaws.com'
S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com'
S3_US_WEST_OREGON_HOST = 's3-us-west-2.amazonaws.com'
S3_US_GOV_WEST_HOST = 's3-us-gov-west-1.amazonaws.com'
S3_CN_NORTH_HOST = 's3.cn-north-1.amazonaws.com.cn'
S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com'
S3_EU_WEST2_HOST = 's3-eu-west-2.amazonaws.com'
S3_EU_CENTRAL_HOST = 's3-eu-central-1.amazonaws.com'
S3_AP_SOUTH_HOST = 's3-ap-south-1.amazonaws.com'
S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com'
S3_AP_SOUTHEAST2_HOST = 's3-ap-southeast-2.amazonaws.com'
S3_AP_NORTHEAST1_HOST = 's3-ap-northeast-1.amazonaws.com'
S3_AP_NORTHEAST2_HOST = 's3-ap-northeast-2.amazonaws.com'
S3_AP_NORTHEAST_HOST = S3_AP_NORTHEAST1_HOST
S3_SA_EAST_HOST = 's3-sa-east-1.amazonaws.com'
S3_SA_SOUTHEAST2_HOST = 's3-sa-east-2.amazonaws.com'
S3_CA_CENTRAL_HOST = 's3-ca-central-1.amazonaws.com'
API_VERSION = '2006-03-01'
NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)
# AWS multi-part chunks must be minimum 5MB
CHUNK_SIZE = 5 * 1024 * 1024
# Desired number of items in each response inside a paginated request in
# ex_iterate_multipart_uploads.
RESPONSES_PER_REQUEST = 100
class S3Response(AWSBaseResponse):
namespace = None
valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
httplib.BAD_REQUEST]
def success(self):
i = int(self.status)
return i >= 200 and i <= 299 or i in self.valid_response_codes
def parse_error(self):
if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
raise InvalidCredsError(self.body)
elif self.status == httplib.MOVED_PERMANENTLY:
raise LibcloudError('This bucket is located in a different ' +
'region. Please use the correct driver.',
driver=S3StorageDriver)
raise LibcloudError('Unknown error. Status code: %d' % (self.status),
driver=S3StorageDriver)
class S3RawResponse(S3Response, RawResponse):
pass
class BaseS3Connection(ConnectionUserAndKey):
"""
Represents a single connection to the S3 Endpoint
"""
host = 's3.amazonaws.com'
responseCls = S3Response
rawResponseCls = S3RawResponse
@staticmethod
def get_auth_signature(method, headers, params, expires, secret_key, path,
vendor_prefix):
"""
Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID,
UTF-8-Encoding-Of( StringToSign ) ) ) );
StringToSign = HTTP-VERB + "\n" +
Content-MD5 + "\n" +
Content-Type + "\n" +
Expires + "\n" +
CanonicalizedVendorHeaders +
CanonicalizedResource;
"""
special_headers = {'content-md5': '', 'content-type': '', 'date': ''}
vendor_headers = {}
for key, value in list(headers.items()):
key_lower = key.lower()
if key_lower in special_headers:
special_headers[key_lower] = value.strip()
elif key_lower.startswith(vendor_prefix):
vendor_headers[key_lower] = value.strip()
if expires:
special_headers['date'] = str(expires)
buf = [method]
for _, value in sorted(special_headers.items()):
buf.append(value)
string_to_sign = '\n'.join(buf)
buf = []
for key, value in sorted(vendor_headers.items()):
buf.append('%s:%s' % (key, value))
header_string = '\n'.join(buf)
values_to_sign = []
for value in [string_to_sign, header_string, path]:
if value:
values_to_sign.append(value)
string_to_sign = '\n'.join(values_to_sign)
b64_hmac = base64.b64encode(
hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
)
return b64_hmac.decode('utf-8')
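    # Minimal sketch of the signing flow above (key, expiry and path made up):
    #   StringToSign = "GET\n\n\n1402000000\n/my-bucket/my-object"
    #   Signature    = base64(hmac-sha1(secret_key, StringToSign))
    # The query string then carries AWSAccessKeyId, Expires and Signature,
    # which is what add_default_params()/pre_connect_hook() below assemble.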
def add_default_params(self, params):
expires = str(int(time.time()) + EXPIRATION_SECONDS)
params['AWSAccessKeyId'] = self.user_id
params['Expires'] = expires
return params
def pre_connect_hook(self, params, headers):
params['Signature'] = self.get_auth_signature(
method=self.method, headers=headers, params=params,
expires=params['Expires'], secret_key=self.key, path=self.action,
vendor_prefix=self.driver.http_vendor_prefix)
return params, headers
class S3Connection(AWSTokenConnection, BaseS3Connection):
"""
Represents a single connection to the S3 endpoint, with AWS-specific
features.
"""
pass
class S3SignatureV4Connection(SignedAWSConnection, BaseS3Connection):
service_name = 's3'
version = API_VERSION
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, proxy_url=None, token=None,
retry_delay=None, backoff=None):
super(S3SignatureV4Connection, self).__init__(
user_id, key, secure, host,
port, url, timeout, proxy_url,
token, retry_delay, backoff,
4) # force version 4
class S3MultipartUpload(object):
"""
Class representing an amazon s3 multipart upload
"""
def __init__(self, key, id, created_at, initiator, owner):
"""
Class representing an amazon s3 multipart upload
:param key: The object/key that was being uploaded
:type key: ``str``
:param id: The upload id assigned by amazon
:type id: ``str``
:param created_at: The date/time at which the upload was started
:type created_at: ``str``
:param initiator: The AWS owner/IAM user who initiated this
:type initiator: ``str``
:param owner: The AWS owner/IAM who will own this object
:type owner: ``str``
"""
self.key = key
self.id = id
self.created_at = created_at
self.initiator = initiator
self.owner = owner
def __repr__(self):
return ('<S3MultipartUpload: key=%s>' % (self.key))
class BaseS3StorageDriver(StorageDriver):
name = 'Amazon S3 (standard)'
website = 'http://aws.amazon.com/s3/'
connectionCls = BaseS3Connection
hash_type = 'md5'
supports_chunked_encoding = False
supports_s3_multipart_upload = True
ex_location_name = ''
namespace = NAMESPACE
http_vendor_prefix = 'x-amz'
def iterate_containers(self):
response = self.connection.request('/')
if response.status == httplib.OK:
containers = self._to_containers(obj=response.object,
xpath='Buckets/Bucket')
return containers
raise LibcloudError('Unexpected status code: %s' % (response.status),
driver=self)
def list_container_objects(self, container, ex_prefix=None):
"""
Return a list of objects for the given container.
:param container: Container instance.
:type container: :class:`Container`
:param ex_prefix: Only return objects starting with ex_prefix
:type ex_prefix: ``str``
:return: A list of Object instances.
:rtype: ``list`` of :class:`Object`
"""
return list(self.iterate_container_objects(container,
ex_prefix=ex_prefix))
def iterate_container_objects(self, container, ex_prefix=None):
"""
Return a generator of objects for the given container.
:param container: Container instance
:type container: :class:`Container`
:param ex_prefix: Only return objects starting with ex_prefix
:type ex_prefix: ``str``
:return: A generator of Object instances.
:rtype: ``generator`` of :class:`Object`
"""
params = {}
if ex_prefix:
params['prefix'] = ex_prefix
last_key = None
exhausted = False
container_path = self._get_container_path(container)
while not exhausted:
if last_key:
params['marker'] = last_key
response = self.connection.request(container_path,
params=params)
if response.status != httplib.OK:
raise LibcloudError('Unexpected status code: %s' %
(response.status), driver=self)
objects = self._to_objs(obj=response.object,
xpath='Contents', container=container)
is_truncated = response.object.findtext(fixxpath(
xpath='IsTruncated', namespace=self.namespace)).lower()
exhausted = (is_truncated == 'false')
last_key = None
for obj in objects:
last_key = obj.name
yield obj
def get_container(self, container_name):
try:
response = self.connection.request('/%s' % container_name,
method='HEAD')
if response.status == httplib.NOT_FOUND:
raise ContainerDoesNotExistError(value=None, driver=self,
container_name=container_name)
except InvalidCredsError:
# This just means the user doesn't have IAM permissions to do a
# HEAD request but other requests might work.
pass
return Container(name=container_name, extra=None, driver=self)
def get_object(self, container_name, object_name):
container = self.get_container(container_name=container_name)
object_path = self._get_object_path(container, object_name)
response = self.connection.request(object_path, method='HEAD')
if response.status == httplib.OK:
obj = self._headers_to_object(object_name=object_name,
container=container,
headers=response.headers)
return obj
raise ObjectDoesNotExistError(value=None, driver=self,
object_name=object_name)
def _get_container_path(self, container):
"""
Return a container path
:param container: Container instance
:type container: :class:`Container`
:return: A path for this container.
:rtype: ``str``
"""
return '/%s' % (container.name)
def _get_object_path(self, container, object_name):
"""
        Return an object's request path.
:param container: Container instance
:type container: :class:`Container`
:param object_name: Object name
:type object_name: :class:`str`
:return: A path for this object.
:rtype: ``str``
"""
container_url = self._get_container_path(container)
object_name_cleaned = self._clean_object_name(object_name)
object_path = '%s/%s' % (container_url, object_name_cleaned)
return object_path
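    # For example (bucket and object names are illustrative):
    #   _get_object_path(Container('backups', ...), 'logs/app 01.txt')
    #   -> '/backups/logs/app%2001.txt'
    # since the object name is URL-quoted while the container name is not.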
def create_container(self, container_name):
if self.ex_location_name:
root = Element('CreateBucketConfiguration')
child = SubElement(root, 'LocationConstraint')
child.text = self.ex_location_name
data = tostring(root)
else:
data = ''
response = self.connection.request('/%s' % (container_name),
data=data,
method='PUT')
if response.status == httplib.OK:
container = Container(name=container_name, extra=None, driver=self)
return container
elif response.status == httplib.CONFLICT:
raise InvalidContainerNameError(
value='Container with this name already exists. The name must '
'be unique among all the containers in the system',
container_name=container_name, driver=self)
elif response.status == httplib.BAD_REQUEST:
raise ContainerError(
value='Bad request when creating container: %s' %
response.body,
container_name=container_name, driver=self)
raise LibcloudError('Unexpected status code: %s' % (response.status),
driver=self)
def delete_container(self, container):
# Note: All the objects in the container must be deleted first
response = self.connection.request('/%s' % (container.name),
method='DELETE')
if response.status == httplib.NO_CONTENT:
return True
elif response.status == httplib.CONFLICT:
raise ContainerIsNotEmptyError(
value='Container must be empty before it can be deleted.',
container_name=container.name, driver=self)
elif response.status == httplib.NOT_FOUND:
raise ContainerDoesNotExistError(value=None,
driver=self,
container_name=container.name)
return False
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
obj_path = self._get_object_path(obj.container, obj.name)
response = self.connection.request(obj_path, method='GET', raw=True)
return self._get_object(obj=obj, callback=self._save_object,
response=response,
callback_kwargs={
'obj': obj,
'response': response.response,
'destination_path': destination_path,
'overwrite_existing': overwrite_existing,
'delete_on_failure': delete_on_failure},
success_status_code=httplib.OK)
def download_object_as_stream(self, obj, chunk_size=None):
obj_path = self._get_object_path(obj.container, obj.name)
response = self.connection.request(obj_path, method='GET',
stream=True, raw=True)
return self._get_object(
obj=obj, callback=read_in_chunks,
response=response,
callback_kwargs={'iterator': response.iter_content(CHUNK_SIZE),
'chunk_size': chunk_size},
success_status_code=httplib.OK)
def upload_object(self, file_path, container, object_name, extra=None,
verify_hash=True, ex_storage_class=None):
"""
@inherits: :class:`StorageDriver.upload_object`
:param ex_storage_class: Storage class
:type ex_storage_class: ``str``
"""
return self._put_object(container=container, object_name=object_name,
extra=extra, file_path=file_path,
verify_hash=verify_hash,
storage_class=ex_storage_class)
def _initiate_multipart(self, container, object_name, headers=None):
"""
Initiates a multipart upload to S3
:param container: The destination container
:type container: :class:`Container`
:param object_name: The name of the object which we are uploading
:type object_name: ``str``
:keyword headers: Additional headers to send with the request
:type headers: ``dict``
:return: The id of the newly created multipart upload
:rtype: ``str``
"""
headers = headers or {}
request_path = self._get_object_path(container, object_name)
params = {'uploads': ''}
response = self.connection.request(request_path, method='POST',
headers=headers, params=params)
if response.status != httplib.OK:
raise LibcloudError('Error initiating multipart upload',
driver=self)
return findtext(element=response.object, xpath='UploadId',
namespace=self.namespace)
def _upload_multipart_chunks(self, container, object_name, upload_id,
stream, calculate_hash=True):
"""
Uploads data from an iterator in fixed sized chunks to S3
:param container: The destination container
:type container: :class:`Container`
:param object_name: The name of the object which we are uploading
:type object_name: ``str``
:param upload_id: The upload id allocated for this multipart upload
:type upload_id: ``str``
:param stream: The generator for fetching the upload data
:type stream: ``generator``
:keyword calculate_hash: Indicates if we must calculate the data hash
:type calculate_hash: ``bool``
:return: A tuple of (chunk info, checksum, bytes transferred)
:rtype: ``tuple``
"""
data_hash = None
if calculate_hash:
data_hash = self._get_hash_function()
bytes_transferred = 0
count = 1
chunks = []
params = {'uploadId': upload_id}
request_path = self._get_object_path(container, object_name)
# Read the input data in chunk sizes suitable for AWS
for data in read_in_chunks(stream, chunk_size=CHUNK_SIZE,
fill_size=True, yield_empty=True):
bytes_transferred += len(data)
if calculate_hash:
data_hash.update(data)
chunk_hash = self._get_hash_function()
chunk_hash.update(data)
chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8')
# The Content-MD5 header provides an extra level of data check and
# is recommended by amazon
headers = {
'Content-Length': len(data),
'Content-MD5': chunk_hash,
}
params['partNumber'] = count
resp = self.connection.request(request_path, method='PUT',
data=data, headers=headers,
params=params)
if resp.status != httplib.OK:
raise LibcloudError('Error uploading chunk', driver=self)
server_hash = resp.headers['etag'].replace('"', '')
# Keep this data for a later commit
chunks.append((count, server_hash))
count += 1
if calculate_hash:
data_hash = data_hash.hexdigest()
return (chunks, data_hash, bytes_transferred)
def _commit_multipart(self, container, object_name, upload_id, chunks):
"""
Makes a final commit of the data.
:param container: The destination container
:type container: :class:`Container`
:param object_name: The name of the object which we are uploading
:type object_name: ``str``
:param upload_id: The upload id allocated for this multipart upload
:type upload_id: ``str``
:param chunks: A list of (chunk_number, chunk_hash) tuples.
:type chunks: ``list``
:return: The server side hash of the uploaded data
:rtype: ``str``
"""
root = Element('CompleteMultipartUpload')
for (count, etag) in chunks:
part = SubElement(root, 'Part')
part_no = SubElement(part, 'PartNumber')
part_no.text = str(count)
etag_id = SubElement(part, 'ETag')
etag_id.text = str(etag)
data = tostring(root)
headers = {'Content-Length': len(data)}
params = {'uploadId': upload_id}
request_path = self._get_object_path(container, object_name)
response = self.connection.request(request_path, headers=headers,
params=params, data=data,
method='POST')
if response.status != httplib.OK:
element = response.object
# pylint: disable=maybe-no-member
code, message = response._parse_error_details(element=element)
msg = 'Error in multipart commit: %s (%s)' % (message, code)
raise LibcloudError(msg, driver=self)
# Get the server's etag to be passed back to the caller
body = response.parse_body()
server_hash = body.find(fixxpath(xpath='ETag',
namespace=self.namespace)).text
return server_hash
def _abort_multipart(self, container, object_name, upload_id):
"""
Aborts an already initiated multipart upload
:param container: The destination container
:type container: :class:`Container`
:param object_name: The name of the object which we are uploading
:type object_name: ``str``
:param upload_id: The upload id allocated for this multipart upload
:type upload_id: ``str``
"""
params = {'uploadId': upload_id}
request_path = self._get_object_path(container, object_name)
resp = self.connection.request(request_path, method='DELETE',
params=params)
if resp.status != httplib.NO_CONTENT:
raise LibcloudError('Error in multipart abort. status_code=%d' %
(resp.status), driver=self)
def upload_object_via_stream(self, iterator, container, object_name,
extra=None, ex_storage_class=None):
"""
@inherits: :class:`StorageDriver.upload_object_via_stream`
:param ex_storage_class: Storage class
:type ex_storage_class: ``str``
"""
method = 'PUT'
params = None
# This driver is used by other S3 API compatible drivers also.
# Amazon provides a different (complex?) mechanism to do multipart
# uploads
if self.supports_s3_multipart_upload:
return self._put_object_multipart(container=container,
object_name=object_name,
extra=extra,
stream=iterator,
verify_hash=False,
storage_class=ex_storage_class)
return self._put_object(container=container, object_name=object_name,
extra=extra, method=method, query_args=params,
stream=iterator, verify_hash=False,
storage_class=ex_storage_class)
def delete_object(self, obj):
object_path = self._get_object_path(obj.container, obj.name)
response = self.connection.request(object_path, method='DELETE')
if response.status == httplib.NO_CONTENT:
return True
elif response.status == httplib.NOT_FOUND:
raise ObjectDoesNotExistError(value=None, driver=self,
object_name=obj.name)
return False
def ex_iterate_multipart_uploads(self, container, prefix=None,
delimiter=None):
"""
Extension method for listing all in-progress S3 multipart uploads.
Each multipart upload which has not been committed or aborted is
considered in-progress.
:param container: The container holding the uploads
:type container: :class:`Container`
        :keyword prefix: List only uploads of objects with this prefix
:type prefix: ``str``
:keyword delimiter: The object/key names are grouped based on
being split by this delimiter
:type delimiter: ``str``
:return: A generator of S3MultipartUpload instances.
:rtype: ``generator`` of :class:`S3MultipartUpload`
"""
if not self.supports_s3_multipart_upload:
raise LibcloudError('Feature not supported', driver=self)
# Get the data for a specific container
request_path = self._get_container_path(container)
params = {'max-uploads': RESPONSES_PER_REQUEST, 'uploads': ''}
if prefix:
params['prefix'] = prefix
if delimiter:
params['delimiter'] = delimiter
def finder(node, text):
return node.findtext(fixxpath(xpath=text,
namespace=self.namespace))
while True:
response = self.connection.request(request_path, params=params)
if response.status != httplib.OK:
raise LibcloudError('Error fetching multipart uploads. '
'Got code: %s' % response.status,
driver=self)
body = response.parse_body()
# pylint: disable=maybe-no-member
for node in body.findall(fixxpath(xpath='Upload',
namespace=self.namespace)):
initiator = node.find(fixxpath(xpath='Initiator',
namespace=self.namespace))
owner = node.find(fixxpath(xpath='Owner',
namespace=self.namespace))
key = finder(node, 'Key')
upload_id = finder(node, 'UploadId')
created_at = finder(node, 'Initiated')
initiator = finder(initiator, 'DisplayName')
owner = finder(owner, 'DisplayName')
yield S3MultipartUpload(key, upload_id, created_at,
initiator, owner)
# Check if this is the last entry in the listing
# pylint: disable=maybe-no-member
is_truncated = body.findtext(fixxpath(xpath='IsTruncated',
namespace=self.namespace))
if is_truncated.lower() == 'false':
break
# Provide params for the next request
upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker',
namespace=self.namespace))
key_marker = body.findtext(fixxpath(xpath='NextKeyMarker',
namespace=self.namespace))
params['key-marker'] = key_marker
params['upload-id-marker'] = upload_marker
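    # Illustrative sketch (not part of the original driver): a caller might
    # walk the in-progress uploads and abort stale ones like this. The
    # `driver` and `container` names, and the 'backups/' prefix, are
    # assumptions made for the example.
    #
    #   for upload in driver.ex_iterate_multipart_uploads(container,
    #                                                     prefix='backups/'):
    #       driver._abort_multipart(container, upload.key, upload.id)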
def ex_cleanup_all_multipart_uploads(self, container, prefix=None):
"""
Extension method for removing all partially completed S3 multipart
uploads.
:param container: The container holding the uploads
:type container: :class:`Container`
:keyword prefix: Delete only uploads of objects with this prefix
:type prefix: ``str``
"""
# Iterate through the container and delete the upload ids
for upload in self.ex_iterate_multipart_uploads(container, prefix,
delimiter=None):
self._abort_multipart(container, upload.key, upload.id)
def _clean_object_name(self, name):
name = urlquote(name)
return name
def _put_object(self, container, object_name, method='PUT',
query_args=None, extra=None, file_path=None,
stream=None, verify_hash=True, storage_class=None):
headers = {}
extra = extra or {}
headers.update(self._to_storage_class_headers(storage_class))
content_type = extra.get('content_type', None)
meta_data = extra.get('meta_data', None)
acl = extra.get('acl', None)
if meta_data:
for key, value in list(meta_data.items()):
key = self.http_vendor_prefix + '-meta-%s' % (key)
headers[key] = value
if acl:
headers[self.http_vendor_prefix + '-acl'] = acl
request_path = self._get_object_path(container, object_name)
if query_args:
request_path = '?'.join((request_path, query_args))
result_dict = self._upload_object(
object_name=object_name, content_type=content_type,
request_path=request_path, request_method=method,
headers=headers, file_path=file_path, stream=stream)
response = result_dict['response']
bytes_transferred = result_dict['bytes_transferred']
headers = response.headers
server_hash = headers.get('etag', '').replace('"', '')
if (verify_hash and result_dict['data_hash'] != server_hash):
raise ObjectHashMismatchError(
value='MD5 hash {0} checksum does not match {1}'.format(
server_hash, result_dict['data_hash']),
object_name=object_name, driver=self)
elif response.status == httplib.OK:
obj = Object(
name=object_name, size=bytes_transferred, hash=server_hash,
extra={'acl': acl}, meta_data=meta_data, container=container,
driver=self)
return obj
else:
raise LibcloudError(
'Unexpected status code, status_code=%s' % (response.status),
driver=self)
def _put_object_multipart(self, container, object_name, stream,
extra=None, verify_hash=False,
storage_class=None):
"""
Uploads an object using the S3 multipart algorithm.
:param container: The destination container
:type container: :class:`Container`
:param object_name: The name of the object which we are uploading
:type object_name: ``str``
:param stream: The generator for fetching the upload data
:type stream: ``generator``
:keyword verify_hash: Indicates if we must calculate the data hash
:type verify_hash: ``bool``
:keyword extra: Additional options
:type extra: ``dict``
:keyword storage_class: The name of the S3 object's storage class
        :type storage_class: ``str``
:return: The uploaded object
:rtype: :class:`Object`
"""
headers = {}
extra = extra or {}
headers.update(self._to_storage_class_headers(storage_class))
content_type = extra.get('content_type', None)
meta_data = extra.get('meta_data', None)
acl = extra.get('acl', None)
if content_type:
headers['Content-Type'] = content_type
if meta_data:
for key, value in list(meta_data.items()):
key = self.http_vendor_prefix + '-meta-%s' % (key)
headers[key] = value
if acl:
headers[self.http_vendor_prefix + '-acl'] = acl
upload_id = self._initiate_multipart(container, object_name,
headers=headers)
try:
result = self._upload_multipart_chunks(container, object_name,
upload_id, stream,
calculate_hash=verify_hash)
chunks, data_hash, bytes_transferred = result
# Commit the chunk info and complete the upload
etag = self._commit_multipart(container, object_name, upload_id,
chunks)
except Exception:
# Amazon provides a mechanism for aborting an upload.
self._abort_multipart(container, object_name, upload_id)
raise
return Object(
name=object_name, size=bytes_transferred, hash=etag,
extra={'acl': acl}, meta_data=meta_data, container=container,
driver=self)
def _to_storage_class_headers(self, storage_class):
"""
Generates request headers given a storage class name.
:keyword storage_class: The name of the S3 object's storage class
        :type storage_class: ``str``
        :return: Headers to include in a request
        :rtype: ``dict``
"""
headers = {}
storage_class = storage_class or 'standard'
if storage_class not in ['standard', 'reduced_redundancy']:
raise ValueError(
'Invalid storage class value: %s' % (storage_class))
key = self.http_vendor_prefix + '-storage-class'
headers[key] = storage_class.upper()
return headers
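    # For illustration: with the AWS vendor prefix ('x-amz', an assumption
    # that holds for the stock S3 driver), a call such as
    # driver._to_storage_class_headers('reduced_redundancy') would be
    # expected to return {'x-amz-storage-class': 'REDUCED_REDUNDANCY'}.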
def _to_containers(self, obj, xpath):
for element in obj.findall(fixxpath(xpath=xpath,
namespace=self.namespace)):
yield self._to_container(element)
def _to_objs(self, obj, xpath, container):
return [self._to_obj(element, container) for element in
obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))]
def _to_container(self, element):
extra = {
'creation_date': findtext(element=element, xpath='CreationDate',
namespace=self.namespace)
}
container = Container(name=findtext(element=element, xpath='Name',
namespace=self.namespace),
extra=extra,
driver=self
)
return container
def _headers_to_object(self, object_name, container, headers):
hash = headers['etag'].replace('"', '')
extra = {'content_type': headers['content-type'],
'etag': headers['etag']}
meta_data = {}
if 'last-modified' in headers:
extra['last_modified'] = headers['last-modified']
for key, value in headers.items():
if not key.lower().startswith(self.http_vendor_prefix + '-meta-'):
continue
key = key.replace(self.http_vendor_prefix + '-meta-', '')
meta_data[key] = value
obj = Object(name=object_name, size=headers['content-length'],
hash=hash, extra=extra,
meta_data=meta_data,
container=container,
driver=self)
return obj
def _to_obj(self, element, container):
owner_id = findtext(element=element, xpath='Owner/ID',
namespace=self.namespace)
owner_display_name = findtext(element=element,
xpath='Owner/DisplayName',
namespace=self.namespace)
meta_data = {'owner': {'id': owner_id,
'display_name': owner_display_name}}
last_modified = findtext(element=element,
xpath='LastModified',
namespace=self.namespace)
extra = {'last_modified': last_modified}
obj = Object(name=findtext(element=element, xpath='Key',
namespace=self.namespace),
size=int(findtext(element=element, xpath='Size',
namespace=self.namespace)),
hash=findtext(element=element, xpath='ETag',
namespace=self.namespace).replace('"', ''),
extra=extra,
meta_data=meta_data,
container=container,
driver=self
)
return obj
class S3StorageDriver(AWSDriver, BaseS3StorageDriver):
name = 'Amazon S3 (us-east-1)'
connectionCls = S3SignatureV4Connection
region_name = 'us-east-1'
class S3USEast2Connection(S3SignatureV4Connection):
host = S3_US_EAST2_HOST
class S3USEast2StorageDriver(S3StorageDriver):
name = 'Amazon S3 (us-east-2)'
connectionCls = S3USEast2Connection
ex_location_name = 'us-east-2'
region_name = 'us-east-2'
class S3USWestConnection(S3SignatureV4Connection):
host = S3_US_WEST_HOST
class S3USWestStorageDriver(S3StorageDriver):
name = 'Amazon S3 (us-west-1)'
connectionCls = S3USWestConnection
ex_location_name = 'us-west-1'
region_name = 'us-west-1'
class S3USWestOregonConnection(S3SignatureV4Connection):
host = S3_US_WEST_OREGON_HOST
class S3USWestOregonStorageDriver(S3StorageDriver):
name = 'Amazon S3 (us-west-2)'
connectionCls = S3USWestOregonConnection
ex_location_name = 'us-west-2'
region_name = 'us-west-2'
class S3USGovWestConnection(S3SignatureV4Connection):
host = S3_US_GOV_WEST_HOST
class S3USGovWestStorageDriver(S3StorageDriver):
name = 'Amazon S3 (us-gov-west-1)'
connectionCls = S3USGovWestConnection
ex_location_name = 'us-gov-west-1'
region_name = 'us-gov-west-1'
class S3CNNorthConnection(S3SignatureV4Connection):
host = S3_CN_NORTH_HOST
class S3CNNorthStorageDriver(S3StorageDriver):
name = 'Amazon S3 (cn-north-1)'
connectionCls = S3CNNorthConnection
ex_location_name = 'cn-north-1'
region_name = 'cn-north-1'
class S3EUWestConnection(S3SignatureV4Connection):
host = S3_EU_WEST_HOST
class S3EUWestStorageDriver(S3StorageDriver):
name = 'Amazon S3 (eu-west-1)'
connectionCls = S3EUWestConnection
ex_location_name = 'EU'
region_name = 'eu-west-1'
class S3EUWest2Connection(S3SignatureV4Connection):
host = S3_EU_WEST2_HOST
class S3EUWest2StorageDriver(S3StorageDriver):
name = 'Amazon S3 (eu-west-2)'
connectionCls = S3EUWest2Connection
ex_location_name = 'eu-west-2'
region_name = 'eu-west-2'
class S3EUCentralConnection(S3SignatureV4Connection):
host = S3_EU_CENTRAL_HOST
class S3EUCentralStorageDriver(S3StorageDriver):
name = 'Amazon S3 (eu-central-1)'
connectionCls = S3EUCentralConnection
ex_location_name = 'eu-central-1'
region_name = 'eu-central-1'
class S3APSEConnection(S3SignatureV4Connection):
host = S3_AP_SOUTHEAST_HOST
class S3APSEStorageDriver(S3StorageDriver):
name = 'Amazon S3 (ap-southeast-1)'
connectionCls = S3APSEConnection
ex_location_name = 'ap-southeast-1'
region_name = 'ap-southeast-1'
class S3APSE2Connection(S3SignatureV4Connection):
host = S3_AP_SOUTHEAST2_HOST
class S3APSE2StorageDriver(S3StorageDriver):
name = 'Amazon S3 (ap-southeast-2)'
connectionCls = S3APSE2Connection
ex_location_name = 'ap-southeast-2'
region_name = 'ap-southeast-2'
class S3APNE1Connection(S3SignatureV4Connection):
host = S3_AP_NORTHEAST1_HOST
S3APNEConnection = S3APNE1Connection
class S3APNE1StorageDriver(S3StorageDriver):
name = 'Amazon S3 (ap-northeast-1)'
connectionCls = S3APNEConnection
ex_location_name = 'ap-northeast-1'
region_name = 'ap-northeast-1'
S3APNEStorageDriver = S3APNE1StorageDriver
class S3APNE2Connection(S3SignatureV4Connection):
host = S3_AP_NORTHEAST2_HOST
class S3APNE2StorageDriver(S3StorageDriver):
name = 'Amazon S3 (ap-northeast-2)'
connectionCls = S3APNE2Connection
ex_location_name = 'ap-northeast-2'
region_name = 'ap-northeast-2'
class S3APSouthConnection(S3SignatureV4Connection):
host = S3_AP_SOUTH_HOST
class S3APSouthStorageDriver(S3StorageDriver):
name = 'Amazon S3 (ap-south-1)'
connectionCls = S3APSouthConnection
ex_location_name = 'ap-south-1'
region_name = 'ap-south-1'
class S3SAEastConnection(S3SignatureV4Connection):
host = S3_SA_EAST_HOST
class S3SAEastStorageDriver(S3StorageDriver):
name = 'Amazon S3 (sa-east-1)'
connectionCls = S3SAEastConnection
ex_location_name = 'sa-east-1'
region_name = 'sa-east-1'
class S3CACentralConnection(S3SignatureV4Connection):
host = S3_CA_CENTRAL_HOST
class S3CACentralStorageDriver(S3StorageDriver):
name = 'Amazon S3 (ca-central-1)'
connectionCls = S3CACentralConnection
ex_location_name = 'ca-central-1'
region_name = 'ca-central-1'
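# Illustrative sketch (not part of the original module): the region-specific
# subclasses above only swap the connection class and region metadata, so a
# caller simply instantiates the one that matches the bucket's region. The
# credentials below are placeholders.
#
#   driver = S3EUWestStorageDriver('ACCESS_KEY_ID', 'SECRET_KEY')
#   for container in driver.iterate_containers():
#       print(container.name)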
| apache-2.0 | 3,435,936,633,774,291,500 | -6,138,231,109,187,863,000 | 35.298036 | 79 | 0.585084 | false |
liu602348184/django | django/contrib/postgres/fields/ranges.py | 172 | 5636 | import json
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range
from django.contrib.postgres import forms, lookups
from django.db import models
from django.utils import six
from .utils import AttributeSetter
__all__ = [
'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
'FloatRangeField', 'DateTimeRangeField', 'DateRangeField',
]
class RangeField(models.Field):
empty_strings_allowed = False
def get_prep_value(self, value):
if value is None:
return None
elif isinstance(value, Range):
return value
elif isinstance(value, (list, tuple)):
return self.range_type(value[0], value[1])
return value
def to_python(self, value):
if isinstance(value, six.string_types):
# Assume we're deserializing
vals = json.loads(value)
for end in ('lower', 'upper'):
if end in vals:
vals[end] = self.base_field.to_python(vals[end])
value = self.range_type(**vals)
elif isinstance(value, (list, tuple)):
value = self.range_type(value[0], value[1])
return value
def set_attributes_from_name(self, name):
super(RangeField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
def value_to_string(self, obj):
value = self.value_from_object(obj)
if value is None:
return None
if value.isempty:
return json.dumps({"empty": True})
base_field = self.base_field
result = {"bounds": value._bounds}
for end in ('lower', 'upper'):
obj = AttributeSetter(base_field.attname, getattr(value, end))
result[end] = base_field.value_to_string(obj)
return json.dumps(result)
def formfield(self, **kwargs):
kwargs.setdefault('form_class', self.form_field)
return super(RangeField, self).formfield(**kwargs)
class IntegerRangeField(RangeField):
base_field = models.IntegerField()
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int4range'
class BigIntegerRangeField(RangeField):
base_field = models.BigIntegerField()
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int8range'
class FloatRangeField(RangeField):
base_field = models.FloatField()
range_type = NumericRange
form_field = forms.FloatRangeField
def db_type(self, connection):
return 'numrange'
class DateTimeRangeField(RangeField):
base_field = models.DateTimeField()
range_type = DateTimeTZRange
form_field = forms.DateTimeRangeField
def db_type(self, connection):
return 'tstzrange'
class DateRangeField(RangeField):
base_field = models.DateField()
range_type = DateRange
form_field = forms.DateRangeField
def db_type(self, connection):
return 'daterange'
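# Illustrative sketch (not part of the original module): a hypothetical model
# using one of the range fields above together with a containment lookup.
# `Event` and `ages` are invented names for the example.
#
#   class Event(models.Model):
#       ages = IntegerRangeField()
#
#   Event.objects.create(ages=(0, 18))
#   Event.objects.filter(ages__contains=12)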
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)
class RangeContainedBy(models.Lookup):
lookup_name = 'contained_by'
type_mapping = {
'integer': 'int4range',
'bigint': 'int8range',
'double precision': 'numrange',
'date': 'daterange',
'timestamp with time zone': 'tstzrange',
}
def as_sql(self, qn, connection):
field = self.lhs.output_field
if isinstance(field, models.FloatField):
sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
else:
sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return sql % (lhs, rhs), params
def get_prep_lookup(self):
return RangeField().get_prep_lookup(self.lookup_name, self.rhs)
models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
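# For illustration: registering RangeContainedBy on the scalar fields above is
# what enables a query like the following, where a plain numeric column is
# tested against a range value. `Ticket` and `price` are invented names.
#
#   Ticket.objects.filter(price__contained_by=NumericRange(0, 100))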
@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
lookup_name = 'fully_lt'
operator = '<<'
@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
lookup_name = 'fully_gt'
operator = '>>'
@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
lookup_name = 'not_lt'
operator = '&>'
@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
lookup_name = 'not_gt'
operator = '&<'
@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
lookup_name = 'adjacent_to'
operator = '-|-'
@RangeField.register_lookup
class RangeStartsWith(lookups.FunctionTransform):
lookup_name = 'startswith'
function = 'lower'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class RangeEndsWith(lookups.FunctionTransform):
lookup_name = 'endswith'
function = 'upper'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class IsEmpty(lookups.FunctionTransform):
lookup_name = 'isempty'
function = 'isempty'
output_field = models.BooleanField()
| bsd-3-clause | 7,870,034,727,776,962,000 | 1,337,735,751,124,414,700 | 27.321608 | 94 | 0.671043 | false |
spencerlyon2/pygments | pygments/lexers/data.py | 2 | 17895 | # -*- coding: utf-8 -*-
"""
pygments.lexers.data
~~~~~~~~~~~~~~~~~~~~
    Lexers for data file formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
include, bygroups
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Punctuation, Literal
__all__ = ['YamlLexer', 'JsonLexer']
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
.. versionadded:: 0.11
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
        # the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors': [
# a full-form tag
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[0-9A-Za-z_-]+)?'
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[0-9A-Za-z_-]+', Name.Label),
# an alias
(r'\*[0-9A-Za-z_-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[^\n\r\f\v]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
],
        # a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
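# Illustrative sketch (not part of the original module): the lexer is normally
# driven through pygments.highlight(); something along these lines should
# print a terminal-highlighted rendering of a small YAML document.
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight("key: [1, 2, 3]\n", YamlLexer(), TerminalFormatter()))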
class JsonLexer(RegexLexer):
"""
For JSON data structures.
.. versionadded:: 1.5
"""
name = 'JSON'
aliases = ['json']
filenames = ['*.json']
mimetypes = ['application/json']
flags = re.DOTALL
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
tokens = {
'whitespace': [
(r'\s+', Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
Number.Float),
(int_part, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', Punctuation),
# comma terminates the attribute but expects more
(r',', Punctuation, '#pop'),
# a closing bracket terminates the entire object, so pop twice
(r'}', Punctuation, ('#pop', '#pop')),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
(r'}', Punctuation, '#pop'),
],
        # json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', Punctuation),
(r']', Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'{', Punctuation, 'objectvalue'),
(r'\[', Punctuation, 'arrayvalue'),
],
        # the root of a json document should be a value
'root': [
include('value'),
],
}
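# For illustration: tokens can also be inspected directly; get_tokens() is
# inherited from the pygments Lexer base class.
#
#   for token, value in JsonLexer().get_tokens('{"a": [1, 2.5, true]}'):
#       print(token, repr(value))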
| bsd-2-clause | -699,858,837,766,886,000 | -3,948,347,170,338,841,000 | 34.157171 | 83 | 0.486784 | false |
MQQiang/kbengine | kbe/src/lib/python/Lib/threading.py | 61 | 48900 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
from itertools import islice as _islice
try:
from _collections import deque as _deque
except ImportError:
from collections import deque as _deque
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k), so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
_set_sentinel = _thread._set_sentinel
get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
if _CRLock is None:
return _PyRLock(*args, **kwargs)
return _CRLock(*args, **kwargs)
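# Illustrative sketch (not part of the original module): the factory above
# returns the C implementation when available, and either flavour may be
# acquired repeatedly by the owning thread as long as every acquire() is
# matched by a release().
#
#   lock = RLock()
#   with lock:          # first acquisition
#       with lock:      # re-entrant acquisition by the same thread
#           pass        # released twice on the way out of the with blocks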
class _RLock:
"""This class implements reentrant lock objects.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self):
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self._count)
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count += 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self._owner != get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
if self._count == 0:
raise RuntimeError("cannot release un-acquired lock")
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == get_ident()
_PyRLock = _RLock
class Condition:
"""Class that implements a condition variable.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = _deque()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self._lock.acquire(0):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notify_all() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
gotit = False
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
return gotit
finally:
self._acquire_restore(saved_state)
if not gotit:
try:
self._waiters.remove(waiter)
except ValueError:
pass
def wait_for(self, predicate, timeout=None):
"""Wait until a condition evaluates to True.
predicate should be a callable which result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
all_waiters = self._waiters
waiters_to_notify = _deque(_islice(all_waiters, n))
if not waiters_to_notify:
return
for waiter in waiters_to_notify:
waiter.release()
try:
all_waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self._waiters))
notifyAll = notify_all
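# Illustrative sketch (not part of the original module): the usual
# producer/consumer pattern for a Condition. The helpers
# an_item_is_available(), get_an_available_item() and
# make_an_item_available() are invented names for the example.
#
#   cv = Condition()
#
#   # Consume one item
#   with cv:
#       cv.wait_for(an_item_is_available)
#       item = get_an_available_item()
#
#   # Produce one item
#   with cv:
#       make_an_item_available()
#       cv.notify()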
class Semaphore:
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value -= 1
rc = True
return rc
__enter__ = acquire
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self._cond:
self._value += 1
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
class BoundedSemaphore(Semaphore):
"""Implements a bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
with self._cond:
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
self._value += 1
self._cond.notify()
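# For illustration: a bounded semaphore guarding a fixed-size resource pool.
# `max_connections` and connect() are assumptions made for the example; the
# context manager form relies on the acquire()/release() pair defined above.
#
#   pool_sema = BoundedSemaphore(value=max_connections)
#
#   with pool_sema:
#       conn = connect()    # at most `max_connections` threads get this far
#       conn.close()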
class Event:
"""Class implementing event objects.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true. The flag is initially false.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self):
self._cond = Condition(Lock())
self._flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self._cond.__init__()
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
isSet = is_set
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
self._cond.acquire()
try:
self._flag = True
self._cond.notify_all()
finally:
self._cond.release()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
self._cond.acquire()
try:
self._flag = False
finally:
self._cond.release()
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
self._cond.acquire()
try:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
finally:
self._cond.release()
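# Illustrative sketch (not part of the original module): one thread blocks on
# the event while another sets it; wait() returns True once the flag is set,
# or False if the optional timeout expires first. do_work() is a hypothetical
# callable.
#
#   ready = Event()
#
#   def worker():
#       ready.wait()        # blocks until the main thread calls set()
#       do_work()
#
#   Thread(target=worker).start()
#   ready.set()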
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
"""Implements a Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
    points. Threads block on 'wait()' and are simultaneously awoken once they
    have all made that call.
"""
def __init__(self, parties, action=None, timeout=None):
"""Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called by one of
the threads after they have all entered the barrier and just prior to
        releasing them all. If a 'timeout' is provided, it is used as the
default for all subsequent 'wait()' calls.
"""
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
        self._state = 0 #0 filling, 1 draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""Wait for the barrier.
When the specified number of threads have started waiting, they are all
simultaneously awoken. If an 'action' was provided for the barrier, one
of the threads will have executed that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
    # Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
# a broken state all parties awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""Return the number of threads required to trip the barrier."""
return self._parties
@property
def n_waiting(self):
"""Return the number of threads currently waiting at the barrier."""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""Return True if the barrier is in a broken state."""
return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
pass
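# For illustration: a Barrier for two threads that must both reach the
# rendezvous point before either proceeds; wait() raises BrokenBarrierError
# if the barrier times out, is reset() or is abort()ed. start_server() and
# connect_to_server() are hypothetical calls.
#
#   b = Barrier(2, timeout=5)
#
#   def server():
#       start_server()
#       b.wait()            # blocks until the client thread arrives
#
#   def client():
#       b.wait()            # blocks until the server thread arrives
#       connect_to_server()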
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter += 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
_dangling = WeakSet()
# Main class for threads
class Thread:
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion. There are two ways
to specify the activity: by passing a callable object to the constructor, or
by overriding the run() method in a subclass.
"""
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
#XXX __exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, *, daemon=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = current_thread().daemon
self._ident = None
self._tstate_lock = None
self._started = Event()
self._is_stopped = False
self._initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self._stderr = _sys.stderr
# For debugging and _after_fork()
_dangling.add(self)
def _reset_internal_locks(self, is_alive):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
self._started._reset_internal_locks()
if is_alive:
self._set_tstate_lock()
else:
# The thread isn't alive after fork: it doesn't have a tstate
# anymore.
self._is_stopped = True
self._tstate_lock = None
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
self.is_alive() # easy way to get ._is_stopped set when appropriate
if self._is_stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = get_ident()
def _set_tstate_lock(self):
"""
Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted.
"""
self._tstate_lock = _set_sentinel()
self._tstate_lock.acquire()
def _bootstrap_inner(self):
try:
self._set_ident()
self._set_tstate_lock()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if _trace_hook:
_sys.settrace(_trace_hook)
if _profile_hook:
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
pass
except:
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self._stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self._exc_info()
try:
print((
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):"), file=self._stderr)
print((
"Traceback (most recent call last):"), file=self._stderr)
while exc_tb:
print((
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
exc_tb = exc_tb.tb_next
print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
#XXX self.__exc_clear()
pass
finally:
with _active_limbo_lock:
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[get_ident()]
except:
pass
def _stop(self):
# After calling ._stop(), .is_alive() returns False and .join() returns
# immediately. ._tstate_lock must be released before calling ._stop().
#
# Normal case: C code at the end of the thread's life
# (release_sentinel in _threadmodule.c) releases ._tstate_lock, and
# that's detected by our ._wait_for_tstate_lock(), called by .join()
# and .is_alive(). Any number of threads _may_ call ._stop()
# simultaneously (for example, if multiple threads are blocked in
# .join() calls), and they're not serialized. That's harmless -
# they'll just make redundant rebindings of ._is_stopped and
# ._tstate_lock. Obscure: we rebind ._tstate_lock last so that the
# "assert self._is_stopped" in ._wait_for_tstate_lock() always works
# (the assert is executed only if ._tstate_lock is None).
#
# Special case: _main_thread releases ._tstate_lock via this
# module's _shutdown() function.
lock = self._tstate_lock
if lock is not None:
assert not lock.locked()
self._is_stopped = True
self._tstate_lock = None
def _delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with _dummy_thread:
#
# Must take care to not raise an exception if _dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). _dummy_thread.get_ident() always returns -1 since
# there is only one thread if _dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from _dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
isAlive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if timeout is None:
self._wait_for_tstate_lock()
else:
# the behavior of a negative timeout isn't documented, but
# historically .join(timeout=x) for x<0 has acted as if timeout=0
self._wait_for_tstate_lock(timeout=max(timeout, 0))
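    # Illustrative sketch (not part of the original source): join() returns
    # None, so a timed-out join is detected by checking is_alive() afterwards.
    # `background_task` is a hypothetical callable.
    #
    #     t = Thread(target=background_task)
    #     t.start()
    #     t.join(timeout=5.0)
    #     if t.is_alive():
    #         pass  # join() timed out; the thread is still running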
def _wait_for_tstate_lock(self, block=True, timeout=-1):
# Issue #18808: wait for the thread state to be gone.
# At the end of the thread's life, after all knowledge of the thread
# is removed from C data structures, C code releases our _tstate_lock.
        # This method passes its arguments to _tstate_lock.acquire().
# If the lock is acquired, the C code is done, and self._stop() is
# called. That sets ._is_stopped to True, and ._tstate_lock to None.
lock = self._tstate_lock
if lock is None: # already determined that the C code is done
assert self._is_stopped
elif lock.acquire(block, timeout):
lock.release()
self._stop()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the thread.get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self._initialized, "Thread.__init__() not called"
return self._ident
def is_alive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self._initialized, "Thread.__init__() not called"
if self._is_stopped or not self._started.is_set():
return False
self._wait_for_tstate_lock(False)
return not self._is_stopped
isAlive = is_alive
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when no alive non-daemon threads are
left.
"""
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread")
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=None, kwargs=None)
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=None, kwargs=None):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
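    # Illustrative sketch (not part of the original source): a Timer is a
    # one-shot delayed call that may be cancelled before it fires. `notify`
    # is a hypothetical callable.
    #
    #     t = Timer(30.0, notify, args=["done"])
    #     t.start()
    #     t.cancel()   # has no effect if notify() has already run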
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False)
self._set_tstate_lock()
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _stop(self):
pass
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[get_ident()]
except KeyError:
return _DummyThread()
currentThread = current_thread
def active_count():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
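# Illustrative sketch (not part of the original source): from user code the
# introspection helpers above are reached through the `threading` namespace.
#
#     import threading
#     print(threading.current_thread().name, threading.active_count())
#     for t in threading.enumerate():
#         print(t.name, t.daemon, t.is_alive())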
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_main_thread = _MainThread()
def _shutdown():
# Obscure: other threads may be waiting to join _main_thread. That's
# dubious, but some code does it. We can't wait for C code to release
# the main thread's tstate_lock - that won't happen until the interpreter
# is nearly dead. So we release it here. Note that just calling _stop()
# isn't enough: other threads may already be waiting on _tstate_lock.
tlock = _main_thread._tstate_lock
# The main thread isn't finished yet, so its thread state lock can't have
# been released.
assert tlock is not None
assert tlock.locked()
tlock.release()
_main_thread._stop()
t = _pickSomeNonDaemonThread()
while t:
t.join()
t = _pickSomeNonDaemonThread()
_main_thread._delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
def main_thread():
"""Return the main thread object.
In normal conditions, the main thread is the thread from which the
Python interpreter was started.
"""
return _main_thread
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock, _main_thread
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
_main_thread = current
with _active_limbo_lock:
# Dangling thread instances must still have their locks reset,
# because someone may join() them.
threads = set(_enumerate())
threads.update(_dangling)
for thread in threads:
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
thread._reset_internal_locks(True)
ident = get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._reset_internal_locks(False)
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
| lgpl-3.0 | -8,024,645,297,592,366,000 | 3,675,779,455,321,454,600 | 34.98234 | 101 | 0.606892 | false |
zzragida/PythonExamples | MemberShip/deploy/qa-taiwan/web/db/__init__.py | 4 | 1563 | # -*- coding:utf-8 -*-
from SQLRelay import PySQLRClient
from SQLRelay import PySQLRDB
from config import SQLRELAYS
INSTANCES = []
for sqlrelay in SQLRELAYS:
INSTANCES.append([0, sqlrelay])
def sqlrelay_cursor():
''' Connect sqlrelay rdb '''
info = sorted(INSTANCES, key=lambda x: x[0])[0]
try:
con = PySQLRDB.connect(
info[1]['host'],
info[1]['port'],
'',
info[1]['user'],
info[1]['pass'],
0, 1)
cur = con.cursor()
except PySQLRDB.DatabaseError, e:
raise
info[0] += 1
return con, cur
def sqlrelay_close(cur, con):
''' Close sqlrelay rdb '''
if cur:
cur.close()
del cur
if con:
con.close()
del con
import gc; gc.collect()
def sqlrelay_client_cursor(debug=False):
''' Connect sqlrelay client '''
info = sorted(INSTANCES, key=lambda x: x[0])[0]
try:
con = PySQLRClient.sqlrconnection(
info[1]['host'],
info[1]['port'],
'',
info[1]['user'],
info[1]['pass'],
0, 1)
cur = PySQLRClient.sqlrcursor(con)
if debug:
con.debugOn()
except Exception, e:
raise
info[0] += 1
return con, cur
def sqlrelay_client_close(cur, con):
''' Close sqlrelay client '''
if cur:
del cur
if con:
con.debugOff()
con.endSession()
del con
import gc; gc.collect()
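# Illustrative sketch (not part of the original module): how the helper pairs
# above are presumably used by calling code. The SQL statement is made up, and
# the cursor methods assume the DB-API style interface exposed by PySQLRDB.
#
#     con, cur = sqlrelay_cursor()
#     try:
#         cur.execute('SELECT 1 FROM dual')
#         rows = cur.fetchall()
#     finally:
#         sqlrelay_close(cur, con)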
| mit | 1,725,915,786,319,428,000 | 2,614,326,178,193,296,400 | 19.84 | 51 | 0.497761 | false |
algorythmic/bash-completion | test/t/unit/test_unit_count_args.py | 2 | 2035 | import pytest
from conftest import TestUnitBase, assert_bash_exec
@pytest.mark.bashcomp(
cmd=None, ignore_env=r"^[+-](args|COMP_(WORDS|CWORD|LINE|POINT))="
)
class TestUnitCountArgs(TestUnitBase):
def _test(self, *args, **kwargs):
return self._test_unit("_count_args %s; echo $args", *args, **kwargs)
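    # Illustrative note (not part of the original file): judging from the
    # ignore_env pattern above, the positional values passed to _test() below
    # appear to correspond to COMP_WORDS, COMP_CWORD, COMP_LINE and COMP_POINT
    # for the simulated command line, e.g. _test(bash, "(a b)", 1, "a b", 3).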
def test_1(self, bash):
assert_bash_exec(bash, "COMP_CWORD= _count_args >/dev/null")
def test_2(self, bash):
"""a b| should set args to 1"""
output = self._test(bash, "(a b)", 1, "a b", 3)
assert output == "1"
def test_3(self, bash):
"""a b|c should set args to 1"""
output = self._test(bash, "(a bc)", 1, "a bc", 3)
assert output == "1"
def test_4(self, bash):
"""a b c| should set args to 2"""
output = self._test(bash, "(a b c)", 2, "a b c", 4)
assert output == "2"
def test_5(self, bash):
"""a b| c should set args to 1"""
output = self._test(bash, "(a b c)", 1, "a b c", 3)
assert output == "1"
def test_6(self, bash):
"""a b -c| d should set args to 2"""
output = self._test(bash, "(a b -c d)", 2, "a b -c d", 6)
assert output == "2"
def test_7(self, bash):
"""a b -c d e| with -c arg excluded should set args to 2"""
output = self._test(
bash, "(a b -c d e)", 4, "a b -c d e", 10, arg='"" "@(-c|--foo)"'
)
assert output == "2"
def test_8(self, bash):
"""a -b -c d e| with -c arg excluded
        and -b included should set args to 2"""
output = self._test(
bash,
"(a -b -c d e)",
4,
"a -b -c d e",
11,
arg='"" "@(-c|--foo)" "-[b]"',
)
assert output == "2"
def test_9(self, bash):
"""a -b -c d e| with -b included should set args to 3"""
output = self._test(
bash, "(a -b -c d e)", 4, "a -b -c d e", 11, arg='"" "" "-b"'
)
assert output == "3"
| gpl-2.0 | -3,231,854,902,883,970,600 | -3,018,937,286,797,935,000 | 29.833333 | 77 | 0.475184 | false |
gioman/QGIS | python/plugins/processing/gui/MultipleInputDialog.py | 2 | 4243 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MultipleInputDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
from builtins import basestring
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QDialog, QAbstractItemView, QPushButton, QDialogButtonBox
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgMultipleSelection.ui'))
class MultipleInputDialog(BASE, WIDGET):
def __init__(self, options, selectedoptions=None):
super(MultipleInputDialog, self).__init__(None)
self.setupUi(self)
self.lstLayers.setSelectionMode(QAbstractItemView.NoSelection)
self.options = []
for i, option in enumerate(options):
if option is None or isinstance(option, basestring):
self.options.append((i, option))
else:
self.options.append((option[0], option[1]))
self.selectedoptions = selectedoptions or []
# Additional buttons
self.btnSelectAll = QPushButton(self.tr('Select all'))
self.buttonBox.addButton(self.btnSelectAll,
QDialogButtonBox.ActionRole)
self.btnClearSelection = QPushButton(self.tr('Clear selection'))
self.buttonBox.addButton(self.btnClearSelection,
QDialogButtonBox.ActionRole)
self.btnToggleSelection = QPushButton(self.tr('Toggle selection'))
self.buttonBox.addButton(self.btnToggleSelection,
QDialogButtonBox.ActionRole)
self.btnSelectAll.clicked.connect(lambda: self.selectAll(True))
self.btnClearSelection.clicked.connect(lambda: self.selectAll(False))
self.btnToggleSelection.clicked.connect(self.toggleSelection)
self.populateList()
def populateList(self):
model = QStandardItemModel()
for value, text in self.options:
item = QStandardItem(text)
item.setData(value, Qt.UserRole)
item.setCheckState(Qt.Checked if value in self.selectedoptions else Qt.Unchecked)
item.setCheckable(True)
model.appendRow(item)
self.lstLayers.setModel(model)
def accept(self):
self.selectedoptions = []
model = self.lstLayers.model()
for i in range(model.rowCount()):
item = model.item(i)
if item.checkState() == Qt.Checked:
self.selectedoptions.append(item.data(Qt.UserRole))
QDialog.accept(self)
def reject(self):
self.selectedoptions = None
QDialog.reject(self)
def selectAll(self, value):
model = self.lstLayers.model()
for i in range(model.rowCount()):
item = model.item(i)
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def toggleSelection(self):
model = self.lstLayers.model()
for i in range(model.rowCount()):
item = model.item(i)
checked = item.checkState() == Qt.Checked
item.setCheckState(Qt.Unchecked if checked else Qt.Checked)
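    # Illustrative sketch (not part of the original file): typical use from a
    # Processing widget. The option labels here are made up.
    #
    #     dlg = MultipleInputDialog(['layer A', 'layer B'], selectedoptions=[0])
    #     if dlg.exec_():
    #         chosen = dlg.selectedoptions   # list of selected values
    #     else:
    #         chosen = None                  # dialog was cancelled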
| gpl-2.0 | -2,138,029,585,384,564,200 | 7,111,032,108,548,164,000 | 37.225225 | 93 | 0.573179 | false |
en0/Supybot_sasl | plugins/String/config.py | 8 | 2799 | ###
# Copyright (c) 2003-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('String', True)
String = conf.registerPlugin('String')
conf.registerGroup(String, 'levenshtein')
conf.registerGlobalValue(String.levenshtein, 'max',
registry.PositiveInteger(256, """Determines the maximum size of a string
given to the levenshtein command. The levenshtein command uses an O(m*n)
algorithm, which means that with strings of length 256, it can take 1.5
seconds to finish; with strings of length 384, though, it can take 4
seconds to finish, and with strings of much larger lengths, it takes more
and more time. Using nested commands, strings can get quite large, hence
this variable, to limit the size of arguments passed to the levenshtein
command."""))
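# Illustrative note (not part of the original file): plugin code would read this
# setting back with something like self.registryValue('levenshtein.max'), and a
# bot owner can raise it at runtime with the config plugin, e.g.
# "config plugins.String.levenshtein.max 512".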
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | 7,744,239,225,091,497,000 | 6,734,963,453,073,671,000 | 50.833333 | 79 | 0.765273 | false |
Xeralux/tensorflow | tensorflow/python/keras/_impl/keras/engine/training.py | 1 | 72917 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import losses
from tensorflow.python.keras._impl.keras import metrics as metrics_module
from tensorflow.python.keras._impl.keras import optimizers
from tensorflow.python.keras._impl.keras.engine import training_arrays
from tensorflow.python.keras._impl.keras.engine import training_eager
from tensorflow.python.keras._impl.keras.engine import training_generator
from tensorflow.python.keras._impl.keras.engine import training_utils
from tensorflow.python.keras._impl.keras.engine.base_layer import Layer
from tensorflow.python.keras._impl.keras.engine.network import Network
from tensorflow.python.keras._impl.keras.utils.generic_utils import slice_arrays
from tensorflow.python.layers.base import _DeferredTensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Model', 'keras.Model')
class Model(Network):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
    def __init__(self):
      super(MyModel, self).__init__()
      self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
      self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
    def __init__(self):
      super(MyModel, self).__init__()
      self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
      self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
      self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
"""
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance.
See [optimizers](/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
          to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
**kwargs: These arguments are passed to `tf.Session.run`.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
loss = loss or {}
if context.executing_eagerly() and not isinstance(
optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
raise ValueError('Only TF native optimizers are supported in Eager mode.')
self.optimizer = optimizers.get(optimizer)
self.loss = loss
self.metrics = metrics or []
self.loss_weights = loss_weights
if context.executing_eagerly() and sample_weight_mode is not None:
raise ValueError('sample_weight_mode is not supported in Eager mode.')
self.sample_weight_mode = sample_weight_mode
if context.executing_eagerly() and weighted_metrics is not None:
raise ValueError('weighted_metrics is not supported in Eager mode.')
self.weighted_metrics = weighted_metrics
if context.executing_eagerly() and target_tensors is not None:
raise ValueError('target_tensors is not supported in Eager mode.')
self.target_tensors = target_tensors
if not self.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
self._is_compiled = True
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
logging.warning(
'Output "' + name + '" missing from loss dictionary. '
'We assume this was done on purpose, '
'and we will not be expecting '
'any data to be passed to "' + name + '" during training.')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
'it should have one entry per model outputs. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' + str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [training_utils.weighted_masked_objective(fn)
for fn in loss_functions]
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
if not context.executing_eagerly():
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError(
'When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' + str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
str(loss_weights) + ' - expected a list of dicts.')
self.loss_weights_list = loss_weights_list
# initialization for Eager mode execution
if context.executing_eagerly():
if target_tensors is not None:
raise ValueError('target_tensors are not currently supported in Eager '
'mode.')
self.total_loss = None
self.metrics_tensors = []
self.metrics_names = ['loss']
for i in range(len(self.outputs)):
if len(self.outputs) > 1:
self.metrics_names.append(self.output_names[i] + '_loss')
self.nested_metrics = training_utils.collect_metrics(metrics,
self.output_names)
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
self._feed_sample_weight_modes.append(None)
self.sample_weights = []
self.targets = []
for i in range(len(self.outputs)):
self._feed_output_names.append(self.output_names[i])
self._collected_trainable_weights = self.trainable_weights
return
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors not in (None, []):
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError(
'When passing a list as `target_tensors`, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed target_tensors=' + str(target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError(
'Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
tmp_target_tensors = []
for name in self.output_names:
tmp_target_tensors.append(target_tensors.get(name, None))
target_tensors = tmp_target_tensors
else:
raise TypeError('Expected `target_tensors` to be '
'a list or dict, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = K.int_shape(self.outputs[i])
name = self.output_names[i]
if target_tensors not in (None, []):
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError(
'Unknown entry in '
'sample_weight_mode dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
if name not in sample_weight_mode:
raise ValueError(
'Output "' + name + '" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
            weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' + str(sample_weight_mode))
for i in range(len(self.output_names)):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
mode = sample_weight_mode[i]
name = self.output_names[i]
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
if sample_weight_mode == 'temporal':
sample_weights.append(array_ops.placeholder_with_default(
[[1.]], shape=[None, None], name=name + '_sample_weights'))
sample_weight_modes.append('temporal')
else:
sample_weights.append(array_ops.placeholder_with_default(
[1.], shape=[None], name=name + '_sample_weights'))
sample_weight_modes.append(None)
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
if i not in skip_target_weighing_indices:
self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
# Prepare metrics.
self.weighted_metrics = weighted_metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# List of same size as output_names.
# contains tuples (metrics for output, names of metrics).
nested_metrics = training_utils.collect_metrics(metrics, self.output_names)
nested_weighted_metrics = training_utils.collect_metrics(weighted_metrics,
self.output_names)
self.metrics_updates = []
self.stateful_metric_names = []
with K.name_scope('metrics'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weights = sample_weights[i]
output_metrics = nested_metrics[i]
output_weighted_metrics = nested_weighted_metrics[i]
def handle_metrics(metrics, weights=None):
metric_name_prefix = 'weighted_' if weights is not None else ''
for metric in metrics:
if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):
# custom handling of accuracy/crossentropy
# (because of class mode duality)
output_shape = self.outputs[i].get_shape().as_list()
if (output_shape[-1] == 1 or
self.loss_functions[i] == losses.binary_crossentropy):
# case: binary accuracy/crossentropy
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.binary_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.binary_crossentropy
elif self.loss_functions[
i] == losses.sparse_categorical_crossentropy:
# case: categorical accuracy/crossentropy with sparse targets
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.sparse_categorical_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.sparse_categorical_crossentropy
else:
# case: categorical accuracy/crossentropy
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.categorical_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.categorical_crossentropy
if metric in ('accuracy', 'acc'):
suffix = 'acc'
elif metric in ('crossentropy', 'ce'):
suffix = 'ce'
weighted_metric_fn = training_utils.weighted_masked_objective(
metric_fn)
metric_name = metric_name_prefix + suffix
else:
metric_fn = metrics_module.get(metric)
weighted_metric_fn = training_utils.weighted_masked_objective(
metric_fn)
# Get metric name as string
if hasattr(metric_fn, 'name'):
metric_name = metric_fn.name
else:
metric_name = metric_fn.__name__
metric_name = metric_name_prefix + metric_name
with K.name_scope(metric_name):
metric_result = weighted_metric_fn(
y_true, y_pred, weights=weights, mask=masks[i])
# Append to self.metrics_names, self.metric_tensors,
# self.stateful_metric_names
if len(self.output_names) > 1:
metric_name = '%s_%s' % (self.output_names[i], metric_name)
# Dedupe name
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_result)
# Keep track of state updates created by
# stateful metrics (i.e. metrics layers).
if isinstance(metric_fn, Layer):
self.stateful_metric_names.append(metric_name)
self.metrics_updates += metric_fn.updates
handle_metrics(output_metrics)
handle_metrics(output_weighted_metrics, weights=weights)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
if i not in skip_target_weighing_indices:
self._feed_sample_weights.append(self.sample_weights[i])
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.warning(
UserWarning(
'Discrepancy between trainable weights and collected trainable'
' weights, did you set `model.trainable` without calling'
' `model.compile` after ?'))
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
self._check_trainable_weights_consistency()
if self.train_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
# Training updates
updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self._feed_inputs)
# Stateful metrics updates
updates += self.metrics_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates + self.metrics_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None):
"""Runs validation checks on input and target data passed by the user.
Also standardizes the data to lists of arrays, in order.
Also builds and compiles the model on the fly if it is a subclassed model
that has never been called before (and thus has no inputs/outputs).
This is a purely internal method, subject to refactoring at any time.
Args:
x: An array or list of arrays, to be used as input data. If the model
has known, named inputs, this could also be a dict mapping input names
to the corresponding array.
y: An array or list of arrays, to be used as target data. If the model
has known, named outputs, this could also be a dict mapping output names
to the corresponding array.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
      class_weight: An optional class-weight array passed by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
Returns:
A tuple of 3 lists: input arrays, target arrays, sample-weight arrays.
If the model's input and targets are symbolic, these lists are empty
(since the model takes no user-provided data, instead the data comes
from the symbolic inputs/targets).
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
# First, we build/compile the model on the fly if necessary.
all_inputs = []
if not self.built:
# We need to use `x` to set the model inputs.
# We type-check that `x` and `y` are either single arrays
# or lists of arrays.
if isinstance(x, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs += list(x)
elif isinstance(x, dict):
raise ValueError('Please do not pass a dictionary as model inputs.')
else:
if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs.append(x)
# Build the model using the retrieved inputs (value or symbolic).
# If values, then in symbolic-mode placeholders will be created
# to match the value shapes.
if not self.inputs:
self._set_inputs(x)
if y is not None:
if not self.optimizer:
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
if not self._is_compiled:
# On-the-fly compilation of the model.
# We need to use `y` to set the model targets.
if isinstance(y, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
elif isinstance(y, dict):
raise ValueError('Please do not pass a dictionary as model targets.')
else:
if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
# Typecheck that all inputs are *either* value *or* symbolic.
# TODO(fchollet): this check could be removed in Eager mode?
if y is not None:
if isinstance(y, (list, tuple)):
all_inputs += list(y)
else:
all_inputs.append(y)
if any(tensor_util.is_tensor(v) for v in all_inputs):
if not all(tensor_util.is_tensor(v) for v in all_inputs):
raise ValueError('Do not pass inputs that mix Numpy arrays and '
'TensorFlow tensors. '
'You passed: x=' + str(x) + '; y=' + str(y))
if context.executing_eagerly():
target_tensors = None
else:
# Handle target tensors if any passed.
if not isinstance(y, (list, tuple)):
y = [y]
target_tensors = [v for v in y if tensor_util.is_tensor(v)]
self.compile(optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics,
loss_weights=self.loss_weights,
target_tensors=target_tensors)
    # If `x` and `y` were all symbolic, then the model should not be fed any
# inputs and targets.
# Note: in this case, `any` and `all` are equivalent since we disallow
# mixed symbolic/value inputs.
if any(tensor_util.is_tensor(v) for v in all_inputs):
return [], [], []
# What follows is input validation and standardization to list format,
# in the case where all inputs are value arrays.
if context.executing_eagerly():
# In eager mode, do not do shape validation.
feed_input_names = self.input_names
feed_input_shapes = None
elif not self._is_graph_network:
# Case: symbolic-mode subclassed network. Do not do shape validation.
feed_input_names = self._feed_input_names
feed_input_shapes = None
else:
# Case: symbolic-mode graph network.
# In this case, we run extensive shape validation checks.
feed_input_names = self._feed_input_names
feed_input_shapes = self._feed_input_shapes
# Standardize the inputs.
x = training_utils.standardize_input_data(
x,
feed_input_names,
feed_input_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='input')
if y is not None:
if context.executing_eagerly():
feed_output_names = self.output_names
feed_output_shapes = None
# Sample weighting not supported in this case.
# TODO(fchollet): consider supporting it.
feed_sample_weight_modes = [None for _ in self.outputs]
elif not self._is_graph_network:
feed_output_names = self._feed_output_names
feed_output_shapes = None
# Sample weighting not supported in this case.
# TODO(fchollet): consider supporting it.
feed_sample_weight_modes = [None for _ in self.outputs]
else:
feed_output_names = self._feed_output_names
feed_sample_weight_modes = self._feed_sample_weight_modes
feed_output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
feed_output_shapes.append(output_shape[:-1] + (1,))
elif (not hasattr(loss_fn, '__name__') or
getattr(losses, loss_fn.__name__, None) is None):
# If `loss_fn` is not a function (e.g. callable class)
            # or if it is not in the `losses` module, then
# it is a user-defined loss and we make no assumptions
# about it.
feed_output_shapes.append(None)
else:
feed_output_shapes.append(output_shape)
# Standardize the outputs.
y = training_utils.standardize_input_data(
y,
feed_output_names,
feed_output_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='target')
# Generate sample-wise weight values given the `sample_weight` and
# `class_weight` arguments.
sample_weights = training_utils.standardize_sample_weights(
sample_weight, feed_output_names)
class_weights = training_utils.standardize_class_weights(
class_weight, feed_output_names)
sample_weights = [
training_utils.standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
feed_sample_weight_modes)
]
# Check that all arrays have the same length.
training_utils.check_array_lengths(x, y, sample_weights)
if self._is_graph_network and not context.executing_eagerly():
# Additional checks to avoid users mistakenly using improper loss fns.
training_utils.check_loss_and_target_compatibility(
y, self._feed_loss_fns, feed_output_shapes)
else:
y = []
sample_weights = []
if self.stateful and batch_size:
# Check that for stateful networks, number of samples is a multiple
# of the static batch size.
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def _set_inputs(self, inputs, training=None):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Single array, or list of arrays. The arrays could be placeholders,
Numpy arrays, or data tensors.
- if placeholders: the model is built on top of these placeholders,
and we expect Numpy data to be fed for them when calling `fit`/etc.
- if Numpy data: we create placeholders matching the shape of the Numpy
arrays. We expect Numpy data to be fed for these placeholders
when calling `fit`/etc.
- if data tensors: the model is built on top of these tensors.
We do not expect any Numpy data to be provided when calling `fit`/etc.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
"""
if self.__class__.__name__ == 'Sequential':
# Note: we can't test whether the model is `Sequential` via `isinstance`
# since `Sequential` depends on `Model`.
if isinstance(inputs, list):
assert len(inputs) == 1
inputs = inputs[0]
self.build(input_shape=(None,) + inputs.shape[1:])
elif context.executing_eagerly():
self._eager_set_inputs(inputs)
else:
self._symbolic_set_inputs(inputs, training=training)
def _set_scope(self, scope=None):
"""Modify the Layer scope creation logic to create ResourceVariables."""
super(Model, self)._set_scope(scope=scope)
# Subclassed Models create ResourceVariables by default. This makes it
# easier to use Models in an eager/graph agnostic way (since eager execution
# always uses ResourceVariables).
if not self._is_graph_network:
self._scope.set_use_resource(True)
def _eager_set_inputs(self, inputs):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
We assume the number and ndim of outputs
    do not change over different calls.
Args:
inputs: Argument `x` (input data) passed by the user upon first model use.
Raises:
ValueError: If the model's inputs are already set.
"""
assert context.executing_eagerly()
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of model inputs/outputs as DeferredTensors,
# to keep track of number of inputs and outputs and their ndim.
if isinstance(inputs, (list, tuple)):
dummy_output_values = self.call(
[ops.convert_to_tensor(v, dtype=K.floatx()) for v in inputs])
dummy_input_values = list(inputs)
else:
dummy_output_values = self.call(
ops.convert_to_tensor(inputs, dtype=K.floatx()))
dummy_input_values = [inputs]
if isinstance(dummy_output_values, (list, tuple)):
dummy_output_values = list(dummy_output_values)
else:
dummy_output_values = [dummy_output_values]
self.outputs = [
_DeferredTensor(shape=(None for _ in v.shape),
dtype=v.dtype) for v in dummy_output_values]
self.inputs = [
_DeferredTensor(shape=(None for _ in v.shape),
dtype=v.dtype) for v in dummy_input_values]
self.input_names = [
'input_%d' % (i + 1) for i in range(len(dummy_input_values))]
self.output_names = [
'output_%d' % (i + 1) for i in range(len(dummy_output_values))]
self.built = True
  def _symbolic_set_inputs(self, inputs, outputs=None, training=None):
    """Set model's inputs and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Argument `x` (input data) passed by the user upon first model use.
outputs: None, a data tensor, or a list of data tensors. If None, the
outputs will be determined by invoking self.call(), otherwise the
provided value will be used.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
Raises:
ValueError: If the model's inputs are already set.
"""
assert not context.executing_eagerly()
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of symbolic model inputs (either by using the tensor
# provided, or by creating a placeholder if Numpy data was provided).
self.inputs = []
self.input_names = []
self._feed_inputs = []
self._feed_input_names = []
self._feed_input_shapes = []
if isinstance(inputs, (list, tuple)):
inputs = list(inputs)
else:
inputs = [inputs]
for i, v in enumerate(inputs):
name = 'input_%d' % (i + 1)
self.input_names.append(name)
if isinstance(v, list):
v = np.asarray(v)
if v.ndim == 1:
v = np.expand_dims(v, 1)
if isinstance(v, (np.ndarray)):
# We fix the placeholder shape except the batch size.
# This is suboptimal, but it is the best we can do with the info
# we have. The user should call `model._set_inputs(placeholders)`
# to specify custom placeholders if the need arises.
shape = (None,) + v.shape[1:]
placeholder = K.placeholder(shape=shape, name=name)
self.inputs.append(placeholder)
self._feed_inputs.append(placeholder)
self._feed_input_names.append(name)
self._feed_input_shapes.append(shape)
else:
# Assumed tensor - TODO(fchollet) additional type check?
self.inputs.append(v)
if K.is_placeholder(v):
self._feed_inputs.append(v)
self._feed_input_names.append(name)
self._feed_input_shapes.append(K.int_shape(v))
if outputs is None:
# Obtain symbolic outputs by calling the model.
if len(self.inputs) == 1:
if self._expects_training_arg:
outputs = self.call(self.inputs[0], training=training)
else:
outputs = self.call(self.inputs[0])
else:
if self._expects_training_arg:
outputs = self.call(self.inputs, training=training)
else:
outputs = self.call(self.inputs)
if isinstance(outputs, (list, tuple)):
outputs = list(outputs)
else:
outputs = [outputs]
self.outputs = outputs
self.output_names = [
'output_%d' % (i + 1) for i in range(len(self.outputs))]
self.built = True
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Numpy array of training data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
TensorFlow data tensors.
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
TensorFlow data tensors.
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling.
validation_data: tuple `(x_val, y_val)` or tuple
`(x_val, y_val, val_sample_weights)` on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `steps_per_epoch`
is specified. Total number of steps (batches of samples)
to validate before stopping.
**kwargs: Used for backwards compatibility.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
# TODO(fchollet): this method may be creating reference cycles, which would
# lead to accumulating garbage in memory when called in a loop. Investigate.
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning(
'The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
if x is None and y is None and steps_per_epoch is None:
raise ValueError('If fitting from data tensors, '
'you should specify the `steps_per_epoch` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size)
# Prepare validation data.
if validation_data:
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing validation_data, '
'it must contain 2 (x_val, y_val) '
'or 3 (x_val, y_val, val_sample_weights) '
'items, however it contains %d items' % len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
batch_size=batch_size)
elif validation_split and 0. < validation_split < 1.:
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
sample_weights, val_sample_weights = (slice_arrays(
sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))
elif validation_steps:
val_x = []
val_y = []
val_sample_weights = []
else:
val_x = None
val_y = None
val_sample_weights = None
if context.executing_eagerly():
return training_eager.fit_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
else:
return training_arrays.fit_loop(
self, x, y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
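  # Example (illustrative sketch, not part of the original module): calling
  # `fit` on in-memory Numpy data. The array shapes, optimizer and loss below
  # are assumptions made only for this example, and `model` is assumed to be
  # a built, compiled instance of this class.
  #
  #   import numpy as np
  #   x_train = np.random.random((1000, 20)).astype('float32')
  #   y_train = np.random.randint(2, size=(1000, 1))
  #   model.compile(optimizer='sgd', loss='binary_crossentropy',
  #                 metrics=['accuracy'])
  #   history = model.fit(x_train, y_train, batch_size=32, epochs=5,
  #                       validation_split=0.2)
  #   print(history.history['val_loss'])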
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Numpy array of test data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
TensorFlow data tensors.
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
TensorFlow data tensors.
batch_size: Integer or `None`.
Number of samples per evaluation step.
If unspecified, `batch_size` will default to 32.
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and y is None and steps is None:
raise ValueError('If evaluating from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
batch_size=batch_size)
if context.executing_eagerly():
return training_eager.test_loop(
self, inputs=x, targets=y, sample_weights=sample_weights,
batch_size=batch_size, verbose=verbose, steps=steps)
else:
return training_arrays.test_loop(
self, inputs=x, targets=y, sample_weights=sample_weights,
batch_size=batch_size, verbose=verbose, steps=steps)
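  # Example (illustrative sketch): evaluating a compiled model on held-out
  # data. `x_test`/`y_test` are assumed Numpy arrays matching the model's
  # input/output shapes, and the model is assumed to have been compiled with
  # at least one metric so that a list is returned.
  #
  #   results = model.evaluate(x_test, y_test, batch_size=128)
  #   for name, value in zip(model.metrics_names, results):
  #     print(name, value)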
def predict(self, x, batch_size=None, verbose=0, steps=None):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: The input data, as a Numpy array
        (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and steps is None:
raise ValueError('If predicting from data tensors, '
'you should specify the `steps` '
'argument.')
x, _, _ = self._standardize_user_data(x)
if context.executing_eagerly():
return training_eager.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
else:
return training_arrays.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
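  # Example (illustrative sketch): generating predictions for new samples.
  # `x_new` is an assumed Numpy array with the same feature shape the model
  # was trained on.
  #
  #   probabilities = model.predict(x_new, batch_size=64)
  #   print(probabilities.shape)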
def train_on_batch(self, x, y, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight)
if context.executing_eagerly():
outputs = training_eager.train_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
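  # Example (illustrative sketch): a minimal custom training loop built on
  # `train_on_batch`. `batch_iterator` is an assumed helper that yields
  # `(x_batch, y_batch)` Numpy pairs for one pass over the data.
  #
  #   for epoch in range(5):
  #     for x_batch, y_batch in batch_iterator():
  #       loss_and_metrics = model.train_on_batch(x_batch, y_batch)
  #     print('epoch', epoch, 'last batch:', loss_and_metrics)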
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight)
if context.executing_eagerly():
outputs = training_eager.test_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input samples, as a Numpy array.
Returns:
Numpy array(s) of predictions.
"""
x, _, _ = self._standardize_user_data(x)
if context.executing_eagerly():
inputs = [ops.convert_to_tensor(val, dtype=K.floatx()) for val in x]
return self(inputs) # pylint: disable=not-callable
if not context.executing_eagerly():
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
The use of `keras.utils.Sequence` guarantees the ordering
and guarantees the single use of every input per epoch when
using `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of `Sequence`
(`keras.utils.Sequence`)
object in order to avoid duplicate data
when using multiprocessing.
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single batch.
Therefore, all arrays in this tuple must have the same length (equal
to the size of this batch). Different batches may have different
sizes.
For example, the last batch of the epoch is commonly smaller than
the
others, if the size of the dataset is not divisible by the batch
size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of samples of your dataset
divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(validation_data)` as a number of steps.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Integer. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
shuffle: Boolean. Whether to shuffle the order of the batches at
the beginning of each epoch. Only used with instances
of `Sequence` (`keras.utils.Sequence`).
Has no effect when `steps_per_epoch` is not `None`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`fit_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.fit_generator(
self,
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
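  # Example (illustrative sketch): feeding `fit_generator` with a
  # `keras.utils.Sequence` instead of a plain generator, which keeps batch
  # order well defined and is safe with `use_multiprocessing=True`. The
  # `Sequence` import, `load_images` helper and the file/label lists are
  # assumptions for this example only.
  #
  #   class ImageSequence(Sequence):
  #     def __init__(self, filenames, labels, batch_size):
  #       self.filenames, self.labels, self.batch_size = filenames, labels, batch_size
  #     def __len__(self):
  #       return int(np.ceil(len(self.filenames) / float(self.batch_size)))
  #     def __getitem__(self, idx):
  #       sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
  #       return load_images(self.filenames[sl]), self.labels[sl]
  #
  #   model.fit_generator(ImageSequence(train_files, train_labels, 32),
  #                       epochs=10, workers=4, use_multiprocessing=True)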
def evaluate_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: maximum size for the generator queue
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
    Raises:
      ValueError: In case of invalid arguments, or in case the generator
        yields data in an invalid format.
"""
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`evaluate_generator` is not yet enabled for '
'unbuilt Model subclasses')
return training_generator.evaluate_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def predict_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: Maximum size for the generator queue.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: verbosity mode, 0 or 1.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`predict_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.predict_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
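  # Example (illustrative sketch): scoring unlabeled batches with
  # `predict_generator`. `eval_batches` is an assumed generator (or Sequence)
  # yielding input batches only, and `n_batches` is the number of batches it
  # produces per pass.
  #
  #   predictions = model.predict_generator(eval_batches, steps=n_batches,
  #                                         workers=2, verbose=1)
  #   print(predictions.shape)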
| apache-2.0 | 2,691,457,283,609,087,500 | -7,295,888,664,581,614,000 | 41.344367 | 99 | 0.614041 | false |
awkspace/ansible | lib/ansible/plugins/callback/slack.py | 40 | 8260 | # (C) 2014-2015, Matt Martz <[email protected]>
# (C) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: slack
callback_type: notification
requirements:
- whitelist in configuration
- prettytable (python library)
short_description: Sends play events to a Slack channel
version_added: "2.1"
description:
- This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
- Before 2.4 only environment variables were available for configuring this plugin
options:
webhook_url:
required: True
description: Slack Webhook URL
env:
- name: SLACK_WEBHOOK_URL
ini:
- section: callback_slack
key: webhook_url
channel:
default: "#ansible"
description: Slack room to post in.
env:
- name: SLACK_CHANNEL
ini:
- section: callback_slack
key: channel
username:
description: Username to post as.
env:
- name: SLACK_USERNAME
default: ansible
ini:
- section: callback_slack
key: username
validate_certs:
description: validate the SSL certificate of the Slack server. (For HTTPS URLs)
version_added: "2.8"
env:
- name: SLACK_VALIDATE_CERTS
ini:
- section: callback_slack
key: validate_certs
default: True
type: bool
'''
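# Example (illustrative only, mirroring the options declared above): enabling
# this callback from ansible.cfg. The webhook URL is a placeholder, not a
# real endpoint.
#
#   [defaults]
#   callback_whitelist = slack
#
#   [callback_slack]
#   webhook_url = https://hooks.slack.com/services/XXX/YYY/ZZZ
#   channel = #ansible
#   username = ansible
#
# The same settings can also be supplied through the SLACK_WEBHOOK_URL,
# SLACK_CHANNEL and SLACK_USERNAME environment variables.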
import json
import os
import uuid
from ansible import context
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase
try:
import prettytable
HAS_PRETTYTABLE = True
except ImportError:
HAS_PRETTYTABLE = False
class CallbackModule(CallbackBase):
"""This is an ansible callback plugin that sends status
updates to a Slack channel during playbook execution.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'slack'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
if not HAS_PRETTYTABLE:
self.disabled = True
self._display.warning('The `prettytable` python module is not '
'installed. Disabling the Slack callback '
'plugin.')
self.playbook_name = None
# This is a 6 character identifier provided with each message
# This makes it easier to correlate messages when there are more
# than 1 simultaneous playbooks running
self.guid = uuid.uuid4().hex[:6]
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.webhook_url = self.get_option('webhook_url')
self.channel = self.get_option('channel')
self.username = self.get_option('username')
self.show_invocation = (self._display.verbosity > 1)
self.validate_certs = self.get_option('validate_certs')
if self.webhook_url is None:
self.disabled = True
self._display.warning('Slack Webhook URL was not provided. The '
'Slack Webhook URL can be provided using '
'the `SLACK_WEBHOOK_URL` environment '
'variable.')
def send_msg(self, attachments):
headers = {
'Content-type': 'application/json',
}
payload = {
'channel': self.channel,
'username': self.username,
'attachments': attachments,
'parse': 'none',
'icon_url': ('http://cdn2.hubspot.net/hub/330046/'
'file-449187601-png/ansible_badge.png'),
}
data = json.dumps(payload)
self._display.debug(data)
self._display.debug(self.webhook_url)
try:
response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs,
headers=headers)
return response.read()
except Exception as e:
self._display.warning(u'Could not submit message to Slack: %s' %
to_text(e))
def v2_playbook_on_start(self, playbook):
self.playbook_name = os.path.basename(playbook._file_name)
title = [
'*Playbook initiated* (_%s_)' % self.guid
]
invocation_items = []
if context.CLIARGS and self.show_invocation:
tags = context.CLIARGS['tags']
skip_tags = context.CLIARGS['skip_tags']
extra_vars = context.CLIARGS['extra_vars']
subset = context.CLIARGS['subset']
inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']]
invocation_items.append('Inventory: %s' % ', '.join(inventory))
if tags and tags != ['all']:
invocation_items.append('Tags: %s' % ', '.join(tags))
if skip_tags:
invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))
if subset:
invocation_items.append('Limit: %s' % subset)
if extra_vars:
invocation_items.append('Extra Vars: %s' %
' '.join(extra_vars))
title.append('by *%s*' % context.CLIARGS['remote_user'])
title.append('\n\n*%s*' % self.playbook_name)
msg_items = [' '.join(title)]
if invocation_items:
msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
msg = '\n'.join(msg_items)
attachments = [{
'fallback': msg,
'fields': [
{
'value': msg
}
],
'color': 'warning',
'mrkdwn_in': ['text', 'fallback', 'fields'],
}]
self.send_msg(attachments=attachments)
def v2_playbook_on_play_start(self, play):
"""Display Play start messages"""
name = play.name or 'Play name not specified (%s)' % play._uuid
msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
attachments = [
{
'fallback': msg,
'text': msg,
'color': 'warning',
'mrkdwn_in': ['text', 'fallback', 'fields'],
}
]
self.send_msg(attachments=attachments)
def v2_playbook_on_stats(self, stats):
"""Display info about playbook statistics"""
hosts = sorted(stats.processed.keys())
t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
'Failures', 'Rescued', 'Ignored'])
failures = False
unreachable = False
for h in hosts:
s = stats.summarize(h)
if s['failures'] > 0:
failures = True
if s['unreachable'] > 0:
unreachable = True
t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
'failures', 'rescued', 'ignored']])
attachments = []
msg_items = [
'*Playbook Complete* (_%s_)' % self.guid
]
if failures or unreachable:
color = 'danger'
msg_items.append('\n*Failed!*')
else:
color = 'good'
msg_items.append('\n*Success!*')
msg_items.append('```\n%s\n```' % t)
msg = '\n'.join(msg_items)
attachments.append({
'fallback': msg,
'fields': [
{
'value': msg
}
],
'color': color,
'mrkdwn_in': ['text', 'fallback', 'fields']
})
self.send_msg(attachments=attachments)
| gpl-3.0 | -8,072,981,300,330,846,000 | -5,335,752,206,670,113,000 | 31.777778 | 116 | 0.536683 | false |
annahs/atmos_research | WHI_long_term_2min_data_to_db.py | 1 | 8596 | import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import calendar
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import dates
start = datetime(2009,7,15,4) #2009 - 20090628 2010 - 20100610 2012 - 20100405
end = datetime(2009,8,17) #2009 - 20090816 2010 - 20100726 2012 - 20100601
timestep = 6.#1./30 #hours
sample_min = 117 #117 for all 2009-2012
sample_max = 123 #123 for all 2009-2012
yag_min = 3.8 #3.8 for all 2009-2012
yag_max = 6 #6 for all 2009-2012
BC_VED_min = 70
BC_VED_max = 220
min_scat_pkht = 20
mass_min = ((BC_VED_min/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
mass_max = ((BC_VED_max/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
lag_threshold_2009 = 0.1
lag_threshold_2010 = 0.25
lag_threshold_2012 = 1.5
print 'mass limits', mass_min, mass_max
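#worked example of the VED-to-mass conversion above (BC density 1.8 g/cm3,
#VED converted from nm to cm, mass returned in fg); the numbers are
#approximate and shown only to illustrate the expected order of magnitude:
# BC_VED_min = 70 nm -> (70e-7 cm)**3 * pi/6 * 1.8 * 1e15 ~ 0.32 fg
# BC_VED_max = 220 nm -> (220e-7 cm)**3 * pi/6 * 1.8 * 1e15 ~ 10.0 fg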
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def check_spike_times(particle_start_time,particle_end_time):
cursor.execute('''SELECT count(*)
FROM whi_spike_times_2009to2012
WHERE (spike_start_UTC <= %s AND spike_end_UTC > %s)
OR (spike_start_UTC <= %s AND spike_end_UTC > %s)
''',
(particle_start_time,particle_start_time,particle_end_time,particle_end_time))
spike_count = cursor.fetchall()[0][0]
return spike_count
def get_hysplit_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_hysplit_hourly_data
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
hy_id_list = cursor.fetchall()
if hy_id_list == []:
hy_id = None
else:
hy_id = hy_id_list[0][0]
return hy_id
def get_met_info(particle_start_time):
cursor.execute('''SELECT id,pressure_Pa,room_temp_C
FROM whi_sampling_conditions
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
met_list = cursor.fetchall()
if met_list == []:
met_list = [[np.nan,np.nan,np.nan]]
return met_list[0]
def get_gc_id(particle_start_time):
cursor.execute('''SELECT id
FROM whi_gc_hourly_bc_data
WHERE (UNIX_UTC_start_time <= %s AND UNIX_UTC_end_time > %s)
''',
(particle_start_time,particle_start_time))
gc_id_list = cursor.fetchall()
if gc_id_list == []:
gc_id = None
else:
gc_id = gc_id_list[0][0]
return gc_id
def get_sample_factor(UNIX_start):
date_time = datetime.utcfromtimestamp(UNIX_start)
sample_factors_2012 = [
[datetime(2012,4,4,19,43,4), datetime(2012,4,5,13,47,9), 3.0],
[datetime(2012,4,5,13,47,9), datetime(2012,4,10,3,3,25), 1.0],
[datetime(2012,4,10,3,3,25), datetime(2012,5,16,6,9,13), 3.0],
[datetime(2012,5,16,6,9,13), datetime(2012,6,7,18,14,39), 10.0],
]
if date_time.year in [2009,2010]:
sample_factor = 1.0
if date_time.year == 2012:
for date_range in sample_factors_2012:
start_date = date_range[0]
end_date = date_range[1]
range_sample_factor = date_range[2]
if start_date<= date_time < end_date:
sample_factor = range_sample_factor
return sample_factor
def lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos):
	#lag time between the incandescence and scattering peak positions
	#(0.2 us per acquisition bin), classified as long or short against the
	#year-specific threshold. Uses the module-level `start` datetime of the
	#current averaging interval to pick the threshold.
	long_lags = 0
	short_lags = 0
	lag_time = (BB_incand_pk_pos-BB_scat_pk_pos)*0.2 #us
	if (-10 < lag_time < 10):
		if start.year == 2009 and lag_time > lag_threshold_2009:
			long_lags = 1
		elif start.year == 2010 and lag_time > lag_threshold_2010:
			long_lags = 1
		elif start.year == 2012 and lag_time > lag_threshold_2012:
			long_lags = 1
		else:
			short_lags = 1
	else:
		lag_time = np.nan
	return [lag_time,long_lags,short_lags]
#query to add 1h mass conc data
add_data = ('''INSERT INTO whi_sp2_2min_data
(UNIX_UTC_start_time,UNIX_UTC_end_time,number_particles,rBC_mass_conc,rBC_mass_conc_err,volume_air_sampled,sampling_duration,mean_lag_time,sample_factor,hysplit_hourly_id,whi_sampling_cond_id,gc_hourly_id)
VALUES (%(UNIX_UTC_start_time)s,%(UNIX_UTC_end_time)s,%(number_particles)s,%(rBC_mass_conc)s,%(rBC_mass_conc_err)s,%(volume_air_sampled)s,%(sampling_duration)s,%(mean_lag_time)s,%(sample_factor)s,%(hysplit_hourly_id)s,%(whi_sampling_cond_id)s,%(gc_hourly_id)s)'''
)
#
multiple_records = []
i=1
while start <= end:
long_lags = 0
short_lags = 0
if (4 <= start.hour < 16):
UNIX_start = calendar.timegm(start.utctimetuple())
UNIX_end = UNIX_start + timestep*3600.0
print start, UNIX_start+60
print datetime.utcfromtimestamp(UNIX_end)
#filter on hk data here
cursor.execute('''(SELECT
mn.UNIX_UTC_ts_int_start,
mn.UNIX_UTC_ts_int_end,
mn.rBC_mass_fg_BBHG,
mn.rBC_mass_fg_BBHG_err,
mn.BB_incand_pk_pos,
mn.BB_scat_pk_pos,
mn.BB_scat_pkht,
hk.sample_flow,
mn.BB_incand_HG
FROM whi_sp2_particle_data mn
FORCE INDEX (hourly_binning)
JOIN whi_hk_data hk on mn.HK_id = hk.id
WHERE
mn.UNIX_UTC_ts_int_start >= %s
AND mn.UNIX_UTC_ts_int_end < %s
AND hk.sample_flow >= %s
AND hk.sample_flow < %s
AND hk.yag_power >= %s
AND hk.yag_power < %s)''',
(UNIX_start,UNIX_end,sample_min,sample_max,yag_min,yag_max))
ind_data = cursor.fetchall()
data={
'rBC_mass_fg':[],
'rBC_mass_fg_err':[],
'lag_time':[]
}
total_sample_vol = 0
for row in ind_data:
ind_start_time = float(row[0])
ind_end_time = float(row[1])
bbhg_mass_corr11 = float(row[2])
bbhg_mass_corr_err = float(row[3])
BB_incand_pk_pos = float(row[4])
BB_scat_pk_pos = float(row[5])
BB_scat_pk_ht = float(row[6])
sample_flow = float(row[7]) #in vccm
incand_pkht = float(row[8])
#filter spike times here
if check_spike_times(ind_start_time,ind_end_time):
print 'spike'
continue
#skip the long interval
if (ind_end_time - ind_start_time) > 540:
print 'long interval'
continue
#skip if no sample flow
if sample_flow == None:
print 'no flow'
continue
#get sampling conditions id and met conditions
met_data = get_met_info(UNIX_start)
met_id = met_data[0]
pressure = met_data[1]
temperature = met_data[2]+273.15
correction_factor_for_STP = (273*pressure)/(101325*temperature)
sample_vol = (sample_flow*(ind_end_time-ind_start_time)/60)*correction_factor_for_STP #/60 b/c sccm and time in secs
total_sample_vol = total_sample_vol + sample_vol
bbhg_mass_corr = 0.01244+0.0172*incand_pkht
if (mass_min <= bbhg_mass_corr < mass_max):
#get sample factor
sample_factor = get_sample_factor(UNIX_start)
data['rBC_mass_fg'].append(bbhg_mass_corr*sample_factor)
data['rBC_mass_fg_err'].append(bbhg_mass_corr_err)
#only calc lag time if there is a scattering signal
if BB_scat_pk_ht > min_scat_pkht:
lags = lag_time_calc(BB_incand_pk_pos,BB_scat_pk_pos)
data['lag_time'].append(lags[0])
long_lags += lags[1]
short_lags += lags[2]
tot_rBC_mass_fg = sum(data['rBC_mass_fg'])
tot_rBC_mass_uncer = sum(data['rBC_mass_fg_err'])
rBC_number = len(data['rBC_mass_fg'])
mean_lag = float(np.mean(data['lag_time']))
if np.isnan(mean_lag):
mean_lag = None
#get hysplit_id
hysplit_id = None #get_hysplit_id(UNIX_start)
#get GC id
gc_id = None #get_gc_id(UNIX_start)
if total_sample_vol != 0:
mass_conc = (tot_rBC_mass_fg/total_sample_vol)
mass_conc_uncer = (tot_rBC_mass_uncer/total_sample_vol)
#add to db
single_record = {
'UNIX_UTC_start_time' :UNIX_start,
'UNIX_UTC_end_time' :UNIX_end,
'number_particles' :rBC_number,
'rBC_mass_conc' :mass_conc,
'rBC_mass_conc_err' :mass_conc_uncer,
'volume_air_sampled' :total_sample_vol,
'sampling_duration' :(total_sample_vol/2),
'mean_lag_time' :mean_lag,
'number_long_lag' :long_lags,
'number_short_lag' :short_lags,
'sample_factor' :sample_factor,
'hysplit_hourly_id' :hysplit_id,
'whi_sampling_cond_id' :met_id,
'gc_hourly_id' :gc_id,
}
multiple_records.append((single_record))
#bulk insert to db table
if i%1 == 0:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
#increment count
i+= 1
start += timedelta(hours = timestep)
#bulk insert of remaining records to db
if multiple_records != []:
cursor.executemany(add_data, multiple_records)
cnx.commit()
multiple_records = []
cnx.close()
| mit | -7,939,987,477,705,727,000 | -7,753,730,763,840,089,000 | 28.040541 | 268 | 0.640181 | false |
turicas/outputty | tests/test_Table_html.py | 2 | 5790 | #!/usr/bin/env python
# coding: utf-8
# Copyright 2011 Álvaro Justen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import tempfile
import os
from textwrap import dedent
from outputty import Table
class TestTableHtml(unittest.TestCase):
def test_to_html_should_without_parameters_should_return_string(self):
my_table = Table(headers=['ham', 'spam', 'eggs'])
self.assertTrue(isinstance(my_table.write('html'), str))
def test_to_html_with_only_headers(self):
my_table = Table(headers=['ham', 'spam', 'eggs', 'blah'])
output = my_table.write('html', css_classes=False)
expected = dedent('''
<table>
<thead>
<tr>
<th>ham</th>
<th>spam</th>
<th>eggs</th>
<th>blah</th>
</tr>
</thead>
</table>
''').strip()
self.assertEquals(output, expected)
def test_to_html_with_headers_and_some_rows(self):
my_table = Table(headers=['ham', 'spam', 'eggs'])
my_table.append(['python', 'rules', '!'])
my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
output = my_table.write('html', css_classes=False)
expected = dedent('''
<table>
<thead>
<tr>
<th>ham</th>
<th>spam</th>
<th>eggs</th>
</tr>
</thead>
<tbody>
<tr>
<td>python</td>
<td>rules</td>
<td>!</td>
</tr>
<tr>
<td>spam</td>
<td>eggs</td>
<td>ham</td>
</tr>
</tbody>
</table>
''').strip()
self.assertEquals(output, expected)
def test_to_html_with_headers_and_rows_with_some_columns_empty(self):
my_table = Table(headers=['ham', 'spam', 'eggs'])
my_table.append({'ham': 'spam'})
my_table.append({'spam': 'eggs'})
my_table.append({'eggs': 'ham'})
output = my_table.write('html', css_classes=False)
expected = dedent('''
<table>
<thead>
<tr>
<th>ham</th>
<th>spam</th>
<th>eggs</th>
</tr>
</thead>
<tbody>
<tr>
<td>spam</td>
<td></td>
<td></td>
</tr>
<tr>
<td></td>
<td>eggs</td>
<td></td>
</tr>
<tr>
<td></td>
<td></td>
<td>ham</td>
</tr>
</tbody>
</table>
''').strip()
self.assertEquals(output, expected)
def test_to_html_with_a_parameter_should_save_a_file(self):
temp_fp = tempfile.NamedTemporaryFile(delete=False)
temp_fp.close()
my_table = Table(headers=['ham', 'spam', 'eggs'])
my_table.append(['python', 'rules', '!'])
my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
my_table.write('html', temp_fp.name, css_classes=False)
temp_fp = open(temp_fp.name)
output = temp_fp.read()
temp_fp.close()
os.remove(temp_fp.name)
expected = dedent('''
<table>
<thead>
<tr>
<th>ham</th>
<th>spam</th>
<th>eggs</th>
</tr>
</thead>
<tbody>
<tr>
<td>python</td>
<td>rules</td>
<td>!</td>
</tr>
<tr>
<td>spam</td>
<td>eggs</td>
<td>ham</td>
</tr>
</tbody>
</table>
''').strip()
self.assertEquals(output, expected)
def test_to_html_should_create_CSS_classes_for_odd_and_even_rows(self):
my_table = Table(headers=['ham', 'spam', 'eggs'])
my_table.append(['python', 'rules', '!'])
my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
my_table.append(['python', 'rules', '!'])
my_table.append({'ham': 'spam', 'spam': 'eggs', 'eggs': 'ham'})
output = my_table.write('html', css_classes=True)
expected = dedent('''
<table>
<thead>
<tr class="header">
<th>ham</th>
<th>spam</th>
<th>eggs</th>
</tr>
</thead>
<tbody>
<tr class="odd">
<td>python</td>
<td>rules</td>
<td>!</td>
</tr>
<tr class="even">
<td>spam</td>
<td>eggs</td>
<td>ham</td>
</tr>
<tr class="odd">
<td>python</td>
<td>rules</td>
<td>!</td>
</tr>
<tr class="even">
<td>spam</td>
<td>eggs</td>
<td>ham</td>
</tr>
</tbody>
</table>
''').strip()
self.assertEquals(output, expected)
#TODO: test input and output encoding
| gpl-3.0 | -3,693,840,685,772,805,600 | -8,443,724,157,478,622,000 | 29.308901 | 75 | 0.457419 | false |
varunarya10/tempest | tempest/tests/fake_http.py | 42 | 2411 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import httplib2
class fake_httplib2(object):
def __init__(self, return_type=None, *args, **kwargs):
self.return_type = return_type
def request(self, uri, method="GET", body=None, headers=None,
redirections=5, connection_type=None):
if not self.return_type:
fake_headers = httplib2.Response(headers)
return_obj = {
'uri': uri,
'method': method,
'body': body,
'headers': headers
}
return (fake_headers, return_obj)
elif isinstance(self.return_type, int):
body = "fake_body"
header_info = {
'content-type': 'text/plain',
'status': str(self.return_type),
'content-length': len(body)
}
resp_header = httplib2.Response(header_info)
return (resp_header, body)
else:
msg = "unsupported return type %s" % self.return_type
raise TypeError(msg)
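# Example (illustrative only, not part of the original tests): exercising
# fake_httplib2 directly; the URL is a placeholder.
#
#   fake = fake_httplib2(return_type=200)
#   resp, body = fake.request('http://127.0.0.1/v2/servers', 'GET')
#   # resp.status == 200 and body == 'fake_body'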
class fake_httplib(object):
def __init__(self, headers, body=None,
version=1.0, status=200, reason="Ok"):
"""
:param headers: dict representing HTTP response headers
:param body: file-like object
:param version: HTTP Version
:param status: Response status code
:param reason: Status code related message.
"""
self.body = body
self.status = status
self.reason = reason
self.version = version
self.headers = headers
def getheaders(self):
return copy.deepcopy(self.headers).items()
def getheader(self, key, default):
return self.headers.get(key, default)
def read(self, amt):
return self.body.read(amt)
| apache-2.0 | 2,902,668,505,906,354,000 | 7,627,582,768,308,781,000 | 32.027397 | 78 | 0.59353 | false |
inviwo/inviwo | data/scripts/matplotlib_create_transferfunction.py | 2 | 1270 | # Inviwo Python script
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import inviwopy
from inviwopy.glm import vec2,vec3,vec4
#http://matplotlib.org/examples/color/colormaps_reference.html
#Perceptually Uniform Sequential : #['viridis', 'inferno', 'plasma', 'magma']
#Sequential : #['Blues', 'BuGn', 'BuPu','GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu','Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
#Sequential (2) : #['afmhot', 'autumn', 'bone', 'cool','copper', 'gist_heat', 'gray', 'hot','pink', 'spring', 'summer', 'winter']
#Diverging : #['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'seismic']
#Qualitative : #['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3']
#Miscellaneous : #['gist_earth', 'terrain', 'ocean', 'gist_stern','brg', 'CMRmap', 'cubehelix','gnuplot', 'gnuplot2', 'gist_ncar', 'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv', 'flag', 'prism']
tf = inviwopy.app.network.VolumeRaycaster.transferFunction
tf.clear()
cmapName = "viridis"
cmap=plt.get_cmap(cmapName)
N = 128
for i in range(0,N,1):
x = i / (N-1)
a = 1.0
color = cmap(x)
tf.add(x, vec4(color[0],color[1],color[2], a))
| bsd-2-clause | -4,376,937,243,387,703,300 | 5,206,964,527,113,374,000 | 39.967742 | 203 | 0.634646 | false |
Codefans-fan/odoo | openerp/addons/base/tests/test_orm.py | 20 | 17911 | from collections import defaultdict
from openerp.tools import mute_logger
from openerp.tests import common
UID = common.ADMIN_USER_ID
DB = common.DB
class TestORM(common.TransactionCase):
""" test special behaviors of ORM CRUD functions
TODO: use real Exceptions types instead of Exception """
def setUp(self):
super(TestORM, self).setUp()
cr, uid = self.cr, self.uid
self.partner = self.registry('res.partner')
self.users = self.registry('res.users')
self.p1 = self.partner.name_create(cr, uid, 'W')[0]
self.p2 = self.partner.name_create(cr, uid, 'Y')[0]
self.ir_rule = self.registry('ir.rule')
# sample unprivileged user
employee_gid = self.ref('base.group_user')
self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]})
@mute_logger('openerp.models')
def testAccessDeletedRecords(self):
""" Verify that accessing deleted records works as expected """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
self.partner.unlink(cr, uid, [p1])
# read() is expected to skip deleted records because our API is not
# transactional for a sequence of search()->read() performed from the
# client-side... a concurrent deletion could therefore cause spurious
# exceptions even when simply opening a list view!
# /!\ Using unprileged user to detect former side effects of ir.rules!
self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records")
self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records")
# Deleting an already deleted record should be simply ignored
self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op")
# Updating an already deleted record should raise, even as admin
with self.assertRaises(Exception):
self.partner.write(cr, uid, [p1], {'name': 'foo'})
@mute_logger('openerp.models')
def testAccessFilteredRecords(self):
""" Verify that accessing filtered records works as expected for non-admin user """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0]
self.ir_rule.create(cr, uid, {'name': 'Y is invisible',
'domain_force': [('id', '!=', p1)],
'model_id': partner_model})
# search as unprivileged user
partners = self.partner.search(cr, uid2, [])
self.assertFalse(p1 in partners, "W should not be visible...")
self.assertTrue(p2 in partners, "... but Y should be visible")
# read as unprivileged user
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1], ['name'])
# write as unprivileged user
with self.assertRaises(Exception):
self.partner.write(cr, uid2, [p1], {'name': 'foo'})
# unlink as unprivileged user
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1])
# Prepare mixed case
self.partner.unlink(cr, uid, [p2])
# read mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1,p2], ['name'])
# delete mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1,p2])
def test_multi_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
records = self.partner.read(self.cr, UID, [record_id])
self.assertIsInstance(records, list)
def test_one_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
record = self.partner.read(self.cr, UID, record_id)
self.assertIsInstance(record, dict)
@mute_logger('openerp.models')
def test_search_read(self):
# simple search_read
self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name'])
self.assertEqual(len(found), 1)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertTrue('id' in found[0])
# search_read correct order
self.partner.create(self.cr, UID, {'name': 'MyPartner2'})
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertEqual(found[1]['name'], 'MyPartner2')
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner2')
self.assertEqual(found[1]['name'], 'MyPartner1')
# search_read that finds nothing
found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name'])
self.assertEqual(len(found), 0)
def test_exists(self):
partner = self.partner.browse(self.cr, UID, [])
# check that records obtained from search exist
recs = partner.search([])
self.assertTrue(recs)
self.assertEqual(recs.exists(), recs)
# check that there is no record with id 0
recs = partner.browse([0])
self.assertFalse(recs.exists())
def test_groupby_date(self):
partners = dict(
A='2012-11-19',
B='2012-12-17',
C='2012-12-31',
D='2013-01-07',
E='2013-01-14',
F='2013-01-28',
G='2013-02-11',
)
all_partners = []
partners_by_day = defaultdict(set)
partners_by_month = defaultdict(set)
partners_by_year = defaultdict(set)
for name, date in partners.items():
p = self.partner.create(self.cr, UID, dict(name=name, date=date))
all_partners.append(p)
partners_by_day[date].add(p)
partners_by_month[date.rsplit('-', 1)[0]].add(p)
partners_by_year[date.split('-', 1)[0]].add(p)
def read_group(interval, domain=None):
main_domain = [('id', 'in', all_partners)]
if domain:
domain = ['&'] + main_domain + domain
else:
domain = main_domain
rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval)
result = {}
for r in rg:
result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain']))
return result
self.assertEqual(len(read_group('day')), len(partners_by_day))
self.assertEqual(len(read_group('month')), len(partners_by_month))
self.assertEqual(len(read_group('year')), len(partners_by_year))
rg = self.partner.read_group(self.cr, self.uid, [('id', 'in', all_partners)],
['date'], ['date:month', 'date:day'], lazy=False)
self.assertEqual(len(rg), len(all_partners))
class TestInherits(common.TransactionCase):
""" test the behavior of the orm for models that use _inherits;
specifically: res.users, that inherits from res.partner
"""
def setUp(self):
super(TestInherits, self).setUp()
self.partner = self.registry('res.partner')
self.user = self.registry('res.users')
def test_default(self):
""" `default_get` cannot return a dictionary or a new id """
defaults = self.user.default_get(self.cr, UID, ['partner_id'])
if 'partner_id' in defaults:
self.assertIsInstance(defaults['partner_id'], (bool, int, long))
def test_create(self):
""" creating a user should automatically create a new partner """
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo = self.user.browse(self.cr, UID, foo_id)
self.assertNotIn(foo.partner_id.id, partners_before)
def test_create_with_ancestor(self):
""" creating a user with a specific 'partner_id' should not create a new partner """
par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(set(partners_before), set(partners_after))
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, 'Foo')
self.assertEqual(foo.partner_id.id, par_id)
@mute_logger('openerp.models')
def test_read(self):
""" inherited fields should be read without any indirection """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo_values, = self.user.read(self.cr, UID, [foo_id])
partner_id = foo_values['partner_id'][0]
partner_values, = self.partner.read(self.cr, UID, [partner_id])
self.assertEqual(foo_values['name'], partner_values['name'])
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, foo.partner_id.name)
@mute_logger('openerp.models')
def test_copy(self):
""" copying a user should automatically copy its partner, too """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
bar_id = self.user.copy(self.cr, UID, foo_id, {'login': 'bar', 'password': 'bar'})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
self.assertEqual(foo_before, foo_after)
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertEqual(bar.login, 'bar')
self.assertNotEqual(foo.id, bar.id)
self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
@mute_logger('openerp.models')
def test_copy_with_ancestor(self):
""" copying a user with 'parent_id' in defaults should not duplicate the partner """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo',
'login_date': '2016-01-01', 'signature': 'XXX'})
par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
partners_before = self.partner.search(self.cr, UID, [])
bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(foo_before, foo_after)
self.assertEqual(set(partners_before), set(partners_after))
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertNotEqual(foo.id, bar.id)
self.assertEqual(bar.partner_id.id, par_id)
self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
self.assertFalse(bar.login_date, "login_date should not be copied from original record")
self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
self.assertEqual(bar.signature, foo.signature, "signature should be copied")
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
def sorted_by_id(list_of_dicts):
"sort dictionaries by their 'id' field; useful for comparisons"
return sorted(list_of_dicts, key=lambda d: d.get('id'))
class TestO2MSerialization(common.TransactionCase):
""" test the orm method 'write' on one2many fields """
def setUp(self):
super(TestO2MSerialization, self).setUp()
self.partner = self.registry('res.partner')
def test_no_command(self):
" empty list of commands yields an empty list of records "
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [])
self.assertEqual(results, [])
def test_CREATE_commands(self):
" returns the VALUES dict as-is "
values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', map(CREATE, values))
self.assertEqual(results, values)
def test_LINK_TO_command(self):
" reads the records from the database, records are returned with their ids. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(LINK_TO, ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_bare_ids_command(self):
" same as the equivalent LINK_TO commands "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', ids, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_UPDATE_command(self):
" take the in-db records and merge the provided information in "
id_foo = self.partner.create(self.cr, UID, {'name': 'foo'})
id_bar = self.partner.create(self.cr, UID, {'name': 'bar'})
id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'})
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
LINK_TO(id_foo),
UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}),
UPDATE(id_baz, {'name': 'quux'})
], ['name', 'city'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': id_foo, 'name': 'foo', 'city': False},
{'id': id_bar, 'name': 'qux', 'city': 'tagtag'},
{'id': id_baz, 'name': 'quux', 'city': 'tag'}
]))
def test_DELETE_command(self):
" deleted records are not returned at all. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(results, [])
def test_mixed_commands(self):
ids = [
self.partner.create(self.cr, UID, {'name': name})
for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply']
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
CREATE({'name': 'foo'}),
UPDATE(ids[0], {'name': 'bar'}),
LINK_TO(ids[1]),
DELETE(ids[2]),
UPDATE(ids[3], {'name': 'quux',}),
UPDATE(ids[4], {'name': 'corge'}),
CREATE({'name': 'grault'}),
LINK_TO(ids[5])
], ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'name': 'foo'},
{'id': ids[0], 'name': 'bar'},
{'id': ids[1], 'name': 'baz'},
{'id': ids[3], 'name': 'quux'},
{'id': ids[4], 'name': 'corge'},
{'name': 'grault'},
{'id': ids[5], 'name': 'garply'}
]))
def test_LINK_TO_pairs(self):
"LINK_TO commands can be written as pairs, instead of triplets"
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(lambda id: (4, id), ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_singleton_commands(self):
"DELETE_ALL can appear as a singleton"
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [DELETE_ALL()], ['name'])
self.assertEqual(results, [])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,223,248,471,134,870,000 | 8,319,836,732,641,246,000 | 41.645238 | 137 | 0.572218 | false |
alirizakeles/memopol-core | memopol/reps/api.py | 2 | 2647 | from tastypie import fields
from tastypie.resources import ModelResource
from memopol.reps.models import Party,\
Opinion,\
Representative,\
PartyRepresentative,\
Email,\
CV,\
WebSite,\
OpinionREP
class REPPartyResource(ModelResource):
partyrepresentative_set = fields.ToManyField("memopol.reps.api.REPPartyRepresentativeResource", "partyrepresentative_set")
class Meta:
queryset = Party.objects.all()
class REPOpinionResource(ModelResource):
opinionrep_set = fields.ToManyField("memopol.reps.api.REPOpinionREPResource", "opinionrep_set")
class Meta:
queryset = Opinion.objects.all()
class REPRepresentativeResource(ModelResource):
opinionrep_set = fields.ToManyField("memopol.reps.api.REPOpinionREPResource", "opinionrep_set")
email_set = fields.ToManyField("memopol.reps.api.REPEmailResource", "email_set")
website_set = fields.ToManyField("memopol.reps.api.REPWebSiteResource", "website_set")
cv_set = fields.ToManyField("memopol.reps.api.REPCVResource", "cv_set")
partyrepresentative_set = fields.ToManyField("memopol.reps.api.REPPartyRepresentativeResource", "partyrepresentative_set")
score_set = fields.ToManyField("votes.api.ScoreResource", "score_set")
vote_set = fields.ToManyField("votes.api.VoteResource", "vote_set")
class Meta:
queryset = Representative.objects.all()
class REPPartyRepresentativeResource(ModelResource):
representative = fields.ForeignKey(REPRepresentativeResource, "representative")
party = fields.ForeignKey(REPPartyResource, "party")
class Meta:
queryset = PartyRepresentative.objects.all()
class REPEmailResource(ModelResource):
representative = fields.ForeignKey(REPRepresentativeResource, "representative")
class Meta:
queryset = Email.objects.all()
class REPCVResource(ModelResource):
representative = fields.ForeignKey(REPRepresentativeResource, "representative")
class Meta:
queryset = CV.objects.all()
class REPWebSiteResource(ModelResource):
representative = fields.ForeignKey(REPRepresentativeResource, "representative")
class Meta:
queryset = WebSite.objects.all()
class REPOpinionREPResource(ModelResource):
representative = fields.ForeignKey(REPRepresentativeResource, "representative")
opinion = fields.ForeignKey(REPOpinionResource, "opinion")
class Meta:
queryset = OpinionREP.objects.all()
| gpl-3.0 | 2,599,742,204,239,988,000 | -3,196,161,513,034,924,000 | 34.77027 | 126 | 0.694371 | false |
ndtran/compassion-switzerland | sponsorship_switzerland/__openerp__.py | 2 | 1853 | # -*- encoding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Tailor Sponsorships to Compassion CH needs',
'version': '1.0',
'category': 'Other',
'author': 'Compassion CH',
'website': 'http://www.compassion.ch',
'depends': ['sponsorship_tracking'],
'data': [
'view/contracts_view.xml',
'data/install.xml'],
'js': ['static/src/js/sponsorship_tracking_kanban.js'],
'demo': [],
'installable': True,
'auto_install': False,
}
| agpl-3.0 | -5,463,751,273,843,552,000 | 6,288,790,295,555,642,000 | 39.177778 | 78 | 0.474906 | false |
redhat-openstack/swift | swift/common/middleware/cname_lookup.py | 29 | 6766 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CNAME Lookup Middleware
Middleware that translates an unknown domain in the host header to
something that ends with the configured storage_domain by looking up
the given domain's CNAME record in DNS.
This middleware will continue to follow a CNAME chain in DNS until it finds
a record ending in the configured storage domain or it reaches the configured
maximum lookup depth. If a match is found, the environment's Host header is
rewritten and the request is passed further down the WSGI chain.
"""
from six.moves import range
import socket
from swift import gettext_ as _
try:
import dns.resolver
from dns.exception import DNSException
from dns.resolver import NXDOMAIN, NoAnswer
except ImportError:
# catch this to allow docs to be built without the dependency
MODULE_DEPENDENCY_MET = False
else: # executed if the try block finishes with no errors
MODULE_DEPENDENCY_MET = True
from swift.common.swob import Request, HTTPBadRequest
from swift.common.utils import cache_from_env, get_logger, list_from_csv
def lookup_cname(domain): # pragma: no cover
"""
Given a domain, returns its DNS CNAME mapping and DNS ttl.
:param domain: domain to query on
:returns: (ttl, result)
"""
try:
answer = dns.resolver.query(domain, 'CNAME').rrset
ttl = answer.ttl
result = answer.items[0].to_text()
result = result.rstrip('.')
return ttl, result
except (DNSException, NXDOMAIN, NoAnswer):
return 0, None
def is_ip(domain):
try:
socket.inet_pton(socket.AF_INET, domain)
return True
except socket.error:
try:
socket.inet_pton(socket.AF_INET6, domain)
return True
except socket.error:
return False
class CNAMELookupMiddleware(object):
"""
CNAME Lookup Middleware
See above for a full description.
:param app: The next WSGI filter or app in the paste.deploy
chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
if not MODULE_DEPENDENCY_MET:
# reraise the exception if the dependency wasn't met
raise ImportError('dnspython is required for this module')
self.app = app
storage_domain = conf.get('storage_domain', 'example.com')
self.storage_domain = ['.' + s for s in
list_from_csv(storage_domain)
if not s.startswith('.')]
self.storage_domain += [s for s in list_from_csv(storage_domain)
if s.startswith('.')]
self.lookup_depth = int(conf.get('lookup_depth', '1'))
self.memcache = None
self.logger = get_logger(conf, log_route='cname-lookup')
def _domain_endswith_in_storage_domain(self, a_domain):
for domain in self.storage_domain:
if a_domain.endswith(domain):
return True
return False
def __call__(self, env, start_response):
if not self.storage_domain:
return self.app(env, start_response)
if 'HTTP_HOST' in env:
given_domain = env['HTTP_HOST']
else:
given_domain = env['SERVER_NAME']
port = ''
if ':' in given_domain:
given_domain, port = given_domain.rsplit(':', 1)
if is_ip(given_domain):
return self.app(env, start_response)
a_domain = given_domain
if not self._domain_endswith_in_storage_domain(a_domain):
if self.memcache is None:
self.memcache = cache_from_env(env)
error = True
for tries in range(self.lookup_depth):
found_domain = None
if self.memcache:
memcache_key = ''.join(['cname-', a_domain])
found_domain = self.memcache.get(memcache_key)
if not found_domain:
ttl, found_domain = lookup_cname(a_domain)
if self.memcache:
memcache_key = ''.join(['cname-', given_domain])
self.memcache.set(memcache_key, found_domain,
time=ttl)
if found_domain is None or found_domain == a_domain:
# no CNAME records or we're at the last lookup
error = True
found_domain = None
break
elif self._domain_endswith_in_storage_domain(found_domain):
# Found it!
self.logger.info(
_('Mapped %(given_domain)s to %(found_domain)s') %
{'given_domain': given_domain,
'found_domain': found_domain})
if port:
env['HTTP_HOST'] = ':'.join([found_domain, port])
else:
env['HTTP_HOST'] = found_domain
error = False
break
else:
# try one more deep in the chain
self.logger.debug(
_('Following CNAME chain for '
'%(given_domain)s to %(found_domain)s') %
{'given_domain': given_domain,
'found_domain': found_domain})
a_domain = found_domain
if error:
if found_domain:
msg = 'CNAME lookup failed after %d tries' % \
self.lookup_depth
else:
msg = 'CNAME lookup failed to resolve to a valid domain'
resp = HTTPBadRequest(request=Request(env), body=msg,
content_type='text/plain')
return resp(env, start_response)
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf): # pragma: no cover
conf = global_conf.copy()
conf.update(local_conf)
def cname_filter(app):
return CNAMELookupMiddleware(app, conf)
return cname_filter
| apache-2.0 | 4,530,165,919,614,941,700 | -4,575,631,788,970,308,600 | 36.381215 | 77 | 0.572421 | false |
tzewangdorje/SIPserv | Twisted-13.1.0/twisted/test/test_hook.py | 41 | 4250 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.hook module.
"""
from twisted.python import hook
from twisted.trial import unittest
class BaseClass:
"""
dummy class to help in testing.
"""
def __init__(self):
"""
dummy initializer
"""
self.calledBasePre = 0
self.calledBasePost = 0
self.calledBase = 0
def func(self, a, b):
"""
dummy method
"""
assert a == 1
assert b == 2
self.calledBase = self.calledBase + 1
class SubClass(BaseClass):
"""
another dummy class
"""
def __init__(self):
"""
another dummy initializer
"""
BaseClass.__init__(self)
self.calledSubPre = 0
self.calledSubPost = 0
self.calledSub = 0
def func(self, a, b):
"""
another dummy function
"""
assert a == 1
assert b == 2
BaseClass.func(self, a, b)
self.calledSub = self.calledSub + 1
_clean_BaseClass = BaseClass.__dict__.copy()
_clean_SubClass = SubClass.__dict__.copy()
def basePre(base, a, b):
"""
a pre-hook for the base class
"""
base.calledBasePre = base.calledBasePre + 1
def basePost(base, a, b):
"""
a post-hook for the base class
"""
base.calledBasePost = base.calledBasePost + 1
def subPre(sub, a, b):
"""
a pre-hook for the subclass
"""
sub.calledSubPre = sub.calledSubPre + 1
def subPost(sub, a, b):
"""
a post-hook for the subclass
"""
sub.calledSubPost = sub.calledSubPost + 1
class HookTestCase(unittest.TestCase):
"""
test case to make sure hooks are called
"""
def setUp(self):
"""Make sure we have clean versions of our classes."""
BaseClass.__dict__.clear()
BaseClass.__dict__.update(_clean_BaseClass)
SubClass.__dict__.clear()
SubClass.__dict__.update(_clean_SubClass)
def testBaseHook(self):
"""make sure that the base class's hook is called reliably
"""
base = BaseClass()
self.assertEqual(base.calledBase, 0)
self.assertEqual(base.calledBasePre, 0)
base.func(1,2)
self.assertEqual(base.calledBase, 1)
self.assertEqual(base.calledBasePre, 0)
hook.addPre(BaseClass, "func", basePre)
base.func(1, b=2)
self.assertEqual(base.calledBase, 2)
self.assertEqual(base.calledBasePre, 1)
hook.addPost(BaseClass, "func", basePost)
base.func(1, b=2)
self.assertEqual(base.calledBasePost, 1)
self.assertEqual(base.calledBase, 3)
self.assertEqual(base.calledBasePre, 2)
hook.removePre(BaseClass, "func", basePre)
hook.removePost(BaseClass, "func", basePost)
base.func(1, b=2)
self.assertEqual(base.calledBasePost, 1)
self.assertEqual(base.calledBase, 4)
self.assertEqual(base.calledBasePre, 2)
def testSubHook(self):
"""test interactions between base-class hooks and subclass hooks
"""
sub = SubClass()
self.assertEqual(sub.calledSub, 0)
self.assertEqual(sub.calledBase, 0)
sub.func(1, b=2)
self.assertEqual(sub.calledSub, 1)
self.assertEqual(sub.calledBase, 1)
hook.addPre(SubClass, 'func', subPre)
self.assertEqual(sub.calledSub, 1)
self.assertEqual(sub.calledBase, 1)
self.assertEqual(sub.calledSubPre, 0)
self.assertEqual(sub.calledBasePre, 0)
sub.func(1, b=2)
self.assertEqual(sub.calledSub, 2)
self.assertEqual(sub.calledBase, 2)
self.assertEqual(sub.calledSubPre, 1)
self.assertEqual(sub.calledBasePre, 0)
# let the pain begin
hook.addPre(BaseClass, 'func', basePre)
BaseClass.func(sub, 1, b=2)
# sub.func(1, b=2)
self.assertEqual(sub.calledBase, 3)
self.assertEqual(sub.calledBasePre, 1, str(sub.calledBasePre))
sub.func(1, b=2)
self.assertEqual(sub.calledBasePre, 2)
self.assertEqual(sub.calledBase, 4)
self.assertEqual(sub.calledSubPre, 2)
self.assertEqual(sub.calledSub, 3)
testCases = [HookTestCase]
| gpl-3.0 | 1,740,794,069,111,888,100 | 1,391,452,099,938,987,800 | 27.333333 | 72 | 0.599765 | false |
xyguo/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linetypes='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause | -159,324,611,316,039,870 | -7,372,386,474,535,490,000 | 25.609756 | 74 | 0.610449 | false |
Luxoft/Twister | binaries/GitPlugin/Git/GITPlugin.py | 3 | 9694 |
# version: 2.006
import os, sys
import shutil
import time
import pexpect
from BasePlugin import BasePlugin
#
class Plugin(BasePlugin):
"""
GIT Plugin has a few parameters:
- server complete path
- branch used for clone
- user and password to connect to server
- snapshot folder, where all data is cloned
If command is Snapshot, execute a GIT clone;
if the Snapshot folder is already present, delete it, then GIT clone.
If command is Update and Overwrite is false, execute a GIT checkout and GIT pull on the specified branch;
if Overwrite is true, delete the folder, then GIT clone for the specified branch.
"""
def run(self, args):
src = self.data.get('server')
dst = self.data.get('snapshot')
if not args.get('command'):
return '*ERROR* Must specify a command like `snapshot` or `update` !'
if args['command'] == ['snapshot']:
return self.execCheckout(src, dst, 'clone', overwrite=True)
elif args['command'] == ['update'] and args['overwrite'] == ['false']:
return self.execCheckout(src, dst, 'pull', overwrite=False)
elif args['command'] == ['update'] and args['overwrite'] == ['true']:
return self.execCheckout(src, dst, 'pull', overwrite=True)
elif args['command'] == ['delete']:
return self.execCheckout('', '', '', overwrite=True)
else:
return 'Invalid command: `{} & {}`!'.format(args['command'], args['overwrite'])
def execCheckout(self, src, dst, command, overwrite=False):
usr = self.data['username']
pwd = self.data['password']
child = pexpect.spawn(['bash'])
child.logfile = sys.stdout
child.sendline('su {}'.format(self.user))
try:
child.expect('.*$')
except Exception as e:
print 'Error: Unable to switch to user {}'.format(self.user)
return 'Error on switching to user {usr}'.format(usr=self.user)
time.sleep(1)
child.sendline('cd')
try:
child.expect('.*')
except Exception as e:
print 'Error: Unable to navigate to the user\'s {} home folder.'.format(self.user)
return 'Error on navigating to user\'s {usr} home folder.'.format(usr=self.user)
time.sleep(1)
if not src:
return '*ERROR* Git source folder is NULL !'
if '//' not in src:
return '*ERROR* Git source folder `{}` is invalid !'.format(src)
if not dst:
return '*ERROR* Git destination folder is NULL !'
src = src.replace('//', '//{}@'.format(usr))
branch = self.data['branch']
if not branch:
return 'You must specify a branch for snapshot/update!'
# Normal Git clone operation
if command == 'clone' or (command == 'pull' and overwrite):
if overwrite and os.path.exists(dst):
print 'GIT Plugin: Deleting folder `{}` ...'.format(dst)
shutil.rmtree(dst, ignore_errors=True)
to_exec = 'git clone -b {branch} {src} {dst}'.format(branch=branch, src=src, dst=dst)
print('GIT Plugin: Exec `{}` .'.format(to_exec.strip()))
child.sendline(to_exec.strip())
try:
i = child.expect(['.*password:','Are you sure.*','Permission denied'], 10)
if i == 0 and pwd:
child.sendline(pwd)
elif i == 1 and pwd:
child.sendline('yes')
time.sleep(1)
try:
child.expect('.*password:')
except Exception as e:
return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
cmd=command, src=src, dst=dst, e=e)
time.sleep(1)
child.sendline(pwd)
elif i == 2:
print 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
cmd=command, src=src, dst=dst, e='Permission denied!')
return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
cmd=command, src=src, dst=dst, e='Permission denied!')
except Exception as e:
return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
cmd=command, src=src, dst=dst, e=e)
time.sleep(1)
try:
i = child.expect(['Resolving deltas.*done\.',
'fatal: The remote end hung up unexpectedly',
'Permission denied',
'Could not read from remote repository'], None)
if i == 1:
# fatal: Remote branch branch_name not found in upstream origin
print 'Error on calling GIT clone: {} do not exist.'.format(branch)
return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`)! Branch {br} do not exist!'.format(
cmd=command, src=src, dst=dst,br=branch)
elif i == 2:
# that password is incorrect
print 'Error on calling GIT clone: Incorrect username or password for GIT repository.'
return 'Error on calling GIT clone: Incorrect username or password for GIT repository.'
elif i == 3:
# the path to the repository is incorrect
print 'Error on calling GIT clone: Incorrect path for GIT repository.'
return 'Error on calling GIT clone: Incorrect path for GIT repository.'
except Exception as e:
return 'Error after calling GIT {cmd}: `{e}`!'.format(cmd=command, e=e)
child.sendline('\n\n')
time.sleep(1)
print('-'*40)
# Git pull operation
elif command == 'pull':
if not os.path.exists(dst):
return 'Error: path `{}` does not exist!'.format(dst)
child.sendline('cd {}'.format(dst))
try:
i = child.expect(['Permission denied', 'No such file or directory', '{}'.format(dst)])
if i == 0:
return 'Error: cannot enter in directory: {}. Permission denied!'.format(dst)
elif i == 1:
return 'Error: cannot enter in directory: {}. No such file or directory!'.format(dst)
except Exception as e:
print 'Error: cannot enter in directory: {}'.format(dst)
                return 'Error: cannot enter in directory: `{dst}`!\n{e}'.format(dst=dst, e=e)
time.sleep(1)
to_exec = 'git checkout {}'.format(branch)
print('GIT Plugin: Exec `{}` .'.format(to_exec.strip()))
child.sendline(to_exec.strip())
time.sleep(1)
try:
i = child.expect(['Switched to.*',
'Your branch is up-to-date with',
'error',
'Not a git repository',
'Already on.*'], 30)
if i == 2:
# error: pathspec branch_name did not match any file(s) known to git.
# the specified branch does not exist on the repository
print 'Error on calling GIT checkout: branch {} do not exist.'.format(branch)
return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`)!\n\
Branch `{br}` does not exist!'.format(cmd=command, src=src, dst=dst, br=branch)
elif i == 3:
# fatal: Not a git repository (or any of the parent directories): .git
# Trying to make a checkout without making a clone first
print 'Error on calling GIT checkout: repository {} does not exist.'.format(branch)
return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`)!\n\
Make a snapshot before doing an update!'.format(cmd=command, src=src, dst=dst, br=branch)
except Exception as e:
print 'Error on calling {}. Got unexpected response from GIT.'.format(to_exec)
return 'Error on calling GIT {cmd} (from `{src}` to `{dst}`): `{e}`!'.format(
cmd=command, src=src, dst=dst, e=e)
time.sleep(1)
child.sendline('git pull -f')
try:
child.expect('.*password:')
except Exception as e:
print 'Error after calling GIT pull -f'
return 'Error after calling GIT {cmd}: `{e}`!'.format(cmd=command, e=e)
time.sleep(1)
child.sendline(pwd)
time.sleep(1)
try:
i = child.expect(['up-to-date', 'files changed', 'Permission denied'], 120)
if i == 2:
print 'Error on calling GIT pull: Incorrect password'
return 'Error on calling GIT pull: Incorrect password'
except Exception as e:
return 'Error after calling GIT {cmd}: `{e}`!'.format(cmd=command, e=e)
child.sendline('\n\n')
time.sleep(1)
print('-'*40)
else:
return '*ERROR* Unknown plugin command `{}`!'.format(command)
return 'true'
# | apache-2.0 | 7,007,953,766,511,235,000 | 6,417,670,121,122,522,000 | 40.969697 | 116 | 0.510625 | false |
topix-hackademy/social-listener | application/twitter/tweets/collector.py | 1 | 3236 | from application.mongo import Connection
from application.twitter.interface import TwitterInterface
from application.twitter.tweets.fetcher import TweetsFetcher
from application.processmanager import ProcessManager
from application.utils.helpers import what_time_is_it
import logging
class TweetCollector(TwitterInterface):
def __init__(self, user, *args, **kwargs):
"""
        Twitter Collector. This class is used to retrieve tweets from a specific user
"""
super(TweetCollector, self).__init__(*args, **kwargs)
self.user = user
self.process_name = "Tweets Collector: <%s>" % user
self.fetcherInstance = TweetsFetcher(self.auth, self.user, self.process_name)
def __str__(self):
"""
String representation
:return:
"""
return "Tweet Collector for user <{user}>".format(user=self.user)
def start(self, process_manager):
"""
Start async job for user's tweets
:param process_manager: Process manager instance
:return:
"""
try:
process_manager.create_process(target=self.fetcher,
name=self.process_name,
ptype='twitter_collector')
except Exception:
raise Exception('Error Creating new Process')
def fetcher(self):
"""
Tweets loader
:return:
"""
for page in self.fetcherInstance.get_tweets():
for tweet in page:
try:
if not Connection.Instance().db.twitter.find_one({'user': tweet.user.screen_name,
'source': 'collector',
'data.id': tweet.id}):
Connection.Instance().db.twitter.insert_one({
'source': 'collector',
'data': {
'created_at': tweet.created_at,
'favorite_count': tweet.favorite_count,
'geo': tweet.geo,
'id': tweet.id,
'source': tweet.source,
'in_reply_to_screen_name': tweet.in_reply_to_screen_name,
'in_reply_to_status_id': tweet.in_reply_to_status_id,
'in_reply_to_user_id': tweet.in_reply_to_user_id,
'retweet_count': tweet.retweet_count,
'retweeted': tweet.retweeted,
'text': tweet.text,
'entities': tweet.entities
},
'user': tweet.user.screen_name,
'created': what_time_is_it()
})
except Exception as genericException:
logging.error("MongoDB Insert Error in collector: %s" % genericException)
import multiprocessing
ProcessManager.terminate_process(multiprocessing.current_process().pid, True)
| mit | -2,862,836,286,225,588,000 | 5,803,678,356,733,388,000 | 43.328767 | 101 | 0.491656 | false |
Manuel4131/swampdragon | swampdragon/serializers/serializer_tools.py | 9 | 3428 | from collections import namedtuple
from django.db.models.fields.related import ForeignKey, ReverseSingleRelatedObjectDescriptor, \
ManyRelatedObjectsDescriptor, ReverseManyRelatedObjectsDescriptor, ForeignRelatedObjectsDescriptor, \
SingleRelatedObjectDescriptor
# from django.db.models.related import RelatedObject
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.fields.related import ManyToManyField
class FieldType(namedtuple('FieldType', 'field, model, fk, m2m')):
'''
Determine if a field is an m2m, reverse m2m, fk or reverse fk
'''
@property
def is_m2m(self):
return self.fk is False and self.m2m is True and isinstance(self.field, ForeignObjectRel)
@property
def is_reverse_m2m(self):
return self.fk is True and self.m2m is True and isinstance(self.field, ManyToManyField)
@property
def is_fk(self):
return self.fk is True and self.m2m is False and isinstance(self.field, ForeignKey)
@property
def is_reverse_fk(self):
return self.fk is False and self.m2m is False and isinstance(self.field, ForeignObjectRel)
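# Illustrative sketch (the model and field names are invented): Django's
# legacy Model._meta.get_field_by_name() returns a (field, model, direct, m2m)
# tuple, which is exactly what this namedtuple wraps:
#
#   ft = FieldType(*SomeModel._meta.get_field_by_name('related_items'))
#   if ft.is_reverse_fk:
#       name = ft.field.var_name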
def get_serializer_relationship_field(serializer, related_serializer):
if isinstance(serializer, type):
model = serializer().opts.model
else:
model = serializer.opts.model
if isinstance(related_serializer, type):
related_model = related_serializer().opts.model
else:
related_model = related_serializer.opts.model
for field_name in related_model._meta.get_all_field_names():
field_type = FieldType(*related_model._meta.get_field_by_name(field_name))
field = field_type.field
# Foreign key
if field_type.is_fk and field.rel.to is model:
return field.verbose_name
# Reverse foreign key
if field_type.is_reverse_fk and field.model is model:
return field.var_name
# M2m fields
if field_type.is_m2m and field.model is model:
return field.var_name
# Reverse m2m field
if field_type.is_reverse_m2m and field.rel.to is model:
return field.attname
def get_id_mappings(serializer):
if not serializer.instance:
return {}
data = {}
for field_name in serializer.opts.publish_fields:
if not hasattr(serializer, field_name):
continue
serializable_field = serializer._get_related_serializer(field_name)
if not hasattr(serializable_field, 'serialize'):
continue
field_type = getattr(serializer.opts.model, field_name)
is_fk = isinstance(field_type, ReverseSingleRelatedObjectDescriptor)
is_o2o = isinstance(field_type, SingleRelatedObjectDescriptor)
is_reverse_fk = isinstance(field_type, ForeignRelatedObjectsDescriptor)
is_m2m = isinstance(field_type, ManyRelatedObjectsDescriptor)
is_reverse_m2m = isinstance(field_type, ReverseManyRelatedObjectsDescriptor)
try:
val = getattr(serializer.instance, field_name)
except:
continue
if not val:
continue
if is_fk or is_o2o:
data['{}'.format(field_name)] = val.pk
continue
if is_reverse_fk or is_m2m or is_reverse_m2m:
data['{}'.format(field_name)] = list(val.all().values_list('pk', flat=True))
continue
return data
| bsd-3-clause | 6,532,527,885,721,574,000 | 1,805,513,185,774,565,000 | 33.979592 | 105 | 0.672695 | false |
baris/pushmanager | testing/testdb.py | 1 | 4248 | #!/usr/bin/python
from datetime import datetime, timedelta
import os
import sqlite3
import tempfile
import time
from core import db
def create_temp_db_file():
fd, db_file_path = tempfile.mkstemp(suffix="pushmanager.db")
os.close(fd)
return db_file_path
def get_temp_db_uri(dbfile=None):
if not dbfile:
dbfile = create_temp_db_file()
return "sqlite:///" + dbfile
def make_test_db(dbfile=None):
if not dbfile:
dbfile = create_temp_db_file()
testsql = open(
os.path.join(
os.path.dirname(__file__),
"testdb.sql"
)
).read()
test_db = sqlite3.connect(dbfile)
test_db.cursor().executescript(testsql)
test_db.commit()
test_db.close()
return dbfile
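# Hedged usage sketch: a test would typically build the fixture database and
# hand the resulting URI to whatever configures core.db (not shown here);
# only make_test_db() and get_temp_db_uri() below come from this module.
#
#   dbfile = make_test_db()
#   uri = get_temp_db_uri(dbfile)   # "sqlite:///<tempfile>" for SQLAlchemy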
class FakeDataMixin(object):
now = time.time()
yesterday = time.mktime((datetime.now() - timedelta(days=1)).timetuple())
push_data = [
[10, 'OnePush', 'bmetin', 'deploy-1', 'abc', 'live', yesterday, now, 'regular', ''],
[11, 'TwoPush', 'troscoe', 'deploy-2', 'def', 'accepting', now, now, 'regular', ''],
[12, 'RedPush', 'heyjoe', 'deploy-3', 'ghi', 'accepting', now, now, 'regular', ''],
[13, 'BluePush', 'humpty', 'deploy-4', 'jkl', 'accepting', now, now, 'regular', ''],
]
push_keys = [
'id', 'title', 'user', 'branch', 'revision', 'state',
'created', 'modified', 'pushtype', 'extra_pings'
]
fake_revision = "0"*40
request_data = [
[10, 'keysersoze', 'requested', 'keysersoze', 'usual_fix', '', now, now, 'Fix stuff', 'no comment', 12345, '', fake_revision],
[11, 'bmetin', 'requested', 'bmetin', 'fix1', '', now, now, 'Fixing more stuff', 'yes comment', 234, '', fake_revision],
[12, 'testuser1', 'requested', 'testuser2', 'fix1', 'search', now, now, 'Fixing1', 'no comment', 123, '', fake_revision],
[13, 'testuser2', 'requested', 'testuser2', 'fix2', 'search', now, now, 'Fixing2', 'yes comment', 456, '', fake_revision],
]
request_keys = [
'id', 'user', 'state', 'repo', 'branch', 'tags', 'created', 'modified',
'title', 'comments', 'reviewid', 'description', 'revision'
]
def on_db_return(self, success, db_results):
assert success
def make_push_dict(self, data):
return dict(zip(self.push_keys, data))
def make_request_dict(self, data):
return dict(zip(self.request_keys, data))
def insert_pushes(self):
push_queries = []
for pd in self.push_data:
push_queries.append(db.push_pushes.insert(self.make_push_dict(pd)))
db.execute_transaction_cb(push_queries, self.on_db_return)
def insert_requests(self):
request_queries = []
for rd in self.request_data:
request_queries.append(db.push_requests.insert(self.make_request_dict(rd)))
db.execute_transaction_cb(request_queries, self.on_db_return)
def insert_pushcontent(self, requestid, pushid):
db.execute_cb(
db.push_pushcontents.insert({'request': requestid, 'push': pushid}),
self.on_db_return
)
def get_push_for_request(self, requestid):
pushid = [None]
def on_select_return(success, db_results):
assert success
_, pushid[0] = db_results.fetchone()
# check if we have a push in with request
first_pushcontent_query = db.push_pushcontents.select(
db.push_pushcontents.c.request == requestid
)
db.execute_cb(first_pushcontent_query, on_select_return)
return pushid[0]
def get_pushes(self):
pushes = [None]
def on_select_return(success, db_results):
assert success
pushes[0] = db_results.fetchall()
db.execute_cb(db.push_pushes.select(), on_select_return)
return pushes[0]
def get_requests(self):
requests = [None]
def on_select_return(success, db_results):
assert success
requests[0] = db_results.fetchall()
db.execute_cb(db.push_requests.select(), on_select_return)
return requests[0]
def get_requests_by_user(self, user):
return [req for req in self.get_requests() if req['user'] == user]
| apache-2.0 | -5,860,958,603,917,390,000 | 8,021,900,257,754,444,000 | 32.714286 | 134 | 0.591102 | false |
c2theg/DDoS_Information_Sharing | libraries/suds-jurko-0.6/suds/properties.py | 18 | 15900 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Properties classes.
"""
class AutoLinker(object):
"""
Base class, provides interface for I{automatic} link
management between a L{Properties} object and the L{Properties}
contained within I{values}.
"""
def updated(self, properties, prev, next):
"""
        Notification that a value was updated and the linkage
        between the I{properties} contained within I{prev} needs to
        be relinked to the L{Properties} contained within the
        I{next} value.
"""
pass
class Link(object):
"""
Property link object.
@ivar endpoints: A tuple of the (2) endpoints of the link.
@type endpoints: tuple(2)
"""
def __init__(self, a, b):
"""
@param a: Property (A) to link.
@type a: L{Property}
@param b: Property (B) to link.
@type b: L{Property}
"""
pA = Endpoint(self, a)
pB = Endpoint(self, b)
self.endpoints = (pA, pB)
self.validate(a, b)
a.links.append(pB)
b.links.append(pA)
def validate(self, pA, pB):
"""
Validate that the two properties may be linked.
@param pA: Endpoint (A) to link.
@type pA: L{Endpoint}
@param pB: Endpoint (B) to link.
@type pB: L{Endpoint}
@return: self
@rtype: L{Link}
"""
if pA in pB.links or \
pB in pA.links:
raise Exception, 'Already linked'
dA = pA.domains()
dB = pB.domains()
for d in dA:
if d in dB:
raise Exception, 'Duplicate domain "%s" found' % d
for d in dB:
if d in dA:
raise Exception, 'Duplicate domain "%s" found' % d
kA = pA.keys()
kB = pB.keys()
for k in kA:
if k in kB:
raise Exception, 'Duplicate key %s found' % k
for k in kB:
if k in kA:
raise Exception, 'Duplicate key %s found' % k
return self
def teardown(self):
"""
Teardown the link.
Removes endpoints from properties I{links} collection.
@return: self
@rtype: L{Link}
"""
pA, pB = self.endpoints
if pA in pB.links:
pB.links.remove(pA)
if pB in pA.links:
pA.links.remove(pB)
return self
class Endpoint(object):
"""
Link endpoint (wrapper).
@ivar link: The associated link.
@type link: L{Link}
@ivar target: The properties object.
@type target: L{Property}
"""
def __init__(self, link, target):
self.link = link
self.target = target
def teardown(self):
return self.link.teardown()
def __eq__(self, rhs):
return ( self.target == rhs )
def __hash__(self):
return hash(self.target)
def __getattr__(self, name):
return getattr(self.target, name)
class Definition:
"""
Property definition.
@ivar name: The property name.
@type name: str
@ivar classes: The (class) list of permitted values
@type classes: tuple
@ivar default: The default value.
@ivar type: any
"""
def __init__(self, name, classes, default, linker=AutoLinker()):
"""
@param name: The property name.
@type name: str
@param classes: The (class) list of permitted values
@type classes: tuple
@param default: The default value.
@type default: any
"""
if not isinstance(classes, (list, tuple)):
classes = (classes,)
self.name = name
self.classes = classes
self.default = default
self.linker = linker
def nvl(self, value=None):
"""
Convert the I{value} into the default when I{None}.
@param value: The proposed value.
@type value: any
@return: The I{default} when I{value} is I{None}, else I{value}.
@rtype: any
"""
if value is None:
return self.default
else:
return value
def validate(self, value):
"""
Validate the I{value} is of the correct class.
@param value: The value to validate.
@type value: any
@raise AttributeError: When I{value} is invalid.
"""
if value is None:
return
if len(self.classes) and \
not isinstance(value, self.classes):
msg = '"%s" must be: %s' % (self.name, self.classes)
raise AttributeError,msg
def __repr__(self):
return '%s: %s' % (self.name, str(self))
def __str__(self):
s = []
if len(self.classes):
s.append('classes=%s' % str(self.classes))
else:
s.append('classes=*')
s.append("default=%s" % str(self.default))
return ', '.join(s)
class Properties:
"""
Represents basic application properties.
Provides basic type validation, default values and
link/synchronization behavior.
@ivar domain: The domain name.
@type domain: str
@ivar definitions: A table of property definitions.
@type definitions: {name: L{Definition}}
@ivar links: A list of linked property objects used to create
a network of properties.
@type links: [L{Property},..]
@ivar defined: A dict of property values.
@type defined: dict
"""
def __init__(self, domain, definitions, kwargs):
"""
@param domain: The property domain name.
@type domain: str
@param definitions: A table of property definitions.
@type definitions: {name: L{Definition}}
@param kwargs: A list of property name/values to set.
@type kwargs: dict
"""
self.definitions = {}
for d in definitions:
self.definitions[d.name] = d
self.domain = domain
self.links = []
self.defined = {}
self.modified = set()
self.prime()
self.update(kwargs)
def definition(self, name):
"""
Get the definition for the property I{name}.
@param name: The property I{name} to find the definition for.
@type name: str
@return: The property definition
@rtype: L{Definition}
@raise AttributeError: On not found.
"""
d = self.definitions.get(name)
if d is None:
raise AttributeError(name)
return d
def update(self, other):
"""
Update the property values as specified by keyword/value.
@param other: An object to update from.
@type other: (dict|L{Properties})
@return: self
@rtype: L{Properties}
"""
if isinstance(other, Properties):
other = other.defined
for n,v in other.items():
self.set(n, v)
return self
def notset(self, name):
"""
Get whether a property has never been set by I{name}.
@param name: A property name.
@type name: str
@return: True if never been set.
@rtype: bool
"""
        return self.provider(name).__notset(name)
def set(self, name, value):
"""
Set the I{value} of a property by I{name}.
The value is validated against the definition and set
to the default when I{value} is None.
@param name: The property name.
@type name: str
@param value: The new property value.
@type value: any
@return: self
@rtype: L{Properties}
"""
self.provider(name).__set(name, value)
return self
def unset(self, name):
"""
Unset a property by I{name}.
@param name: A property name.
@type name: str
@return: self
@rtype: L{Properties}
"""
self.provider(name).__set(name, None)
return self
def get(self, name, *df):
"""
Get the value of a property by I{name}.
@param name: The property name.
@type name: str
@param df: An optional value to be returned when the value
is not set
@type df: [1].
@return: The stored value, or I{df[0]} if not set.
@rtype: any
"""
return self.provider(name).__get(name, *df)
def link(self, other):
"""
        Link (associate) this object with an I{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
Link(self, other)
return self
def unlink(self, *others):
"""
Unlink (disassociate) the specified properties object.
@param others: The list object to unlink. Unspecified means unlink all.
@type others: [L{Properties},..]
@return: self
@rtype: L{Properties}
"""
if not len(others):
others = self.links[:]
for p in self.links[:]:
if p in others:
p.teardown()
return self
def provider(self, name, history=None):
"""
Find the provider of the property by I{name}.
@param name: The property name.
@type name: str
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: The provider when found. Otherwise, None (when nested)
and I{self} when not nested.
@rtype: L{Properties}
"""
if history is None:
history = []
history.append(self)
if name in self.definitions:
return self
for x in self.links:
if x in history:
continue
provider = x.provider(name, history)
if provider is not None:
return provider
history.remove(self)
if len(history):
return None
return self
def keys(self, history=None):
"""
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
keys = set()
keys.update(self.definitions.keys())
for x in self.links:
if x in history:
continue
keys.update(x.keys(history))
history.remove(self)
return keys
def domains(self, history=None):
"""
Get the set of I{all} domain names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of domain names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
domains = set()
domains.add(self.domain)
for x in self.links:
if x in history:
continue
domains.update(x.domains(history))
history.remove(self)
return domains
def prime(self):
"""
Prime the stored values based on default values
found in property definitions.
@return: self
@rtype: L{Properties}
"""
for d in self.definitions.values():
self.defined[d.name] = d.default
return self
def __notset(self, name):
return not (name in self.modified)
def __set(self, name, value):
d = self.definition(name)
d.validate(value)
value = d.nvl(value)
prev = self.defined[name]
self.defined[name] = value
self.modified.add(name)
d.linker.updated(self, prev, value)
def __get(self, name, *df):
d = self.definition(name)
value = self.defined.get(name)
if value == d.default and len(df):
value = df[0]
return value
def str(self, history):
s = []
s.append('Definitions:')
for d in self.definitions.values():
s.append('\t%s' % repr(d))
s.append('Content:')
for d in self.defined.items():
s.append('\t%s' % str(d))
if self not in history:
history.append(self)
s.append('Linked:')
for x in self.links:
s.append(x.str(history))
history.remove(self)
return '\n'.join(s)
def __repr__(self):
return str(self)
def __str__(self):
return self.str([])
class Skin(object):
"""
The meta-programming I{skin} around the L{Properties} object.
@ivar __pts__: The wrapped object.
@type __pts__: L{Properties}.
"""
def __init__(self, domain, definitions, kwargs):
self.__pts__ = Properties(domain, definitions, kwargs)
def __setattr__(self, name, value):
builtin = name.startswith('__') and name.endswith('__')
if builtin:
self.__dict__[name] = value
return
self.__pts__.set(name, value)
def __getattr__(self, name):
return self.__pts__.get(name)
def __repr__(self):
return str(self)
def __str__(self):
return str(self.__pts__)
class Unskin(object):
def __new__(self, *args, **kwargs):
return args[0].__pts__
class Inspector:
"""
Wrapper inspector.
"""
def __init__(self, options):
self.properties = options.__pts__
def get(self, name, *df):
"""
Get the value of a property by I{name}.
@param name: The property name.
@type name: str
@param df: An optional value to be returned when the value
is not set
@type df: [1].
@return: The stored value, or I{df[0]} if not set.
@rtype: any
"""
return self.properties.get(name, *df)
def update(self, **kwargs):
"""
Update the property values as specified by keyword/value.
@param kwargs: A list of property name/values to set.
@type kwargs: dict
@return: self
@rtype: L{Properties}
"""
return self.properties.update(**kwargs)
def link(self, other):
"""
        Link (associate) this object with an I{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.link(p)
def unlink(self, other):
"""
Unlink (disassociate) the specified properties object.
@param other: The object to unlink.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.unlink(p)
| mit | -75,534,008,657,906,340 | -3,962,169,434,075,460,600 | 28.499072 | 80 | 0.551384 | false |
andreashorn/lead_dbs | ext_libs/SlicerNetstim/WarpDrive/WarpDriveLib/Effects/Effect.py | 1 | 4254 | import vtk, qt, slicer
class AbstractEffect():
"""
One instance of this will be created per-view when the effect
is selected. It is responsible for implementing feedback and
label map changes in response to user input.
This class observes the editor parameter node to configure itself
and queries the current view for background and label volume
nodes to operate on.
"""
def __init__(self,sliceWidget):
# sliceWidget to operate on and convenience variables
# to access the internals
self.sliceWidget = sliceWidget
self.sliceLogic = sliceWidget.sliceLogic()
self.sliceView = self.sliceWidget.sliceView()
self.interactor = self.sliceView.interactorStyle().GetInteractor()
self.renderWindow = self.sliceWidget.sliceView().renderWindow()
self.renderer = self.renderWindow.GetRenderers().GetItemAsObject(0)
#self.editUtil = EditUtil.EditUtil()
# optionally set by users of the class
self.undoRedo = None
# actors in the renderer that need to be cleaned up on destruction
self.actors = []
# the current operation
self.actionState = None
# set up observers on the interactor
# - keep track of tags so these can be removed later
# - currently all editor effects are restricted to these events
# - make the observers high priority so they can override other
# event processors
self.interactorObserverTags = []
events = ( vtk.vtkCommand.LeftButtonPressEvent,
vtk.vtkCommand.LeftButtonReleaseEvent,
vtk.vtkCommand.MiddleButtonPressEvent,
vtk.vtkCommand.MiddleButtonReleaseEvent,
vtk.vtkCommand.RightButtonPressEvent,
vtk.vtkCommand.RightButtonReleaseEvent,
vtk.vtkCommand.LeftButtonDoubleClickEvent,
vtk.vtkCommand.MouseMoveEvent,
vtk.vtkCommand.KeyPressEvent,
vtk.vtkCommand.KeyReleaseEvent,
vtk.vtkCommand.EnterEvent,
vtk.vtkCommand.LeaveEvent,
vtk.vtkCommand.MouseWheelForwardEvent,
vtk.vtkCommand.MouseWheelBackwardEvent)
for e in events:
tag = self.interactor.AddObserver(e, self.processEvent, 1.0)
self.interactorObserverTags.append(tag)
self.sliceNodeTags = []
sliceNode = self.sliceLogic.GetSliceNode()
tag = sliceNode.AddObserver(vtk.vtkCommand.ModifiedEvent, self.processEvent, 1.0)
self.sliceNodeTags.append(tag)
# spot for tracking the current cursor while it is turned off for paining
self.savedCursor = None
def processEvent(self, caller=None, event=None):
"""Event filter that lisens for certain key events that
should be responded to by all events.
Currently:
'\\' - pick up paint color from current location (eyedropper)
"""
if event == "KeyPressEvent":
key = self.interactor.GetKeySym()
if key.lower() == 's':
return True
return False
def cursorOff(self):
"""Turn off and save the current cursor so
the user can see the background image during editing"""
qt.QApplication.setOverrideCursor(qt.QCursor(10))
#self.savedCursor = self.sliceWidget.cursor
#qt_BlankCursor = 10
#self.sliceWidget.setCursor(qt.QCursor(qt_BlankCursor))
def cursorOn(self):
"""Restore the saved cursor if it exists, otherwise
just restore the default cursor"""
qt.QApplication.restoreOverrideCursor()
#if self.savedCursor:
# self.sliceWidget.setCursor(self.savedCursor)
#else:
# self.sliceWidget.unsetCursor()
def abortEvent(self,event):
"""Set the AbortFlag on the vtkCommand associated
with the event - causes other things listening to the
interactor not to receive the events"""
# TODO: make interactorObserverTags a map to we can
# explicitly abort just the event we handled - it will
# be slightly more efficient
for tag in self.interactorObserverTags:
cmd = self.interactor.GetCommand(tag)
cmd.SetAbortFlag(1)
def cleanup(self):
"""clean up actors and observers"""
for a in self.actors:
self.renderer.RemoveActor2D(a)
self.sliceView.scheduleRender()
for tag in self.interactorObserverTags:
self.interactor.RemoveObserver(tag)
sliceNode = self.sliceLogic.GetSliceNode()
for tag in self.sliceNodeTags:
sliceNode.RemoveObserver(tag)
| gpl-3.0 | 2,720,068,834,953,864,700 | 3,968,898,315,338,918,000 | 35.358974 | 85 | 0.718853 | false |
quattor/aquilon | lib/aquilon/worker/commands/add_service.py | 2 | 1676 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add service`."""
from aquilon.exceptions_ import AuthorizationException
from aquilon.aqdb.model import Service
from aquilon.worker.broker import BrokerCommand
class CommandAddService(BrokerCommand):
requires_plenaries = True
required_parameters = ["service"]
def render(self, session, plenaries, dbuser, service, need_client_list,
allow_alias_bindings, comments, **_):
Service.get_unique(session, service, preclude=True)
if dbuser.role.name != 'aqd_admin' and allow_alias_bindings is not None:
raise AuthorizationException("Only AQD admin can set allowing alias bindings")
dbservice = Service(name=service, comments=comments,
need_client_list=need_client_list, allow_alias_bindings=allow_alias_bindings)
session.add(dbservice)
plenaries.add(dbservice)
session.flush()
plenaries.write()
return
| apache-2.0 | 1,648,268,977,731,878,700 | -500,513,434,240,726,100 | 37.090909 | 105 | 0.711814 | false |
bertucho/epic-movie-quotes-quiz | dialogos/build/Twisted/twisted/internet/test/test_glibbase.py | 39 | 2284 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.internet.glibbase.
"""
from __future__ import division, absolute_import
import sys
from twisted.trial.unittest import TestCase
from twisted.internet._glibbase import ensureNotImported
class EnsureNotImportedTests(TestCase):
"""
L{ensureNotImported} protects against unwanted past and future imports.
"""
def test_ensureWhenNotImported(self):
"""
If the specified modules have never been imported, and import
prevention is requested, L{ensureNotImported} makes sure they will not
be imported in the future.
"""
modules = {}
self.patch(sys, "modules", modules)
ensureNotImported(["m1", "m2"], "A message.",
preventImports=["m1", "m2", "m3"])
self.assertEqual(modules, {"m1": None, "m2": None, "m3": None})
def test_ensureWhenNotImportedDontPrevent(self):
"""
If the specified modules have never been imported, and import
prevention is not requested, L{ensureNotImported} has no effect.
"""
modules = {}
self.patch(sys, "modules", modules)
ensureNotImported(["m1", "m2"], "A message.")
self.assertEqual(modules, {})
def test_ensureWhenFailedToImport(self):
"""
If the specified modules have been set to C{None} in C{sys.modules},
L{ensureNotImported} does not complain.
"""
modules = {"m2": None}
self.patch(sys, "modules", modules)
ensureNotImported(["m1", "m2"], "A message.", preventImports=["m1", "m2"])
self.assertEqual(modules, {"m1": None, "m2": None})
def test_ensureFailsWhenImported(self):
"""
If one of the specified modules has been previously imported,
L{ensureNotImported} raises an exception.
"""
module = object()
modules = {"m2": module}
self.patch(sys, "modules", modules)
e = self.assertRaises(ImportError, ensureNotImported,
["m1", "m2"], "A message.",
preventImports=["m1", "m2"])
self.assertEqual(modules, {"m2": module})
self.assertEqual(e.args, ("A message.",))
| mit | 8,807,974,853,935,327,000 | 7,272,463,777,436,321,000 | 32.588235 | 82 | 0.603765 | false |
marcoantoniooliveira/labweb | oscar/apps/order/south_migrations/0027_no_null_in_charfields.py | 8 | 47820 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(DataMigration):
def forwards(self, orm):
orm.Line.objects.filter(partner_name__isnull=True).update(partner_name='')
orm.Line.objects.filter(status__isnull=True).update(status='')
orm.Line.objects.filter(partner_line_reference__isnull=True).update(partner_line_reference='')
orm.Line.objects.filter(partner_line_notes__isnull=True).update(partner_line_notes='')
orm.OrderDiscount.objects.filter(offer_name__isnull=True).update(offer_name='')
orm.OrderDiscount.objects.filter(voucher_code__isnull=True).update(voucher_code='')
orm.OrderDiscount.objects.filter(message__isnull=True).update(message='')
orm.OrderNote.objects.filter(note_type__isnull=True).update(note_type='')
orm.Order.objects.filter(status__isnull=True).update(status='')
orm.Order.objects.filter(shipping_method__isnull=True).update(shipping_method='')
orm.Order.objects.filter(guest_email__isnull=True).update(guest_email='')
orm.BillingAddress.objects.filter(first_name__isnull=True).update(first_name='')
orm.BillingAddress.objects.filter(title__isnull=True).update(title='')
orm.BillingAddress.objects.filter(line4__isnull=True).update(line4='')
orm.BillingAddress.objects.filter(line3__isnull=True).update(line3='')
orm.BillingAddress.objects.filter(line2__isnull=True).update(line2='')
orm.BillingAddress.objects.filter(state__isnull=True).update(state='')
orm.BillingAddress.objects.filter(postcode__isnull=True).update(postcode='')
orm.ShippingEvent.objects.filter(notes__isnull=True).update(notes='')
orm.ShippingAddress.objects.filter(first_name__isnull=True).update(first_name='')
orm.ShippingAddress.objects.filter(title__isnull=True).update(title='')
orm.ShippingAddress.objects.filter(notes__isnull=True).update(notes='')
orm.ShippingAddress.objects.filter(line4__isnull=True).update(line4='')
orm.ShippingAddress.objects.filter(line3__isnull=True).update(line3='')
orm.ShippingAddress.objects.filter(line2__isnull=True).update(line2='')
orm.ShippingAddress.objects.filter(state__isnull=True).update(state='')
orm.ShippingAddress.objects.filter(postcode__isnull=True).update(postcode='')
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
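    # The forwards() pass above applies one pattern per nullable text column
    # that is about to lose NULL support, e.g.:
    #
    #   orm.Line.objects.filter(status__isnull=True).update(status='')
    #
    # backwards() is deliberately irreversible: once NULLs have been collapsed
    # into empty strings, the original distinction cannot be restored.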
models = {
u'address.country': {
'Meta': {'ordering': "('-display_order', 'name')", 'object_name': 'Country'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'basket.basket': {
'Meta': {'object_name': 'Basket'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_merged': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'baskets'", 'null': 'True', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}),
'vouchers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['voucher.Voucher']", 'null': 'True', 'blank': 'True'})
},
u'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['catalogue.AttributeEntityType']"})
},
u'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
u'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
u'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': u"orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['catalogue.ProductClass']"}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('oscar.models.fields.NullCharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
u'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productcategory': {
'Meta': {'ordering': "['product', 'category']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'null': 'True', 'blank': 'True'})
},
u'offer.benefit': {
'Meta': {'object_name': 'Benefit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
u'offer.condition': {
'Meta': {'object_name': 'Condition'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
u'offer.conditionaloffer': {
'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Benefit']"}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Condition']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_basket_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'max_global_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_user_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'num_applications': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '64'}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
u'offer.range': {
'Meta': {'object_name': 'Range'},
'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': u"orm['catalogue.Category']"}),
'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'through': u"orm['offer.RangeProduct']", 'to': u"orm['catalogue.Product']"}),
'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'})
},
u'offer.rangeproduct': {
'Meta': {'unique_together': "(('range', 'product'),)", 'object_name': 'RangeProduct'},
'display_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['offer.Range']"})
},
u'order.billingaddress': {
'Meta': {'object_name': 'BillingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'order.communicationevent': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'CommunicationEvent'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['customer.CommunicationEventType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communication_events'", 'to': u"orm['order.Order']"})
},
u'order.line': {
'Meta': {'object_name': 'Line'},
'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': u"orm['order.Order']"}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['partner.Partner']"}),
'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['partner.StockRecord']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
u'order.lineattribute': {
'Meta': {'object_name': 'LineAttribute'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': u"orm['order.Line']"}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_attributes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['catalogue.Option']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'order.lineprice': {
'Meta': {'ordering': "('id',)", 'object_name': 'LinePrice'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': u"orm['order.Line']"}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_prices'", 'to': u"orm['order.Order']"}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
u'order.order': {
'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
'basket': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['basket.Basket']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.BillingAddress']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.ShippingAddress']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'shipping_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)})
},
u'order.orderdiscount': {
'Meta': {'object_name': 'OrderDiscount'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'category': ('django.db.models.fields.CharField', [], {'default': "'Basket'", 'max_length': '64'}),
'frequency': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'offer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'offer_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discounts'", 'to': u"orm['order.Order']"}),
'voucher_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'voucher_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'order.ordernote': {
'Meta': {'object_name': 'OrderNote'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'note_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': u"orm['order.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'})
},
u'order.paymentevent': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'PaymentEvent'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.PaymentEventType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['order.Line']", 'through': u"orm['order.PaymentEventQuantity']", 'symmetrical': 'False'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'to': u"orm['order.Order']"}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'shipping_event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'null': 'True', 'to': u"orm['order.ShippingEvent']"})
},
u'order.paymenteventquantity': {
'Meta': {'object_name': 'PaymentEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': u"orm['order.PaymentEvent']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_event_quantities'", 'to': u"orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'order.paymenteventtype': {
'Meta': {'ordering': "('name',)", 'object_name': 'PaymentEventType'},
'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'order.shippingaddress': {
'Meta': {'object_name': 'ShippingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_number': ('oscar.models.fields.PhoneNumberField', [], {'max_length': '128', 'blank': 'True'}),
'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'order.shippingevent': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'ShippingEvent'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.ShippingEventType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shipping_events'", 'symmetrical': 'False', 'through': u"orm['order.ShippingEventQuantity']", 'to': u"orm['order.Line']"}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': u"orm['order.Order']"})
},
u'order.shippingeventquantity': {
'Meta': {'object_name': 'ShippingEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': u"orm['order.ShippingEvent']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_event_quantities'", 'to': u"orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'order.shippingeventtype': {
'Meta': {'ordering': "('name',)", 'object_name': 'ShippingEventType'},
'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'partner.partner': {
'Meta': {'object_name': 'Partner'},
'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)})
},
u'partner.stockrecord': {
'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'},
'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_allocated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['partner.Partner']"}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['catalogue.Product']"})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'voucher.voucher': {
'Meta': {'object_name': 'Voucher'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'num_basket_additions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'vouchers'", 'symmetrical': 'False', 'to': u"orm['offer.ConditionalOffer']"}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'usage': ('django.db.models.fields.CharField', [], {'default': "'Multi-use'", 'max_length': '128'})
}
}
complete_apps = ['order']
symmetrical = True
| bsd-3-clause | -5,886,064,668,825,330,000 | -1,984,126,787,999,005,400 | 93.693069 | 238 | 0.566269 | false |
clagiordano/projectDeploy | modules/utils.py | 1 | 1458 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import shlex
import socket
import modules.outputUtils as out
def getSessionInfo():
info = {}
output = subprocess.Popen(["who", "am", "i"], stdout=subprocess.PIPE).communicate()
output = output[0].strip().split(' ')
info['username'] = os.getlogin()
info['ipaddress'] = output[-1][1:-1]
info['hostname'] = socket.gethostname()
if info['ipaddress'] != ":0":
try:
            # gethostbyaddr() returns (hostname, aliases, ips); keep the name only
            info['hostname'] = socket.gethostbyaddr(info['ipaddress'])[0]
        except Exception:
            try:
                info['hostname'] = getNetbiosHostname(info['ipaddress'])
            except Exception:
                info['hostname'] = info['ipaddress']
return info
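# Shape of the dict returned by getSessionInfo() (values are hypothetical):
#   {'username': 'alice', 'ipaddress': '192.168.0.10', 'hostname': 'alice-pc'}
# For a local X session the ipaddress stays ":0" and the local hostname is kept.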
def getNetbiosHostname(ipaddress):
output = runShellCommand("nmblookup -A " + ipaddress, False)
hostname = output[0].split('\n')[1].split(' ')[0].strip()
if hostname == 'No':
hostname = output[0]
return hostname
def runShellCommand(command, shell=True):
try:
p = subprocess.Popen( \
shlex.split(command), \
shell=shell, \
stdin=subprocess.PIPE, \
stdout=subprocess.PIPE, \
stderr=subprocess.PIPE)
command_output, command_error = p.communicate()
exit_status = p.returncode
except:
out.fatalError("Failed to execute command " + command)
return command_output, exit_status, command_error
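# Usage sketch (not part of the original module); the command is illustrative.
# shell=False is passed explicitly, as in getNetbiosHostname(), because the
# command string is already tokenised with shlex.split().
def _example_list_tmp():
    output, exit_status, error = runShellCommand("ls -l /tmp", False)
    if exit_status != 0:
        out.fatalError("Failed to list /tmp: " + error)
    return output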
| lgpl-3.0 | 4,267,647,469,595,194,400 | -5,024,580,417,498,602,000 | 27.588235 | 87 | 0.59465 | false |
mkuron/espresso | testsuite/python/dpd.py | 1 | 15785 | #
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import unittest as ut
import unittest_decorators as utx
from itertools import product
import espressomd
from espressomd.observables import DPDStress
from tests_common import single_component_maxwell
@utx.skipIfMissingFeatures("DPD")
class DPDThermostat(ut.TestCase):
"""Tests the velocity distribution created by the dpd thermostat against
the single component Maxwell distribution."""
s = espressomd.System(box_l=3*[10.0])
s.time_step = 0.01
s.cell_system.skin = 0.4
def setUp(self):
self.s.seed = range(self.s.cell_system.get_state()["n_nodes"])
np.random.seed(16)
def tearDown(self):
s = self.s
s.part.clear()
def check_velocity_distribution(self, vel, minmax, n_bins, error_tol, kT):
"""check the recorded particle distributions in velocity against a
histogram with n_bins bins. Drop velocities outside minmax. Check
individual histogram bins up to an accuracy of error_tol against
the analytical result for kT."""
for i in range(3):
hist = np.histogram(vel[:, i], range=(-minmax, minmax), bins=n_bins, density=False)
data = hist[0]/float(vel.shape[0])
bins = hist[1]
for j in range(n_bins):
found = data[j]
expected = single_component_maxwell(bins[j], bins[j+1], kT)
self.assertLessEqual(abs(found - expected), error_tol)
def test_aa_verify_single_component_maxwell(self):
"""Verifies the normalization of the analytical expression."""
self.assertLessEqual(
abs(single_component_maxwell(-10, 10, 4.)-1.), 1E-4)
def check_total_zero(self):
v_total = np.sum(self.s.part[:].v, axis=0)
        self.assertLess(abs(v_total[0]), 1e-11)
        self.assertLess(abs(v_total[1]), 1e-11)
        self.assertLess(abs(v_total[2]), 1e-11)
def test_single(self):
"""Test velocity distribution of a dpd fluid with a single type."""
N = 200
s = self.s
s.part.add(pos=s.box_l * np.random.random((N, 3)))
kT = 2.3
gamma = 1.5
s.thermostat.set_dpd(kT=kT, seed=42)
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=1.5,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.5)
s.integrator.run(100)
loops = 250
v_stored = np.zeros((N*loops, 3))
for i in range(loops):
s.integrator.run(10)
v_stored[i*N:(i+1)*N,:] = s.part[:].v
v_minmax = 5
bins = 5
error_tol = 0.01
self.check_velocity_distribution(
v_stored, v_minmax, bins, error_tol, kT)
self.check_total_zero()
def test_binary(self):
"""Test velocity distribution of binary dpd fluid"""
N = 200
s = self.s
s.part.add(pos=s.box_l * np.random.random((N // 2, 3)), type=N//2*[0])
s.part.add(pos=s.box_l * np.random.random((N // 2, 3)), type=N//2*[1])
kT = 2.3
gamma = 1.5
s.thermostat.set_dpd(kT=kT, seed=42)
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=1.0,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.0)
s.non_bonded_inter[1, 1].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=1.0,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.0)
s.non_bonded_inter[0, 1].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=1.5,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.5)
s.integrator.run(100)
loops = 400
v_stored = np.zeros((N*loops, 3))
for i in range(loops):
s.integrator.run(10)
v_stored[i*N:(i+1)*N,:] = s.part[:].v
v_minmax = 5
bins = 5
error_tol = 0.01
self.check_velocity_distribution(
v_stored, v_minmax, bins, error_tol, kT)
self.check_total_zero()
def test_disable(self):
N = 200
s = self.s
s.time_step = 0.01
s.part.add(pos=s.box_l * np.random.random((N, 3)))
kT = 2.3
gamma = 1.5
s.thermostat.set_dpd(kT=kT, seed=42)
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=1.5,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.5)
s.integrator.run(10)
s.thermostat.turn_off()
# Reset velocities
s.part[:].v = [1., 2., 3.]
s.integrator.run(10)
# Check that there was neither noise nor friction
for v in s.part[:].v:
for i in range(3):
self.assertTrue(v[i] == float(i + 1))
# Turn back on
s.thermostat.set_dpd(kT=kT, seed=42)
# Reset velocities for faster convergence
s.part[:].v = [0., 0., 0.]
# Equilibrate
s.integrator.run(250)
loops = 250
v_stored = np.zeros((N*loops, 3))
for i in range(loops):
s.integrator.run(10)
v_stored[i*N:(i+1)*N,:] = s.part[:].v
v_minmax = 5
bins = 5
error_tol = 0.012
self.check_velocity_distribution(
v_stored, v_minmax, bins, error_tol, kT)
def test_const_weight_function(self):
s = self.s
kT = 0.
gamma = 1.42
s.thermostat.set_dpd(kT=kT, seed=42)
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=1.2,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=1.4)
s.part.add(id=0, pos=[5, 5, 5], type= 0, v=[0, 0, 0])
v = [.5, .8, .3]
s.part.add(id=1, pos=[3, 5, 5], type= 0, v = v)
s.integrator.run(0)
# Outside of both cutoffs, forces should be 0
for f in s.part[:].f:
self.assertTrue(f[0] == 0.)
self.assertTrue(f[1] == 0.)
self.assertTrue(f[2] == 0.)
# Only trans
s.part[1].pos = [5. - 1.3, 5, 5]
s.integrator.run(0)
# Only trans, so x component should be zero
self.assertLess(abs(s.part[0].f[0]), 1e-16)
# f = gamma * v_ij
self.assertTrue(abs(s.part[0].f[1] - gamma * v[1]) < 1e-11)
self.assertTrue(abs(s.part[0].f[2] - gamma * v[2]) < 1e-11)
# Momentum conservation
self.assertLess(abs(s.part[1].f[0]), 1e-16)
self.assertTrue(abs(s.part[1].f[1] + gamma * v[1]) < 1e-11)
self.assertTrue(abs(s.part[1].f[2] + gamma * v[2]) < 1e-11)
# Trans and parallel
s.part[1].pos = [5. - 1.1, 5, 5]
s.integrator.run(0)
self.assertTrue(abs(s.part[0].f[0] - gamma * v[0]) < 1e-11)
self.assertTrue(abs(s.part[0].f[1] - gamma * v[1]) < 1e-11)
self.assertTrue(abs(s.part[0].f[2] - gamma * v[2]) < 1e-11)
self.assertTrue(abs(s.part[1].f[0] + gamma * v[0]) < 1e-11)
self.assertTrue(abs(s.part[1].f[1] + gamma * v[1]) < 1e-11)
self.assertTrue(abs(s.part[1].f[2] + gamma * v[2]) < 1e-11)
def test_linear_weight_function(self):
s = self.s
kT = 0.
gamma = 1.42
s.thermostat.set_dpd(kT=kT, seed=42)
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=1, gamma=gamma, r_cut=1.2,
trans_weight_function=1, trans_gamma=gamma, trans_r_cut=1.4)
def omega(dist, r_cut):
return (1. - dist / r_cut)
s.part.add(id=0, pos=[5, 5, 5], type= 0, v=[0, 0, 0])
v = [.5, .8, .3]
s.part.add(id=1, pos=[3, 5, 5], type= 0, v = v)
s.integrator.run(0)
# Outside of both cutoffs, forces should be 0
for f in s.part[:].f:
self.assertTrue(f[0] == 0.)
self.assertTrue(f[1] == 0.)
self.assertTrue(f[2] == 0.)
# Only trans
s.part[1].pos = [5. - 1.3, 5, 5]
s.integrator.run(0)
# Only trans, so x component should be zero
self.assertLess(abs(s.part[0].f[0]), 1e-16)
# f = gamma * v_ij
self.assertTrue(
abs(s.part[0].f[1] - omega(1.3, 1.4)**2*gamma*v[1]) < 1e-11)
self.assertTrue(
abs(s.part[0].f[2] - omega(1.3, 1.4)**2*gamma*v[2]) < 1e-11)
# Momentum conservation
self.assertLess(abs(s.part[1].f[0]), 1e-16)
self.assertTrue(
abs(s.part[1].f[1] + omega(1.3, 1.4)**2*gamma*v[1]) < 1e-11)
self.assertTrue(
abs(s.part[1].f[2] + omega(1.3, 1.4)**2*gamma*v[2]) < 1e-11)
# Trans and parallel
s.part[1].pos = [5. - 1.1, 5, 5]
s.integrator.run(0)
self.assertTrue(
abs(s.part[0].f[0] - omega(1.1, 1.2)**2*gamma*v[0]) < 1e-11)
self.assertTrue(
abs(s.part[0].f[1] - omega(1.1, 1.4)**2*gamma*v[1]) < 1e-11)
self.assertTrue(
abs(s.part[0].f[2] - omega(1.1, 1.4)**2*gamma*v[2]) < 1e-11)
self.assertTrue(
abs(s.part[1].f[0] + omega(1.1, 1.2)**2*gamma*v[0]) < 1e-11)
self.assertTrue(
abs(s.part[1].f[1] + omega(1.1, 1.4)**2*gamma*v[1]) < 1e-11)
self.assertTrue(
abs(s.part[1].f[2] + omega(1.1, 1.4)**2*gamma*v[2]) < 1e-11)
# Trans and parallel 2nd point
s.part[1].pos = [5. - 0.5, 5, 5]
s.integrator.run(0)
self.assertTrue(
abs(s.part[0].f[0] - omega(0.5, 1.2)**2*gamma*v[0]) < 1e-11)
self.assertTrue(
abs(s.part[0].f[1] - omega(0.5, 1.4)**2*gamma*v[1]) < 1e-11)
self.assertTrue(
abs(s.part[0].f[2] - omega(0.5, 1.4)**2*gamma*v[2]) < 1e-11)
self.assertTrue(
abs(s.part[1].f[0] + omega(0.5, 1.2)**2*gamma*v[0]) < 1e-11)
self.assertTrue(
abs(s.part[1].f[1] + omega(0.5, 1.4)**2*gamma*v[1]) < 1e-11)
self.assertTrue(
abs(s.part[1].f[2] + omega(0.5, 1.4)**2*gamma*v[2]) < 1e-11)
def test_ghosts_have_v(self):
s = self.s
r_cut = 1.5
dx = 0.25 * r_cut
def f(i):
if i == 0:
return dx
return 10. - dx
# Put a particle in every corner
for ind in product([0, 1], [0, 1], [0, 1]):
pos = [f(x) for x in ind]
v = ind
s.part.add(pos=pos, v=v)
gamma = 1.0
s.thermostat.set_dpd(kT=0.0, seed=42)
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=r_cut,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=r_cut)
s.integrator.run(0)
id = 0
for ind in product([0, 1], [0, 1], [0, 1]):
for i in ind:
if ind[i] == 0:
sgn = 1
else:
sgn = -1
self.assertAlmostEqual(sgn * 4.0, s.part[id].f[i])
id += 1
def test_constraint(self):
import espressomd.shapes
s = self.s
s.constraints.add(shape=espressomd.shapes.Wall(
dist=0, normal=[1, 0, 0]), particle_type=0, particle_velocity=[1, 2, 3])
s.thermostat.set_dpd(kT=0.0, seed=42)
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=0, gamma=1., r_cut=1.0,
trans_weight_function=0, trans_gamma=1., trans_r_cut=1.0)
p = s.part.add(pos=[0.5, 0, 0], type=0, v=[0, 0, 0])
s.integrator.run(0)
self.assertAlmostEqual(p.f[0], 1.)
self.assertAlmostEqual(p.f[1], 2.)
self.assertAlmostEqual(p.f[2], 3.)
for c in s.constraints:
s.constraints.remove(c)
def test_dpd_stress(self):
def calc_omega(dist):
return (1./dist - 1./r_cut) ** 2.0
def diss_force_1(dist, vel_diff):
f = np.zeros(3)
vel12dotd12 = 0.
dist_norm = np.linalg.norm(dist)
for d in range(3):
vel12dotd12 += vel_diff[d] * dist[d]
friction = gamma * calc_omega(dist_norm) * vel12dotd12
for d in range(3):
f[d] -= (dist[d] * friction)
return f
def diss_force_2(dist, vel_diff):
dist_norm = np.linalg.norm(dist)
mat = np.identity(3) * (dist_norm**2.0)
f = np.zeros(3)
for d1 in range(3):
for d2 in range(3):
mat[d1, d2] -= dist[d1] * dist[d2]
for d1 in range(3):
for d2 in range(3):
f[d1] += mat[d1, d2] * vel_diff[d2]
f[d1] *= - 1.0 * gamma/2.0 * calc_omega(dist_norm)
return f
def calc_stress(dist, vel_diff):
force_pair = diss_force_1(dist, vel_diff) +\
diss_force_2(dist, vel_diff)
stress_pair = np.outer(dist, force_pair)
return stress_pair
n_part = 1000
r_cut = 1.0
gamma = 5.
s = self.s
s.part.clear()
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=1, gamma=gamma, r_cut=r_cut,
trans_weight_function=1, trans_gamma=gamma/2.0, trans_r_cut=r_cut)
pos = s.box_l * np.random.random((n_part, 3))
s.part.add(pos=pos)
s.integrator.run(10)
s.thermostat.set_dpd(kT=0.0)
s.integrator.run(steps=0, recalc_forces=True)
pairs = s.part.pairs()
stress = np.zeros([3, 3])
for pair in pairs:
dist = s.distance_vec(pair[0], pair[1])
if np.linalg.norm(dist) < r_cut:
vel_diff = pair[1].v - pair[0].v
stress += calc_stress(dist, vel_diff)
stress /= s.box_l[0] ** 3.0
dpd_stress = s.analysis.dpd_stress()
dpd_obs = DPDStress()
obs_stress = dpd_obs.calculate()
obs_stress = np.array([[obs_stress[0], obs_stress[1], obs_stress[2]],
[obs_stress[3], obs_stress[4], obs_stress[5]],
[obs_stress[6], obs_stress[7], obs_stress[8]]])
np.testing.assert_array_almost_equal(np.copy(dpd_stress), stress)
np.testing.assert_array_almost_equal(np.copy(obs_stress), stress)
def test_momentum_conservation(self):
gamma = 5.
r_cut = 2.9
s = self.s
s.thermostat.set_dpd(kT=1.3, seed=42)
s.part.clear()
s.part.add(pos=((0, 0, 0), (0.1, 0.1, 0.1), (0.1, 0, 0)), mass=(1, 2, 3))
s.non_bonded_inter[0, 0].dpd.set_params(
weight_function=1, gamma=gamma, r_cut=r_cut,
trans_weight_function=1, trans_gamma=gamma/2.0, trans_r_cut=r_cut)
momentum = np.matmul(s.part[:].v.T, s.part[:].mass)
for i in range(10):
s.integrator.run(25)
np.testing.assert_array_less(np.zeros((3, 3)), np.abs(s.part[:].f))
np.testing.assert_allclose(np.matmul(s.part[:].v.T, s.part[:].mass), momentum, atol=1E-12)
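# Reference sketch (not part of the original test suite) of what the imported
# single_component_maxwell() helper is assumed to compute: the probability of
# finding one velocity component in [v_min, v_max] for a unit-mass particle at
# temperature kT, obtained by numerical integration.
def _maxwell_bin_probability(v_min, v_max, kT, n_points=1000):
    v = np.linspace(v_min, v_max, n_points)
    density = np.exp(-v**2 / (2. * kT)) / np.sqrt(2. * np.pi * kT)
    return np.trapz(density, v)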
if __name__ == "__main__":
ut.main()
| gpl-3.0 | 310,955,811,836,198,140 | -1,822,553,712,062,699,000 | 33.019397 | 102 | 0.526956 | false |
m4dcoder/cortex | setup.py | 1 | 1799 | #!/usr/bin/env python2.7
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from setuptools import setup, find_packages
PKG_ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
PKG_REQ_FILE = '%s/requirements.txt' % PKG_ROOT_DIR
os.chdir(PKG_ROOT_DIR)
def get_version_string():
version = None
sys.path.insert(0, PKG_ROOT_DIR)
from cortex import __version__
version = __version__
sys.path.pop(0)
return version
def get_requirements():
with open(PKG_REQ_FILE) as f:
required = f.read().splitlines()
# Ignore comments in the requirements file
required = [line for line in required if not line.startswith('#')]
return required
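# Illustrative note (added for clarity): with the helpers above, the package
# would typically be installed from the repository root with something like
# ``pip install .``, which ends up executing the setup() call below.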
setup(
name='cortex',
version=get_version_string(),
packages=find_packages(exclude=[]),
install_requires=get_requirements(),
license='Apache License (2.0)',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
]
)
| apache-2.0 | -5,527,605,614,841,499,000 | -5,083,696,239,992,110,000 | 28.983333 | 74 | 0.67871 | false |
kkk669/mxnet | python/mxnet/visualization.py | 12 | 13772 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, too-many-locals, fixme
# pylint: disable=too-many-branches, too-many-statements
# pylint: disable=too-many-arguments
# pylint: disable=dangerous-default-value
"""Visualization module"""
from __future__ import absolute_import
import re
import copy
import json
from .symbol import Symbol
def _str2tuple(string):
"""Convert shape string to list, internal use only.
Parameters
----------
string: str
Shape string.
Returns
-------
list of str
Represents shape.
"""
return re.findall(r"\d+", string)
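# Illustrative example (added for clarity): _str2tuple pulls the digit runs out
# of a shape string, e.g. _str2tuple("(3, 256, 256)") would return
# ['3', '256', '256'].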
def print_summary(symbol, shape=None, line_length=120, positions=[.44, .64, .74, 1.]):
"""Convert symbol for detail information.
Parameters
----------
symbol: Symbol
Symbol to be visualized.
shape: dict
A dict of shapes, str->shape (tuple), given input shapes.
line_length: int
        Total length of printed lines.
positions: list
Relative or absolute positions of log elements in each line.
Returns
------
None
"""
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be Symbol")
show_shape = False
if shape is not None:
show_shape = True
interals = symbol.get_internals()
_, out_shapes, _ = interals.infer_shape(**shape)
if out_shapes is None:
raise ValueError("Input shape is incomplete")
shape_dict = dict(zip(interals.list_outputs(), out_shapes))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
heads = set(conf["heads"][0])
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer']
def print_row(fields, positions):
"""Print format row.
Parameters
----------
fields: list
Information field.
positions: list
Field length ratio.
Returns
------
None
"""
line = ''
for i, field in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('_' * line_length)
print_row(to_display, positions)
print('=' * line_length)
def print_layer_summary(node, out_shape):
"""print layer information
Parameters
----------
node: dict
Node information.
out_shape: dict
Node shape information.
Returns
------
Node total parameters.
"""
op = node["op"]
pre_node = []
pre_filter = 0
if op != "null":
inputs = node["inputs"]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_node["op"] != "null" or item[0] in heads:
# add precede
pre_node.append(input_name)
if show_shape:
if input_node["op"] != "null":
key = input_name + "_output"
else:
key = input_name
if key in shape_dict:
shape = shape_dict[key][1:]
pre_filter = pre_filter + int(shape[0])
cur_param = 0
if op == 'Convolution':
if ("no_bias" in node["attrs"]) and int(node["attrs"]["no_bias"]):
cur_param = pre_filter * int(node["attrs"]["num_filter"])
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
else:
cur_param = pre_filter * int(node["attrs"]["num_filter"])
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
cur_param += int(node["attrs"]["num_filter"])
elif op == 'FullyConnected':
if ("no_bias" in node["attrs"]) and int(node["attrs"]["no_bias"]):
cur_param = pre_filter * (int(node["attrs"]["num_hidden"]))
else:
cur_param = (pre_filter+1) * (int(node["attrs"]["num_hidden"]))
elif op == 'BatchNorm':
key = node["name"] + "_output"
if show_shape:
num_filter = shape_dict[key][1]
cur_param = int(num_filter) * 2
if not pre_node:
first_connection = ''
else:
first_connection = pre_node[0]
fields = [node['name'] + '(' + op + ')',
"x".join([str(x) for x in out_shape]),
cur_param,
first_connection]
print_row(fields, positions)
if len(pre_node) > 1:
for i in range(1, len(pre_node)):
fields = ['', '', '', pre_node[i]]
print_row(fields, positions)
return cur_param
total_params = 0
for i, node in enumerate(nodes):
out_shape = []
op = node["op"]
if op == "null" and i > 0:
continue
if op != "null" or i in heads:
if show_shape:
if op != "null":
key = node["name"] + "_output"
else:
key = node["name"]
if key in shape_dict:
out_shape = shape_dict[key][1:]
total_params += print_layer_summary(nodes[i], out_shape)
if i == len(nodes) - 1:
print('=' * line_length)
else:
print('_' * line_length)
print('Total params: %s' % total_params)
print('_' * line_length)
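# Illustrative usage sketch (added for clarity, not part of the original
# module; assumes the usual ``import mxnet as mx`` convention):
#
#     data = mx.sym.Variable('data')
#     net = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=128)
#     net = mx.sym.SoftmaxOutput(data=net, name='out')
#     mx.viz.print_summary(net, shape={'data': (1, 100)})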
def plot_network(symbol, title="plot", save_format='pdf', shape=None, node_attrs={},
hide_weights=True):
"""Creates a visualization (Graphviz digraph object) of the given computation graph.
Graphviz must be installed for this function to work.
Parameters
----------
title: str, optional
Title of the generated visualization.
symbol: Symbol
A symbol from the computation graph. The generated digraph will visualize the part
of the computation graph required to compute `symbol`.
shape: dict, optional
Specifies the shape of the input tensors. If specified, the visualization will include
the shape of the tensors between the nodes. `shape` is a dictionary mapping
input symbol names (str) to the corresponding tensor shape (tuple).
node_attrs: dict, optional
Specifies the attributes for nodes in the generated visualization. `node_attrs` is
a dictionary of Graphviz attribute names and values. For example,
``node_attrs={"shape":"oval","fixedsize":"false"}``
will use oval shape for nodes and allow variable sized nodes in the visualization.
hide_weights: bool, optional
If True (default), then inputs with names of form *_weight (corresponding to weight
tensors) or *_bias (corresponding to bias vectors) will be hidden for a cleaner
visualization.
Returns
-------
dot: Digraph
A Graphviz digraph object visualizing the computation graph to compute `symbol`.
Example
-------
>>> net = mx.sym.Variable('data')
>>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128)
>>> net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
>>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10)
>>> net = mx.sym.SoftmaxOutput(data=net, name='out')
>>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)},
... node_attrs={"fixedsize":"false"})
>>> digraph.view()
"""
# todo add shape support
try:
from graphviz import Digraph
except:
raise ImportError("Draw network requires graphviz library")
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be a Symbol")
draw_shape = False
if shape is not None:
draw_shape = True
interals = symbol.get_internals()
_, out_shapes, _ = interals.infer_shape(**shape)
if out_shapes is None:
raise ValueError("Input shape is incomplete")
shape_dict = dict(zip(interals.list_outputs(), out_shapes))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
# default attributes of node
node_attr = {"shape": "box", "fixedsize": "true",
"width": "1.3", "height": "0.8034", "style": "filled"}
# merge the dict provided by user and the default one
node_attr.update(node_attrs)
dot = Digraph(name=title, format=save_format)
# color map
cm = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3",
"#fdb462", "#b3de69", "#fccde5")
def looks_like_weight(name):
"""Internal helper to figure out if node should be hidden with `hide_weights`.
"""
if name.endswith("_weight"):
return True
if name.endswith("_bias"):
return True
if name.endswith("_beta") or name.endswith("_gamma") or \
name.endswith("_moving_var") or name.endswith("_moving_mean"):
return True
return False
# make nodes
hidden_nodes = set()
for node in nodes:
op = node["op"]
name = node["name"]
# input data
attr = copy.deepcopy(node_attr)
label = name
if op == "null":
if looks_like_weight(node["name"]):
if hide_weights:
hidden_nodes.add(node["name"])
# else we don't render a node, but
# don't add it to the hidden_nodes set
# so it gets rendered as an empty oval
continue
attr["shape"] = "oval" # inputs get their own shape
label = node["name"]
attr["fillcolor"] = cm[0]
elif op == "Convolution":
label = r"Convolution\n%s/%s, %s" % ("x".join(_str2tuple(node["attrs"]["kernel"])),
"x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1",
node["attrs"]["num_filter"])
attr["fillcolor"] = cm[1]
elif op == "FullyConnected":
label = r"FullyConnected\n%s" % node["attrs"]["num_hidden"]
attr["fillcolor"] = cm[1]
elif op == "BatchNorm":
attr["fillcolor"] = cm[3]
elif op == "Activation" or op == "LeakyReLU":
label = r"%s\n%s" % (op, node["attrs"]["act_type"])
attr["fillcolor"] = cm[2]
elif op == "Pooling":
label = r"Pooling\n%s, %s/%s" % (node["attrs"]["pool_type"],
"x".join(_str2tuple(node["attrs"]["kernel"])),
"x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1")
attr["fillcolor"] = cm[4]
elif op == "Concat" or op == "Flatten" or op == "Reshape":
attr["fillcolor"] = cm[5]
elif op == "Softmax":
attr["fillcolor"] = cm[6]
else:
attr["fillcolor"] = cm[7]
if op == "Custom":
label = node["attrs"]["op_type"]
dot.node(name=name, label=label, **attr)
# add edges
for node in nodes: # pylint: disable=too-many-nested-blocks
op = node["op"]
name = node["name"]
if op == "null":
continue
else:
inputs = node["inputs"]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_name not in hidden_nodes:
attr = {"dir": "back", 'arrowtail':'open'}
# add shapes
if draw_shape:
if input_node["op"] != "null":
key = input_name + "_output"
if "attrs" in input_node:
params = input_node["attrs"]
if "num_outputs" in params:
key += str(int(params["num_outputs"]) - 1)
shape = shape_dict[key][1:]
label = "x".join([str(x) for x in shape])
attr["label"] = label
else:
key = input_name
shape = shape_dict[key][1:]
label = "x".join([str(x) for x in shape])
attr["label"] = label
dot.edge(tail_name=name, head_name=input_name, **attr)
return dot
| apache-2.0 | 4,123,008,422,887,404,500 | -66,667,695,363,652,030 | 37.794366 | 95 | 0.521711 | false |
acimmarusti/isl_exercises | chap3/chap3ex8.py | 1 | 1315 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.tools.plotting import scatter_matrix
import statsmodels.formula.api as smf
#from sklearn.linear_model import LinearRegression
#import scipy, scipy.stats
#from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import variance_inflation_factor, summary_table
filename = '../Auto.csv'
data = pd.read_csv(filename, na_values='?').dropna()
#Quantitative and qualitative predictors#
print(data.dtypes)
#Simple linear regression#
slinreg = smf.ols('mpg ~ horsepower', data=data).fit()
print(slinreg.summary())
st, fitdat, ss2 = summary_table(slinreg, alpha=0.05)
fittedvalues = fitdat[:,2]
predict_mean_se = fitdat[:,3]
predict_mean_ci_low, predict_mean_ci_upp = fitdat[:,4:6].T
predict_ci_low, predict_ci_upp = fitdat[:,6:8].T
x = data['horsepower']
y = data['mpg']
#Residuals#
resd1 = y - fittedvalues
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y, 'o')
ax1.plot(x, fittedvalues, 'g-')
ax1.plot(x, predict_ci_low, 'r--')
ax1.plot(x, predict_ci_upp, 'r--')
ax1.plot(x, predict_mean_ci_low, 'b--')
ax1.plot(x, predict_mean_ci_upp, 'b--')
ax2.plot(resd1, fittedvalues, 'o')
plt.show()
| gpl-3.0 | 868,946,192,038,472,000 | -5,485,423,299,455,916,000 | 26.978723 | 89 | 0.726996 | false |
Kwentar/ImageDownloader | vk.py | 1 | 7993 | import json
import random
from urllib.error import URLError
from urllib.parse import urlencode
from urllib.request import urlopen, http, Request
import time
from datetime import date
from Profiler import Profiler
import __setup_photo__ as setup
class VkError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class VkUser:
def __init__(self, uid, name, last_name, day_b, month_b, sex, city_id, age=-1, year_b=-1):
self.uid = uid
self.name = name
self.last_name = last_name
self.day_b = day_b
self.month_b = month_b
if year_b == -1:
year_b = date.today().year - age
if month_b < date.today().month or month_b == date.today().month and day_b < date.today().day:
year_b -= 1
self.year_b = year_b
self.sex = sex
self.city_id = city_id
def __str__(self):
return ";".join([self.uid, self.name, self.last_name,
self.day_b.__str__(), self.month_b.__str__(),
self.year_b.__str__(), self.sex.__str__(),
self.city_id.__str__()])
def get_age(self):
return date.today().year - self.year_b
class Vk:
tokens = setup.user_tokens
curr_token = ''
p = Profiler()
@staticmethod
def check_time(value=0.5):
if Vk.p.get_time() < value:
time.sleep(value)
Vk.p.start()
@staticmethod
def set_token(token):
Vk.tokens.clear()
Vk.tokens.append(token)
@staticmethod
def get_token():
while True:
el = random.choice(Vk.tokens)
if el != Vk.curr_token:
test_url = 'https://api.vk.com/method/getProfiles?uid=66748&v=5.103&access_token=' + el
Vk.check_time(1)
try:
response = urlopen(test_url).read()
result = json.loads(response.decode('utf-8'))
if 'response' in result.keys():
print('now I use the ' + el + ' token')
Vk.curr_token = el
return el
except http.client.BadStatusLine as err_:
print("".join(['ERROR Vk.get_token', err_.__str__()]))
raise VkError('all tokens are invalid: ' + result['error']['error_msg'].__str__())
@staticmethod
def call_api(method, params):
Vk.check_time()
while not Vk.curr_token:
Vk.get_token()
if isinstance(params, list):
params_list = params[:]
elif isinstance(params, dict):
            params_list = list(params.items())  # materialize the dict view so it can be extended below
else:
params_list = [params]
params_list += [('access_token', Vk.curr_token), ('v', '5.103')]
url = 'https://api.vk.com/method/%s?%s' % (method, urlencode(params_list))
try:
req = Request(url=url, headers={'User-agent': random.choice(setup.user_agents)})
response = urlopen(req).read()
result = json.loads(response.decode('utf-8'))
try:
if 'response' in result.keys():
return result['response']
else:
raise VkError('no response on answer: ' + result['error']['error_msg'].__str__())
except VkError as err_:
print(err_.value)
Vk.curr_token = Vk.get_token()
# Vk.call_api(method, params)
except URLError as err_:
print('URLError: ' + err_.errno.__str__() + ", " + err_.reason.__str__())
except http.client.BadStatusLine as err_:
print("".join(['ERROR Vk.call_api', err_.__str__()]))
except ConnectionResetError as err_:
print("".join(['ERROR ConnectionResetError', err_.__str__()]))
except ConnectionAbortedError as err_:
print("".join(['ERROR ConnectionAbortedError', err_.__str__()]))
return list()
@staticmethod
def get_uids(age, month, day, city_id, fields='sex'):
search_q = list()
search_q.append(('offset', '0'))
search_q.append(('count', '300'))
search_q.append(('city', city_id))
search_q.append(('fields', fields))
search_q.append(('age_from', age))
search_q.append(('age_to', age))
search_q.append(('has_photo', '1'))
search_q.append(('birth_day', day))
search_q.append(('birth_month', month))
r = Vk.call_api('users.search', search_q)
count = r['count']
users = list()
for el in r['items']:
if 'id' in el.keys() and not el['is_closed']:
user = VkUser(uid=el['id'].__str__(), name=el['first_name'],
last_name=el['last_name'], sex=el['sex'],
day_b=day, month_b=month, age=age, city_id=city_id)
users.append(user)
if count > 1000:
Vk.warning('''Count more than 1000, count = {}, age = {},
month = {}, day = {}'''.format(count, age, month, day))
return users
@staticmethod
def create_user_from_response(response):
if 'user_id' in response.keys():
uid = response['user_id'].__str__()
elif 'uid' in response.keys():
uid = response['uid'].__str__()
else:
return None
if 'deactivated' in response.keys():
return None
last_name = 'None'
sex = 'None'
name = 'None'
city_id = 'None'
day, month, age = [0, 0, 0]
if 'last_name' in response.keys():
last_name = response['last_name'].__str__()
if 'first_name' in response.keys():
name = response['first_name'].__str__()
if 'sex' in response.keys():
sex = response['sex'].__str__()
if 'city' in response.keys():
city_id = response['city'].__str__()
if 'bdate' in response.keys():
bdate = response['bdate'].__str__().split('.')
if len(bdate) > 2:
day, month, age = map(int, bdate)
age = date.today().year - age
else:
day, month = map(int, bdate)
user = VkUser(uid=uid, name=name, last_name=last_name, sex=sex, day_b=day,
month_b=month, age=age, city_id=city_id)
return user
@staticmethod
def get_user_info(uid, fields='city,bdate,sex'):
search_q = list()
search_q.append(('user_id', uid))
search_q.append(('fields', fields))
r = Vk.call_api('users.get', search_q)
for el in r:
user = Vk.create_user_from_response(el)
if user is not None:
return user
@staticmethod
def get_friends(uid, fields='city,bdate,sex'):
search_q = list()
search_q.append(('user_id', uid))
search_q.append(('offset', '0'))
search_q.append(('count', '1000'))
search_q.append(('fields', fields))
r = Vk.call_api('friends.get', search_q)
count = len(r)
users = list()
for el in r:
user = Vk.create_user_from_response(el)
if user is not None:
users.append(user)
if count > 1000:
Vk.warning('Count more than 1000')
return users
@staticmethod
def get_profile_photos(id_):
q = list()
q.append(('owner_id', id_))
q.append(('count', '10'))
q.append(('rev', '1'))
q.append(('extended', '1'))
q.append(('photos_size', '0'))
r = Vk.call_api('photos.getAll', q)
images = []
for photo in r['items']:
max_photo = max(photo['sizes'], key=lambda x: x['width']*x['height'])
images.append(max_photo['url'])
return images
@staticmethod
def warning(msg):
print(msg)
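# Illustrative usage sketch (added for clarity, not part of the original
# script; token and user id are hypothetical and assume __setup_photo__ is
# configured):
#
#     Vk.set_token('my_access_token')
#     user = Vk.get_user_info('1')
#     photo_urls = Vk.get_profile_photos(user.uid)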
| mit | -8,527,894,477,179,246,000 | 26,934,458,573,564,730 | 34.524444 | 106 | 0.505067 | false |
svirt/tp-libvirt | libvirt/tests/src/virsh_cmd/filter/virsh_nwfilter_dumpxml.py | 4 | 3574 | import logging
from autotest.client.shared import error
from virttest import virsh
from virttest import libvirt_xml
from provider import libvirt_version
def check_list(uuid, name):
"""
Return True if filter found in nwfilter-list
:param uuid: filter uuid
:param name: filter name
:return: True if found, False if not found
"""
cmd_result = virsh.nwfilter_list(options="",
ignore_status=True, debug=True)
output = cmd_result.stdout.strip().split('\n')
for i in range(2, len(output)):
if output[i].split() == [uuid, name]:
return True
return False
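# Note (added for clarity): the loop above starts at index 2, skipping what is
# presumably the table header and separator row of the `virsh nwfilter-list`
# output, and then looks for a row whose fields are exactly [uuid, name].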
def run(test, params, env):
"""
Test command: virsh nwfilter-dumpxml.
1) Prepare parameters.
2) Run dumpxml command.
3) Check result.
"""
# Prepare parameters
filter_name = params.get("dumpxml_filter_name", "")
options_ref = params.get("dumpxml_options_ref", "")
status_error = params.get("status_error", "no")
# acl polkit params
uri = params.get("virsh_uri")
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
" libvirt version.")
virsh_dargs = {'ignore_status': True, 'debug': True}
if params.get('setup_libvirt_polkit') == 'yes':
virsh_dargs['unprivileged_user'] = unprivileged_user
virsh_dargs['uri'] = uri
# Run command
cmd_result = virsh.nwfilter_dumpxml(filter_name, options=options_ref,
**virsh_dargs)
output = cmd_result.stdout.strip()
status = cmd_result.exit_status
# Check result
if status_error == "yes":
if status == 0:
raise error.TestFail("Run successfully with wrong command.")
elif status_error == "no":
if status:
raise error.TestFail("Run failed with right command.")
# Get uuid and name from output xml and compare with nwfilter-list
# output
new_filter = libvirt_xml.NwfilterXML()
new_filter['xml'] = output
uuid = new_filter.uuid
name = new_filter.filter_name
if check_list(uuid, name):
logging.debug("The filter with uuid %s and name %s" % (uuid, name) +
" from nwfilter-dumpxml was found in"
" nwfilter-list output")
else:
raise error.TestFail("The uuid %s with name %s from" % (uuid, name) +
" nwfilter-dumpxml did not match with"
" nwfilter-list output")
# Run command second time with uuid
cmd_result = virsh.nwfilter_dumpxml(uuid, options=options_ref,
**virsh_dargs)
output1 = cmd_result.stdout.strip()
status1 = cmd_result.exit_status
if status_error == "yes":
if status1 == 0:
raise error.TestFail("Run successfully with wrong command.")
elif status_error == "no":
if status1:
raise error.TestFail("Run failed with right command.")
if output1 != output:
raise error.TestFail("nwfilter dumpxml output was different" +
" between using filter uuid and name")
| gpl-2.0 | 4,992,049,383,191,394,000 | 425,676,199,820,763,900 | 35.10101 | 81 | 0.575266 | false |
mapennell/ansible | test/units/mock/loader.py | 50 | 2876 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
class DictDataLoader(DataLoader):
def __init__(self, file_mapping=dict()):
assert type(file_mapping) == dict
self._file_mapping = file_mapping
self._build_known_directories()
super(DictDataLoader, self).__init__()
def load_from_file(self, path):
if path in self._file_mapping:
return self.load(self._file_mapping[path], path)
return None
def _get_file_contents(self, path):
if path in self._file_mapping:
return (self._file_mapping[path], False)
else:
raise AnsibleParserError("file not found: %s" % path)
def path_exists(self, path):
return path in self._file_mapping or path in self._known_directories
def is_file(self, path):
return path in self._file_mapping
def is_directory(self, path):
return path in self._known_directories
def list_directory(self, path):
return [x for x in self._known_directories]
def _add_known_directory(self, directory):
if directory not in self._known_directories:
self._known_directories.append(directory)
def _build_known_directories(self):
self._known_directories = []
for path in self._file_mapping:
dirname = os.path.dirname(path)
while dirname not in ('/', ''):
self._add_known_directory(dirname)
dirname = os.path.dirname(dirname)
def push(self, path, content):
rebuild_dirs = False
if path not in self._file_mapping:
rebuild_dirs = True
self._file_mapping[path] = content
if rebuild_dirs:
self._build_known_directories()
def pop(self, path):
if path in self._file_mapping:
del self._file_mapping[path]
self._build_known_directories()
def clear(self):
self._file_mapping = dict()
self._known_directories = []
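# Illustrative usage sketch (added for clarity, not part of the original
# module; the path and YAML content are hypothetical):
#
#     fake_loader = DictDataLoader({"/play.yml": "- hosts: all"})
#     data = fake_loader.load_from_file("/play.yml")
#     assert fake_loader.path_exists("/play.yml")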
| gpl-3.0 | -8,799,655,790,220,810,000 | -1,686,389,079,516,011,500 | 30.955556 | 76 | 0.648818 | false |
Velociraptor85/pyload | module/plugins/hoster/NovafileCom.py | 8 | 1270 | # -*- coding: utf-8 -*-
#
# Test links:
# http://novafile.com/vfun4z6o2cit
# http://novafile.com/s6zrr5wemuz4
from ..internal.XFSHoster import XFSHoster
class NovafileCom(XFSHoster):
__name__ = "NovafileCom"
__type__ = "hoster"
__version__ = "0.11"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?novafile\.com/\w{12}'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool",
"Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Novafile.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]"),
("stickell", "[email protected]")]
PLUGIN_DOMAIN = "novafile.com"
ERROR_PATTERN = r'class="alert.+?alert-separate".*?>\s*(?:<p>)?(.*?)\s*</'
WAIT_PATTERN = r'<p>Please wait <span id="count".*?>(\d+)</span> seconds</p>'
LINK_PATTERN = r'<a href="(http://s\d+\.novafile\.com/.*?)" class="btn btn-green">Download File</a>'
| gpl-3.0 | -4,658,544,624,407,446,000 | 1,617,867,294,399,539,000 | 36.352941 | 104 | 0.551969 | false |
naturali/tensorflow | tensorflow/examples/skflow/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# You can define you configurations by providing a RunConfig object to
# estimator to control session configurations, e.g. num_cores
# and gpu_memory_fraction
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | 4,372,450,902,877,380,600 | -3,671,352,315,061,745,000 | 36.945455 | 78 | 0.68759 | false |
googleapis/python-compute | google/cloud/compute_v1/services/target_instances/pagers.py | 1 | 5740 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.TargetInstanceAggregatedList],
request: compute.AggregatedListTargetInstancesRequest,
response: compute.TargetInstanceAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.AggregatedListTargetInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetInstanceList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.TargetInstanceList],
request: compute.ListTargetInstancesRequest,
response: compute.TargetInstanceList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListTargetInstancesRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetInstanceList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListTargetInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.TargetInstanceList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[compute.TargetInstance]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | -1,835,709,528,351,142,000 | -5,628,424,767,871,511,000 | 36.272727 | 89 | 0.655923 | false |