repo_name | path | copies | size | content | license
---|---|---|---|---|---|
j00bar/ansible | test/units/playbook/test_helpers.py | 60 | 19182 | # (c) 2016, Adrian Likins <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
from units.mock.loader import DictDataLoader
from ansible import errors
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role.include import RoleInclude
from ansible.playbook import helpers
class MixinForMocks(object):
def _setup(self):
# This is not a very good mixin, lots of side effects
self.fake_loader = DictDataLoader({'include_test.yml': "",
'other_include_test.yml': ""})
self.mock_tqm = MagicMock(name='MockTaskQueueManager')
self.mock_play = MagicMock(name='MockPlay')
self.mock_iterator = MagicMock(name='MockIterator')
self.mock_iterator._play = self.mock_play
self.mock_inventory = MagicMock(name='MockInventory')
self.mock_inventory._hosts_cache = dict()
def _get_host(host_name):
return None
self.mock_inventory.get_host.side_effect = _get_host
# TODO: can we use a real VariableManager?
self.mock_variable_manager = MagicMock(name='MockVariableManager')
self.mock_variable_manager.get_vars.return_value = dict()
self.mock_block = MagicMock(name='MockBlock')
self.fake_role_loader = DictDataLoader({"/etc/ansible/roles/bogus_role/tasks/main.yml": """
- shell: echo 'hello world'
"""})
self._test_data_path = os.path.dirname(__file__)
self.fake_include_loader = DictDataLoader({"/dev/null/includes/test_include.yml": """
- include: other_test_include.yml
- shell: echo 'hello world'
""",
"/dev/null/includes/static_test_include.yml": """
- include: other_test_include.yml
- shell: echo 'hello static world'
""",
"/dev/null/includes/other_test_include.yml": """
- debug:
msg: other_test_include_debug
"""})
class TestLoadListOfTasks(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def _assert_is_task_list(self, results):
for result in results:
self.assertIsInstance(result, Task)
def _assert_is_task_list_or_blocks(self, results):
self.assertIsInstance(results, list)
for result in results:
self.assertIsInstance(result, (Task, Block))
def test_ds_not_list(self):
ds = {}
self.assertRaises(AssertionError, helpers.load_list_of_tasks,
ds, self.mock_play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
def test_empty_task(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_empty_task_use_handlers(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_tasks,
ds,
use_handlers=True,
play=self.mock_play,
variable_manager=self.mock_variable_manager,
loader=self.fake_loader)
def test_one_bogus_block(self):
ds = [{'block': None}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"A malformed block was encountered",
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_unknown_action(self):
action_name = 'foo_test_unknown_action'
ds = [{'action': action_name}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertEquals(res[0].action, action_name)
def test_block_unknown_action(self):
action_name = 'foo_test_block_unknown_action'
ds = [{
'block': [{'action': action_name}]
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self._assert_default_block(res[0])
def _assert_default_block(self, block):
# the expected defaults
self.assertIsInstance(block.block, list)
self.assertEquals(len(block.block), 1)
self.assertIsInstance(block.rescue, list)
self.assertEquals(len(block.rescue), 0)
self.assertIsInstance(block.always, list)
self.assertEquals(len(block.always), 0)
def test_block_unknown_action_use_handlers(self):
ds = [{
'block': [{'action': 'foo_test_block_unknown_action'}]
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self._assert_default_block(res[0])
def test_one_bogus_block_use_handlers(self):
ds = [{'block': True}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"A malformed block was encountered",
helpers.load_list_of_tasks,
ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_one_bogus_include(self):
ds = [{'include': 'somefile.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_bogus_include_use_handlers(self):
ds = [{'include': 'somefile.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_bogus_include_static(self):
ds = [{'include': 'somefile.yml',
'static': 'true'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_include(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
def test_one_parent_include(self):
ds = [{'include': '/dev/null/includes/test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIsInstance(res[0]._parent, TaskInclude)
    # TODO/FIXME: do this the non-deprecated way
def test_one_include_tags(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml',
'tags': ['test_one_include_tags_tag1', 'and_another_tagB']
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('test_one_include_tags_tag1', res[0].tags)
self.assertIn('and_another_tagB', res[0].tags)
    # TODO/FIXME: do this the non-deprecated way
def test_one_parent_include_tags(self):
ds = [{'include': '/dev/null/includes/test_include.yml',
#'vars': {'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']}
'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']
}
]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('test_one_parent_include_tags_tag1', res[0].tags)
self.assertIn('and_another_tag2', res[0].tags)
# It would be useful to be able to tell what kind of deprecation we encountered and where we encountered it.
def test_one_include_tags_deprecated_mixed(self):
ds = [{'include': "/dev/null/includes/other_test_include.yml",
'vars': {'tags': "['tag_on_include1', 'tag_on_include2']"},
'tags': 'mixed_tag1, mixed_tag2'
}]
self.assertRaisesRegexp(errors.AnsibleParserError, 'Mixing styles',
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
def test_one_include_tags_deprecated_include(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml',
'vars': {'tags': ['include_tag1_deprecated', 'and_another_tagB_deprecated']}
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('include_tag1_deprecated', res[0].tags)
self.assertIn('and_another_tagB_deprecated', res[0].tags)
def test_one_include_use_handlers(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Handler)
def test_one_parent_include_use_handlers(self):
ds = [{'include': '/dev/null/includes/test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Handler)
# default for Handler
self.assertEquals(res[0].listen, None)
    # TODO/FIXME: this doesn't seem right
# figure out how to get the non-static errors to be raised, this seems to just ignore everything
def test_one_include_not_static(self):
ds = [{
'include': '/dev/null/includes/static_test_include.yml',
'static': False
}]
#a_block = Block()
ti_ds = {'include': '/dev/null/includes/ssdftatic_test_include.yml'}
a_task_include = TaskInclude()
ti = a_task_include.load(ti_ds)
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
block=ti,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Task)
self.assertEquals(res[0].args['_raw_params'], '/dev/null/includes/static_test_include.yml')
    # TODO/FIXME: These two get stuck trying to make a mock_block into a TaskInclude
# def test_one_include(self):
# ds = [{'include': 'other_test_include.yml'}]
# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
# block=self.mock_block,
# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
# print(res)
# def test_one_parent_include(self):
# ds = [{'include': 'test_include.yml'}]
# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
# block=self.mock_block,
# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
# print(res)
def test_one_bogus_include_role(self):
ds = [{'include_role': {'name': 'bogus_role'}}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
block=self.mock_block,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
def test_one_bogus_include_role_use_handlers(self):
ds = [{'include_role': {'name': 'bogus_role'}}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
block=self.mock_block,
variable_manager=self.mock_variable_manager,
loader=self.fake_role_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
class TestLoadListOfRoles(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def test_ds_not_list(self):
ds = {}
self.assertRaises(AssertionError, helpers.load_list_of_roles,
ds, self.mock_play)
def test_empty_role(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleError,
"role definitions must contain a role name",
helpers.load_list_of_roles,
ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
def test_empty_role_just_name(self):
ds = [{'name': 'bogus_role'}]
res = helpers.load_list_of_roles(ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertIsInstance(res, list)
for r in res:
self.assertIsInstance(r, RoleInclude)
def test_block_unknown_action(self):
ds = [{
'block': [{'action': 'foo_test_block_unknown_action'}]
}]
ds = [{'name': 'bogus_role'}]
res = helpers.load_list_of_roles(ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertIsInstance(res, list)
for r in res:
self.assertIsInstance(r, RoleInclude)
class TestLoadListOfBlocks(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def test_ds_not_list(self):
ds = {}
mock_play = MagicMock(name='MockPlay')
self.assertRaises(AssertionError, helpers.load_list_of_blocks,
ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
def test_empty_block(self):
ds = [{}]
mock_play = MagicMock(name='MockPlay')
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_blocks,
ds, mock_play,
parent_block=None,
role=None,
task_include=None,
use_handlers=False,
variable_manager=None,
loader=None)
def test_block_unknown_action(self):
ds = [{'action': 'foo'}]
mock_play = MagicMock(name='MockPlay')
res = helpers.load_list_of_blocks(ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None,
loader=None)
self.assertIsInstance(res, list)
for block in res:
self.assertIsInstance(block, Block)
| gpl-3.0 |
SGCreations/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/jinja2/testsuite/security.py | 415 | 6204 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~
Checks the sandbox and other security features.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment
from jinja2.sandbox import SandboxedEnvironment, \
ImmutableSandboxedEnvironment, unsafe
from jinja2 import Markup, escape
from jinja2.exceptions import SecurityError, TemplateSyntaxError, \
TemplateRuntimeError
from jinja2._compat import text_type
class PrivateStuff(object):
def bar(self):
return 23
@unsafe
def foo(self):
return 42
def __repr__(self):
return 'PrivateStuff'
class PublicStuff(object):
bar = lambda self: 23
_foo = lambda self: 42
def __repr__(self):
return 'PublicStuff'
class SandboxTestCase(JinjaTestCase):
def test_unsafe(self):
env = SandboxedEnvironment()
self.assert_raises(SecurityError, env.from_string("{{ foo.foo() }}").render,
foo=PrivateStuff())
self.assert_equal(env.from_string("{{ foo.bar() }}").render(foo=PrivateStuff()), '23')
self.assert_raises(SecurityError, env.from_string("{{ foo._foo() }}").render,
foo=PublicStuff())
self.assert_equal(env.from_string("{{ foo.bar() }}").render(foo=PublicStuff()), '23')
self.assert_equal(env.from_string("{{ foo.__class__ }}").render(foo=42), '')
self.assert_equal(env.from_string("{{ foo.func_code }}").render(foo=lambda:None), '')
# security error comes from __class__ already.
self.assert_raises(SecurityError, env.from_string(
"{{ foo.__class__.__subclasses__() }}").render, foo=42)
def test_immutable_environment(self):
env = ImmutableSandboxedEnvironment()
self.assert_raises(SecurityError, env.from_string(
'{{ [].append(23) }}').render)
self.assert_raises(SecurityError, env.from_string(
'{{ {1:2}.clear() }}').render)
def test_restricted(self):
env = SandboxedEnvironment()
self.assert_raises(TemplateSyntaxError, env.from_string,
"{% for item.attribute in seq %}...{% endfor %}")
self.assert_raises(TemplateSyntaxError, env.from_string,
"{% for foo, bar.baz in seq %}...{% endfor %}")
def test_markup_operations(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
# string interpolations are safe to use too
        assert Markup('<em>%s</em>') % '<bad user>' == \
            '<em>&lt;bad user&gt;</em>'
        assert Markup('<em>%(username)s</em>') % {
            'username': '<bad user>'
        } == '<em>&lt;bad user&gt;</em>'
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
# escaping and unescaping
        assert escape('"<>&\'') == '&#34;&lt;&gt;&amp;&#39;'
        assert Markup("<em>Foo &amp; Bar</em>").striptags() == "Foo & Bar"
        assert Markup("&lt;test&gt;").unescape() == "<test>"
def test_template_data(self):
env = Environment(autoescape=True)
t = env.from_string('{% macro say_hello(name) %}'
'<p>Hello {{ name }}!</p>{% endmacro %}'
'{{ say_hello("<blink>foo</blink>") }}')
        escaped_out = '<p>Hello &lt;blink&gt;foo&lt;/blink&gt;!</p>'
assert t.render() == escaped_out
assert text_type(t.module) == escaped_out
assert escape(t.module) == escaped_out
assert t.module.say_hello('<blink>foo</blink>') == escaped_out
assert escape(t.module.say_hello('<blink>foo</blink>')) == escaped_out
def test_attr_filter(self):
env = SandboxedEnvironment()
tmpl = env.from_string('{{ cls|attr("__subclasses__")() }}')
self.assert_raises(SecurityError, tmpl.render, cls=int)
def test_binary_operator_intercepting(self):
def disable_op(left, right):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('1 + 2', {}, '3'), ('a + 2', {'a': 2}, '4'):
env = SandboxedEnvironment()
env.binop_table['+'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_binops = frozenset(['+'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
self.fail('expected runtime error')
def test_unary_operator_intercepting(self):
def disable_op(arg):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('-1', {}, '-1'), ('-a', {'a': 2}, '-2'):
env = SandboxedEnvironment()
env.unop_table['-'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_unops = frozenset(['-'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
self.fail('expected runtime error')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SandboxTestCase))
return suite
| apache-2.0 |
jnerin/ansible | lib/ansible/module_utils/ovirt.py | 3 | 26081 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import collections
import inspect
import os
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion
try:
from enum import Enum # enum is a ovirtsdk4 requirement
import ovirtsdk4 as sdk
import ovirtsdk4.version as sdk_version
HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.0.0')
except ImportError:
HAS_SDK = False
BYTES_MAP = {
'kib': 2**10,
'mib': 2**20,
'gib': 2**30,
'tib': 2**40,
'pib': 2**50,
}
def check_sdk(module):
if not HAS_SDK:
module.fail_json(
msg='ovirtsdk4 version 4.0.0 or higher is required for this module'
)
def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
"""
Convert SDK Struct type into dictionary.
"""
res = {}
def remove_underscore(val):
if val.startswith('_'):
val = val[1:]
remove_underscore(val)
return val
def convert_value(value):
nested = False
if isinstance(value, sdk.Struct):
return get_dict_of_struct(value)
elif isinstance(value, Enum) or isinstance(value, datetime):
return str(value)
elif isinstance(value, list) or isinstance(value, sdk.List):
if isinstance(value, sdk.List) and fetch_nested and value.href:
try:
value = connection.follow_link(value)
nested = True
except sdk.Error:
value = []
ret = []
for i in value:
if isinstance(i, sdk.Struct):
if not nested:
ret.append(get_dict_of_struct(i))
else:
nested_obj = dict(
(attr, convert_value(getattr(i, attr)))
for attr in attributes if getattr(i, attr, None)
)
                        nested_obj['id'] = getattr(i, 'id', None)
ret.append(nested_obj)
elif isinstance(i, Enum):
ret.append(str(i))
else:
ret.append(i)
return ret
else:
return value
if struct is not None:
for key, value in struct.__dict__.items():
if value is None:
continue
key = remove_underscore(key)
res[key] = convert_value(value)
return res
def engine_version(connection):
"""
Return string representation of oVirt engine version.
"""
engine_api = connection.system_service().get()
engine_version = engine_api.product_info.version
return '%s.%s' % (engine_version.major, engine_version.minor)
def create_connection(auth):
"""
    Create a connection to the Python SDK, from the task's `auth` parameter.
    If the user doesn't have an SSO token, the `auth` dictionary must contain the following parameters:
    url, username, password
    If the user has an SSO token, the `auth` dictionary must contain the following parameters:
    url, token
    The `ca_file` parameter is mandatory when the user wants a secure connection;
    for an insecure connection, insecure=True must be sent instead.
:param auth: dictionary which contains needed values for connection creation
:return: Python SDK connection
"""
return sdk.Connection(
url=auth.get('url'),
username=auth.get('username'),
password=auth.get('password'),
ca_file=auth.get('ca_file', None),
insecure=auth.get('insecure', False),
token=auth.get('token', None),
kerberos=auth.get('kerberos', None),
headers=auth.get('headers', None),
)
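# Illustrative usage sketch (not part of the original module): the `auth`
# values below are placeholders; normally the task supplies `auth`, or the
# OVIRT_* environment variables are picked up by the argument spec helpers.
def _example_create_connection():
    auth = {
        'url': 'https://engine.example.com/ovirt-engine/api',  # placeholder URL
        'username': 'admin@internal',
        'password': 'secret',
        'insecure': True,  # or supply 'ca_file' for a verified TLS connection
    }
    connection = create_connection(auth)
    try:
        # engine_version() is defined above and returns e.g. '4.1'
        return engine_version(connection)
    finally:
        connection.close()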
def convert_to_bytes(param):
"""
    This method converts units to bytes, following the IEC standard.
:param param: value to be converted
"""
if param is None:
return None
# Get rid of whitespaces:
param = ''.join(param.split())
# Convert to bytes:
if param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
elif param.isdigit():
return int(param) * 2**10
else:
raise ValueError(
"Unsupported value(IEC supported): '{value}'".format(value=param)
)
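# Quick illustration of the conversion above (a sketch, not upstream code):
# IEC suffixes are honoured and a bare number is treated as KiB.
def _example_convert_to_bytes():
    assert convert_to_bytes('1 GiB') == 2 ** 30
    assert convert_to_bytes('10MiB') == 10 * 2 ** 20
    assert convert_to_bytes('2048') == 2048 * 2 ** 10  # plain digits mean KiB
    assert convert_to_bytes(None) is None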
def follow_link(connection, link):
"""
This method returns the entity of the element which link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: entity which link points to
"""
if link:
return connection.follow_link(link)
else:
return None
def get_link_name(connection, link):
"""
This method returns the name of the element which link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: name of the entity, which link points to
"""
if link:
return connection.follow_link(link).name
else:
return None
def equal(param1, param2, ignore_case=False):
"""
    Compare two parameters and return whether they are equal.
    The comparison is skipped if the first parameter is None.
    With this approach we don't compare values the user didn't
    specify in their task.
:param param1: user inputted parameter
:param param2: value of entity parameter
:return: True if parameters are equal or first parameter is None, otherwise False
"""
if param1 is not None:
if ignore_case:
return param1.lower() == param2.lower()
return param1 == param2
return True
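# Small illustration of equal()'s "None means unspecified" contract
# (the example values are made up):
def _example_equal():
    assert equal(None, 'anything')              # parameter omitted by the user
    assert equal('up', 'up')
    assert equal('UP', 'up', ignore_case=True)
    assert not equal('down', 'up')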
def search_by_attributes(service, list_params=None, **kwargs):
"""
    Search for the entity by attributes. Nested entities don't support search
    via REST, so when searching for a nested entity we list all entities
    and filter them by the specified attributes.
"""
list_params = list_params or {}
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
search=' and '.join('{0}={1}'.format(k, v) for k, v in kwargs.items()),
**list_params
)
else:
res = [
e for e in service.list(**list_params) if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def search_by_name(service, name, **kwargs):
"""
    Search for the entity by its name. Nested entities don't support search
    via REST, so when searching for a nested entity we list all entities
    and filter them by name.
:param service: service of the entity
:param name: name of the entity
:return: Entity object returned by Python SDK
"""
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
search="name={name}".format(name=name)
)
else:
res = [e for e in service.list() if e.name == name]
if kwargs:
res = [
e for e in service.list() if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
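# Hypothetical usage sketch: look up a VM by name through its collection
# service. `vms_service` stands in for connection.system_service().vms_service()
# from ovirtsdk4; the VM name is a placeholder.
def _example_search_by_name(vms_service):
    vm = search_by_name(vms_service, 'myvm')
    return vm.id if vm is not None else None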
def get_entity(service, get_params=None):
"""
    Get an entity from the service, ignoring sdk.Error (for example a 404 while the entity is being removed).
"""
entity = None
try:
if get_params is not None:
entity = service.get(**get_params)
else:
entity = service.get()
except sdk.Error:
        # We can get a 404 here; we should ignore it, for example
        # when the entity is being removed.
pass
return entity
def get_id_by_name(service, name, raise_error=True, ignore_case=False):
"""
    Search for an entity's ID by its name.
"""
entity = search_by_name(service, name)
if entity is not None:
return entity.id
if raise_error:
raise Exception("Entity '%s' was not found." % name)
def wait(
service,
condition,
fail_condition=lambda e: False,
timeout=180,
wait=True,
poll_interval=3,
):
"""
    Wait until the entity fulfills the expected condition.
:param service: service of the entity
:param condition: condition to be fulfilled
:param fail_condition: if this condition is true, raise Exception
:param timeout: max time to wait in seconds
:param wait: if True wait for condition, if False don't wait
:param poll_interval: Number of seconds we should wait until next condition check
"""
# Wait until the desired state of the entity:
if wait:
start = time.time()
while time.time() < start + timeout:
# Exit if the condition of entity is valid:
entity = get_entity(service)
if condition(entity):
return
elif fail_condition(entity):
raise Exception("Error while waiting on result state of the entity.")
# Sleep for `poll_interval` seconds if none of the conditions apply:
time.sleep(float(poll_interval))
raise Exception("Timeout exceed while waiting on result state of the entity.")
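# Hypothetical illustration of wait(): poll a VM service until the VM reports
# the 'up' status. `vm_service` is assumed to be an ovirtsdk4 service whose
# get() returns an object with a `status` attribute; the names and values
# here are placeholders, not taken from the original module.
def _example_wait_for_vm_up(vm_service):
    wait(
        service=vm_service,
        condition=lambda vm: vm is not None and str(vm.status) == 'up',
        fail_condition=lambda vm: vm is not None and str(vm.status) == 'down',
        timeout=300,
        poll_interval=5,
    )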
def __get_auth_dict():
OVIRT_URL = os.environ.get('OVIRT_URL')
OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
OVIRT_INSECURE = OVIRT_CAFILE is None
env_vars = None
if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
env_vars = {
'url': OVIRT_URL,
'username': OVIRT_USERNAME,
'password': OVIRT_PASSWORD,
'insecure': OVIRT_INSECURE,
'token': OVIRT_TOKEN,
'ca_file': OVIRT_CAFILE,
}
if env_vars is not None:
auth = dict(default=env_vars, type='dict')
else:
auth = dict(required=True, type='dict')
return auth
def ovirt_facts_full_argument_spec(**kwargs):
"""
Extend parameters of facts module with parameters which are common to all
oVirt facts modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
def ovirt_full_argument_spec(**kwargs):
"""
Extend parameters of module with parameters which are common to all oVirt modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
timeout=dict(default=180, type='int'),
wait=dict(default=True, type='bool'),
poll_interval=dict(default=3, type='int'),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
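# Sketch of how a module would typically consume ovirt_full_argument_spec();
# the `state` and `name` parameters below are made up for illustration.
def _example_argument_spec():
    from ansible.module_utils.basic import AnsibleModule
    argument_spec = ovirt_full_argument_spec(
        state=dict(choices=['present', 'absent'], default='present'),
        name=dict(default=None),
    )
    return AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)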
def check_params(module):
"""
Most modules must have either `name` or `id` specified.
"""
if module.params.get('name') is None and module.params.get('id') is None:
module.fail_json(msg='"name" or "id" is required')
def engine_supported(connection, version):
return LooseVersion(engine_version(connection)) >= LooseVersion(version)
def check_support(version, connection, module, params):
"""
Check if parameters used by user are supported by oVirt Python SDK
and oVirt engine.
"""
api_version = LooseVersion(engine_version(connection))
version = LooseVersion(version)
for param in params:
if module.params.get(param) is not None:
return LooseVersion(sdk_version.VERSION) >= version and api_version >= version
return True
class BaseModule(object):
"""
    This is the base class for oVirt modules. oVirt modules should inherit from this
    class and override methods to customize the specific needs of the module.
    The only abstract method of this class is `build_entity`, which must
    be implemented in the child class.
"""
__metaclass__ = ABCMeta
def __init__(self, connection, module, service, changed=False):
self._connection = connection
self._module = module
self._service = service
self._changed = changed
self._diff = {'after': dict(), 'before': dict()}
@property
def changed(self):
return self._changed
@changed.setter
def changed(self, changed):
if not self._changed:
self._changed = changed
@abstractmethod
def build_entity(self):
"""
        This method should return the oVirt Python SDK type which we want to
        create or update, initialized with the values passed by the Ansible module.
        For example, if we want to create a VM, we will return the following:
types.Vm(name=self._module.params['vm_name'])
:return: Specific instance of sdk.Struct.
"""
pass
def param(self, name, default=None):
"""
        Return a module parameter specified by its name.
"""
return self._module.params.get(name, default)
def update_check(self, entity):
"""
        This method checks whether the entity values are the same as the values
        passed to the Ansible module. By default we don't compare any values.
        :param entity: Entity we want to compare with Ansible module values.
        :return: True if the values are the same, so we don't need to update the entity.
"""
return True
def pre_create(self, entity):
"""
This method is called right before entity is created.
:param entity: Entity to be created or updated.
"""
pass
def post_create(self, entity):
"""
This method is called right after entity is created.
:param entity: Entity which was created.
"""
pass
def post_update(self, entity):
"""
This method is called right after entity is updated.
:param entity: Entity which was updated.
"""
pass
def diff_update(self, after, update):
for k, v in update.items():
if isinstance(v, collections.Mapping):
after[k] = self.diff_update(after.get(k, dict()), v)
else:
after[k] = update[k]
return after
def create(
self,
entity=None,
result_state=None,
fail_condition=lambda e: False,
search_params=None,
update_params=None,
**kwargs
):
"""
        Method which is called when state of the entity is 'present'. If the user
        doesn't provide the `entity` parameter, the entity is searched for using the
        `search_params` parameter. If the entity is found it's updated; whether
        the entity should be updated is checked by the `update_check` method.
        The corresponding updated entity is built by the `build_entity` method.
        A function executed after the entity is created can optionally be specified
        in the `post_create` parameter. A function executed after the entity is updated
        can optionally be specified in the `post_update` parameter.
        :param entity: Entity we want to update, if it exists.
        :param result_state: State which the entity should be in for the task to finish.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param search_params: Dictionary of parameters to be used for search.
:param update_params: The params which should be passed to update method.
:param kwargs: Additional parameters passed when creating entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
self.pre_create(entity)
if entity:
# Entity exists, so update it:
entity_service = self._service.service(entity.id)
if not self.update_check(entity):
new_entity = self.build_entity()
if not self._module.check_mode:
update_params = update_params or {}
updated_entity = entity_service.update(
new_entity,
**update_params
)
self.post_update(entity)
                # Update diffs only if the user specified the --diff parameter,
                # so we don't needlessly overload the API:
if self._module._diff:
before = get_dict_of_struct(
entity,
self._connection,
fetch_nested=True,
attributes=['name'],
)
after = before.copy()
self.diff_update(after, get_dict_of_struct(new_entity))
self._diff['before'] = before
self._diff['after'] = after
self.changed = True
else:
            # Entity doesn't exist, so create it:
if not self._module.check_mode:
entity = self._service.add(
self.build_entity(),
**kwargs
)
self.post_create(entity)
self.changed = True
# Wait for the entity to be created and to be in the defined state:
entity_service = self._service.service(entity.id)
def state_condition(entity):
return entity
if result_state:
def state_condition(entity):
return entity and entity.status == result_state
wait(
service=entity_service,
condition=state_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def pre_remove(self, entity):
"""
This method is called right before entity is removed.
:param entity: Entity which we want to remove.
"""
pass
def entity_name(self, entity):
return "{e_type} '{e_name}'".format(
e_type=type(entity).__name__.lower(),
e_name=getattr(entity, 'name', None),
)
def remove(self, entity=None, search_params=None, **kwargs):
"""
        Method which is called when state of the entity is 'absent'. If the user
        doesn't provide the `entity` parameter, the entity is searched for using the
        `search_params` parameter. If the entity is found it's removed.
Function executed before remove is executed can optionally be specified
in `pre_remove` parameter.
:param entity: Entity we want to remove.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed when removing entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
if entity is None:
return {
'changed': self.changed,
'msg': "Entity wasn't found."
}
self.pre_remove(entity)
entity_service = self._service.service(entity.id)
if not self._module.check_mode:
entity_service.remove(**kwargs)
wait(
service=entity_service,
condition=lambda entity: not entity,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
self.changed = True
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
}
def action(
self,
action,
entity=None,
action_condition=lambda e: e,
wait_condition=lambda e: e,
fail_condition=lambda e: False,
pre_action=lambda e: e,
post_action=lambda e: None,
search_params=None,
**kwargs
):
"""
This method is executed when we want to change the state of some oVirt
entity. The action to be executed on oVirt service is specified by
`action` parameter. Whether the action should be executed can be
specified by passing `action_condition` parameter. State which the
entity should be in after execution of the action can be specified
by `wait_condition` parameter.
Function executed before an action on entity can optionally be specified
in `pre_action` parameter. Function executed after an action on entity can
optionally be specified in `post_action` parameter.
:param action: Action which should be executed by service on entity.
:param entity: Entity we want to run action on.
:param action_condition: Function which is executed when checking if action should be executed.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param wait_condition: Function which is executed when waiting on result state.
:param pre_action: Function which is executed before running the action.
:param post_action: Function which is executed after running the action.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed to action.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
entity = pre_action(entity)
if entity is None:
self._module.fail_json(
msg="Entity not found, can't run action '{0}'.".format(
action
)
)
entity_service = self._service.service(entity.id)
entity = entity_service.get()
if action_condition(entity):
if not self._module.check_mode:
getattr(entity_service, action)(**kwargs)
self.changed = True
post_action(entity)
wait(
service=self._service.service(entity.id),
condition=wait_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def wait_for_import(self, condition=lambda e: True):
if self._module.params['wait']:
start = time.time()
timeout = self._module.params['timeout']
poll_interval = self._module.params['poll_interval']
while time.time() < start + timeout:
entity = self.search_entity()
if entity and condition(entity):
return entity
time.sleep(poll_interval)
def search_entity(self, search_params=None, list_params=None):
"""
        Always try to search by `id` first; if no ID is specified,
        check whether the user constructed a special search in `search_params`;
        otherwise search by `name`.
"""
entity = None
if 'id' in self._module.params and self._module.params['id'] is not None:
entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
elif search_params is not None:
entity = search_by_attributes(self._service, list_params=list_params, **search_params)
elif self._module.params.get('name') is not None:
entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])
return entity
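# Minimal, illustrative sketch of a BaseModule subclass. The entity type,
# parameters and service are assumptions chosen to show the pattern described
# in the docstrings above; they are not taken from a specific upstream module.
def _example_base_module(connection, module, tags_service):
    import ovirtsdk4.types as otypes  # standard ovirtsdk4 types module

    class TagsModule(BaseModule):
        def build_entity(self):
            # Build the SDK struct from the Ansible parameters.
            return otypes.Tag(
                name=self._module.params['name'],
                description=self._module.params.get('description'),
            )

        def update_check(self, entity):
            # Only update when the description actually differs.
            return equal(self._module.params.get('description'), entity.description)

    tags_module = TagsModule(connection=connection, module=module, service=tags_service)
    if module.params.get('state', 'present') == 'present':
        return tags_module.create()
    return tags_module.remove()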
| gpl-3.0 |
lenstr/rethinkdb | test/rql_test/connections/http_support/werkzeug/testsuite/multipart/collect.py | 248 | 1584 | #!/usr/bin/env python
"""
Hacky helper application to collect form data.
"""
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
def copy_stream(request):
from os import mkdir
from time import time
folder = 'request-%d' % time()
mkdir(folder)
environ = request.environ
f = open(folder + '/request.txt', 'wb+')
f.write(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
f.flush()
f.seek(0)
environ['wsgi.input'] = f
request.stat_folder = folder
def stats(request):
copy_stream(request)
f1 = request.files['file1']
f2 = request.files['file2']
text = request.form['text']
f1.save(request.stat_folder + '/file1.bin')
f2.save(request.stat_folder + '/file2.bin')
open(request.stat_folder + '/text.txt', 'w').write(text.encode('utf-8'))
return Response('Done.')
def upload_file(request):
return Response('''
<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="file1"><br>
<input type="file" name="file2"><br>
<textarea name="text"></textarea><br>
<input type="submit" value="Send">
</form>
''', mimetype='text/html')
def application(environ, start_response):
request = Request(environ)
if request.method == 'POST':
response = stats(request)
else:
response = upload_file(request)
    return response(environ, start_response)
if __name__ == '__main__':
run_simple('localhost', 5000, application, use_debugger=True)
| agpl-3.0 |
philippjfr/bokeh | bokeh/embed/tests/test_server.py | 1 | 11784 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
from bokeh.util.api import INTERNAL, PUBLIC ; INTERNAL, PUBLIC
from bokeh.util.testing import verify_api ; verify_api
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import bs4
# Bokeh imports
# Module under test
import bokeh.embed.server as bes
#-----------------------------------------------------------------------------
# API Definition
#-----------------------------------------------------------------------------
api = {
PUBLIC: (
( 'server_document', (1, 0, 0) ),
( 'server_session', (1, 0, 0) ),
), INTERNAL: (
( 'server_html_page_for_session', (1, 0, 0) ),
)
}
Test_api = verify_api(bes, api)
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
@pytest.fixture
def test_plot():
from bokeh.plotting import figure
test_plot = figure()
test_plot.circle([1, 2], [2, 3])
return test_plot
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
class TestServerDocument(object):
def test_invalid_resources_param(self):
with pytest.raises(ValueError):
bes.server_document(url="http://localhost:8081/foo/bar/sliders", resources=123)
with pytest.raises(ValueError):
bes.server_document(url="http://localhost:8081/foo/bar/sliders", resources="whatever")
def test_resources_default_is_implicit(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders", resources="default")
assert 'resources=' not in r
def test_resources_none(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders", resources=None)
assert 'resources=none' in r
def test_general(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders")
assert 'bokeh-app-path=/foo/bar/sliders' in r
assert 'bokeh-absolute-url=http://localhost:8081/foo/bar/sliders' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
        assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders&bokeh-absolute-url=%s" % \
("http://localhost:8081/foo/bar/sliders", divid, "http://localhost:8081/foo/bar/sliders")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src }
def test_script_attrs_arguments_provided(self):
r = bes.server_document(arguments=dict(foo=10))
assert 'foo=10' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&foo=10" % \
("http://localhost:5006", divid, "http://localhost:5006")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src }
def test_script_attrs_url_provided_absolute_resources(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders")
assert 'bokeh-app-path=/foo/bar/sliders' in r
assert 'bokeh-absolute-url=http://localhost:8081/foo/bar/sliders' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders&bokeh-absolute-url=%s" % \
("http://localhost:8081/foo/bar/sliders", divid, "http://localhost:8081/foo/bar/sliders")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src }
def test_script_attrs_url_provided(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders", relative_urls=True)
assert 'bokeh-app-path=/foo/bar/sliders' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders" % \
("http://localhost:8081/foo/bar/sliders", divid)
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src }
class TestServerSession(object):
def test_return_type(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession')
assert isinstance(r, str)
def test_script_attrs_session_id_provided(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession')
assert 'bokeh-session-id=fakesession' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&bokeh-session-id=fakesession" % \
("http://localhost:5006", divid, "http://localhost:5006")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : str(test_plot._id),
'id' : divid,
'src' : src }
def test_invalid_resources_param(self, test_plot):
with pytest.raises(ValueError):
bes.server_session(test_plot, session_id='fakesession', resources=123)
with pytest.raises(ValueError):
bes.server_session(test_plot, session_id='fakesession', resources="whatever")
def test_resources_default_is_implicit(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession', resources="default")
assert 'resources=' not in r
def test_resources_none(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession', resources=None)
assert 'resources=none' in r
def test_general(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession')
assert 'bokeh-session-id=fakesession' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
        assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&bokeh-session-id=fakesession" % \
("http://localhost:5006", divid, "http://localhost:5006")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : str(test_plot._id),
'id' : divid,
'src' : src }
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test__clean_url(object):
def test_default(self):
assert bes._clean_url("default") == bes.DEFAULT_SERVER_HTTP_URL.rstrip("/")
def test_bad_ws(self):
with pytest.raises(ValueError):
bes._clean_url("ws://foo")
def test_arg(self):
assert bes._clean_url("http://foo/bar") == "http://foo/bar"
assert bes._clean_url("http://foo/bar/") == "http://foo/bar"
class Test__get_app_path(object):
def test_arg(self):
assert bes._get_app_path("foo") == "/foo"
assert bes._get_app_path("http://foo") == "/"
assert bes._get_app_path("http://foo/bar") == "/bar"
assert bes._get_app_path("https://foo") == "/"
assert bes._get_app_path("https://foo/bar") == "/bar"
class Test__process_arguments(object):
def test_None(self):
assert bes._process_arguments(None) == ""
def test_args(self):
args = dict(foo=10, bar="baz")
r = bes._process_arguments(args)
# order unspecified
assert r == "&foo=10&bar=baz" or r == "&bar=baz&foo=10"
def test_args_ignores_bokeh_prefixed(self):
args = dict(foo=10, bar="baz")
args["bokeh-junk"] = 20
r = bes._process_arguments(args)
# order unspecified
assert r == "&foo=10&bar=baz" or r == "&bar=baz&foo=10"
class Test__process_app_path(object):
def test_root(self):
assert bes._process_app_path("/") == ""
def test_arg(self):
assert bes._process_app_path("/stuff") == "&bokeh-app-path=/stuff"
class Test__process_relative_urls(object):
def test_True(self):
assert bes._process_relative_urls(True, "") == ""
assert bes._process_relative_urls(True, "/stuff") == ""
    def test_False(self):
assert bes._process_relative_urls(False, "/stuff") == "&bokeh-absolute-url=/stuff"
class Test__process_resources(object):
def test_bad_input(self):
with pytest.raises(ValueError):
bes._process_resources("foo")
def test_None(self):
assert bes._process_resources(None) == "&resources=none"
def test_default(self):
assert bes._process_resources("default") == ""
class Test__process_session_id(object):
def test_arg(self):
assert bes._process_session_id("foo123") == "&bokeh-session-id=foo123"
class Test__src_path(object):
def test_args(self):
assert bes._src_path("http://foo", "1234") =="http://foo/autoload.js?bokeh-autoload-element=1234"
| bsd-3-clause |
google/gfw-toolkit | toolkit/third_party/oauth2client/client.py | 122 | 44282 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client.
Tools for interacting with OAuth 2.0 protected resources.
"""
__author__ = '[email protected] (Joe Gregorio)'
import base64
import clientsecrets
import copy
import datetime
import httplib2
import logging
import os
import sys
import time
import urllib
import urlparse
from oauth2client import GOOGLE_AUTH_URI
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client import util
from oauth2client.anyjson import simplejson
HAS_OPENSSL = False
HAS_CRYPTO = False
try:
from oauth2client import crypt
HAS_CRYPTO = True
if crypt.OpenSSLVerifier is not None:
HAS_OPENSSL = True
except ImportError:
pass
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)
# Expiry is stored in RFC3339 UTC format
EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Which certs to use to validate id_tokens received.
ID_TOKEN_VERIFICATON_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'
# Constant to use for the out of band OAuth 2.0 flow.
OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
# Google Data client libraries may need to set this to [401, 403].
REFRESH_STATUS_CODES = [401]
class Error(Exception):
"""Base error for this module."""
class FlowExchangeError(Error):
"""Error trying to exchange an authorization grant for an access token."""
class AccessTokenRefreshError(Error):
"""Error trying to refresh an expired access token."""
class TokenRevokeError(Error):
"""Error trying to revoke a token."""
class UnknownClientSecretsFlowError(Error):
"""The client secrets file called for an unknown type of OAuth 2.0 flow. """
class AccessTokenCredentialsError(Error):
"""Having only the access_token means no refresh is possible."""
class VerifyJwtTokenError(Error):
"""Could on retrieve certificates for validation."""
class NonAsciiHeaderError(Error):
"""Header names and values must be ASCII strings."""
def _abstract():
raise NotImplementedError('You need to override this function')
class MemoryCache(object):
"""httplib2 Cache implementation which only caches locally."""
def __init__(self):
self.cache = {}
def get(self, key):
return self.cache.get(key)
def set(self, key, value):
self.cache[key] = value
def delete(self, key):
self.cache.pop(key, None)
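# Small illustration (assumed example, not upstream code): MemoryCache provides
# the minimal get/set/delete interface httplib2 expects from a cache object,
# so an instance can be passed straight to httplib2.Http.
def _example_memory_cache():
  cache = MemoryCache()
  cache.set('key', 'value')
  assert cache.get('key') == 'value'
  cache.delete('key')
  assert cache.get('key') is None
  return httplib2.Http(cache=cache)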
class Credentials(object):
"""Base class for all Credentials objects.
Subclasses must define an authorize() method that applies the credentials to
an HTTP transport.
Subclasses must also specify a classmethod named 'from_json' that takes a JSON
  string as input and returns an instantiated Credentials object.
"""
NON_SERIALIZED_MEMBERS = ['store']
def authorize(self, http):
"""Take an httplib2.Http instance (or equivalent) and authorizes it.
Authorizes it for the set of credentials, usually by replacing
http.request() with a method that adds in the appropriate headers and then
delegates to the original Http.request() method.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
_abstract()
def refresh(self, http):
"""Forces a refresh of the access_token.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
_abstract()
def revoke(self, http):
"""Revokes a refresh_token and makes the credentials void.
Args:
http: httplib2.Http, an http object to be used to make the revoke
request.
"""
_abstract()
def apply(self, headers):
"""Add the authorization to the headers.
Args:
headers: dict, the headers to add the Authorization header to.
"""
_abstract()
def _to_json(self, strip):
"""Utility function that creates JSON repr. of a Credentials object.
Args:
strip: array, An array of names of members to not include in the JSON.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
t = type(self)
d = copy.copy(self.__dict__)
for member in strip:
if member in d:
del d[member]
if 'token_expiry' in d and isinstance(d['token_expiry'], datetime.datetime):
d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT)
    # Add in information we will need later to reconstitute this instance.
d['_class'] = t.__name__
d['_module'] = t.__module__
return simplejson.dumps(d)
def to_json(self):
"""Creating a JSON representation of an instance of Credentials.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
@classmethod
def new_from_json(cls, s):
"""Utility class method to instantiate a Credentials subclass from a JSON
representation produced by to_json().
Args:
s: string, JSON from to_json().
Returns:
An instance of the subclass of Credentials that was serialized with
to_json().
"""
data = simplejson.loads(s)
# Find and call the right classmethod from_json() to restore the object.
module = data['_module']
try:
m = __import__(module)
except ImportError:
# In case there's an object from the old package structure, update it
module = module.replace('.apiclient', '')
m = __import__(module)
m = __import__(module, fromlist=module.split('.')[:-1])
kls = getattr(m, data['_class'])
from_json = getattr(kls, 'from_json')
return from_json(s)
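# Illustrative round-trip sketch (not part of the original source; assumes a
# concrete Credentials subclass instance named `credentials`):
#
#   json_blob = credentials.to_json()
#   restored = Credentials.new_from_json(json_blob)
#
# new_from_json() reads data['_module'] and data['_class'] from the JSON and
# dispatches to that subclass's from_json(), so `restored` has the same type
# as the object that was serialized.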
@classmethod
def from_json(cls, s):
"""Instantiate a Credentials object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
s: string, JSON from to_json().
Returns:
An instance of a Credentials subclass.
"""
return Credentials()
class Flow(object):
"""Base class for all Flow objects."""
pass
class Storage(object):
"""Base class for all Storage objects.
Store and retrieve a single credential. This class supports locking
such that multiple processes and threads can operate on a single
store.
"""
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
pass
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
pass
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
_abstract()
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
_abstract()
def locked_delete(self):
"""Delete a credential.
The Storage lock must be held when this is called.
"""
_abstract()
def get(self):
"""Retrieve credential.
The Storage lock must *not* be held when this is called.
Returns:
oauth2client.client.Credentials
"""
self.acquire_lock()
try:
return self.locked_get()
finally:
self.release_lock()
def put(self, credentials):
"""Write a credential.
The Storage lock must *not* be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self.acquire_lock()
try:
self.locked_put(credentials)
finally:
self.release_lock()
def delete(self):
"""Delete credential.
Frees any resources associated with storing the credential.
The Storage lock must *not* be held when this is called.
Returns:
None
"""
self.acquire_lock()
try:
return self.locked_delete()
finally:
self.release_lock()
def clean_headers(headers):
"""Forces header keys and values to be strings, i.e not unicode.
The httplib module just concats the header keys and values in a way that may
make the message header a unicode string, which, if it then tries to
contatenate to a binary request body may result in a unicode decode error.
Args:
headers: dict, A dictionary of headers.
Returns:
The same dictionary but with all the keys converted to strings.
"""
clean = {}
try:
for k, v in headers.iteritems():
clean[str(k)] = str(v)
except UnicodeEncodeError:
raise NonAsciiHeaderError(k + ': ' + v)
return clean
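# Illustrative examples (assumed, not from the original source):
#
#   clean_headers({u'Content-Type': u'text/plain'})
#   # -> {'Content-Type': 'text/plain'}  (plain byte strings under Python 2)
#
#   clean_headers({u'X-Snowman': u'\u2603'})
#   # -> raises NonAsciiHeaderError because the value is not ASCII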
def _update_query_params(uri, params):
"""Updates a URI with new query parameters.
Args:
uri: string, A valid URI, with potential existing query parameters.
params: dict, A dictionary of query parameters.
Returns:
The same URI but with the new query parameters added.
"""
parts = list(urlparse.urlparse(uri))
query_params = dict(parse_qsl(parts[4])) # 4 is the index of the query part
query_params.update(params)
parts[4] = urllib.urlencode(query_params)
return urlparse.urlunparse(parts)
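# Illustrative example (assumed):
#
#   _update_query_params('https://example.com/auth?foo=1', {'bar': '2'})
#   # -> 'https://example.com/auth?foo=1&bar=2'  (parameter order may vary)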
class OAuth2Credentials(Credentials):
"""Credentials object for OAuth 2.0.
Credentials can be applied to an httplib2.Http object using the authorize()
method, which then adds the OAuth 2.0 access token to each request.
OAuth2Credentials objects may be safely pickled and unpickled.
"""
@util.positional(8)
def __init__(self, access_token, client_id, client_secret, refresh_token,
token_expiry, token_uri, user_agent, revoke_uri=None,
id_token=None, token_response=None):
"""Create an instance of OAuth2Credentials.
This constructor is not usually called by the user; instead,
OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
Args:
access_token: string, access token.
client_id: string, client identifier.
client_secret: string, client secret.
refresh_token: string, refresh token.
token_expiry: datetime, when the access_token expires.
token_uri: string, URI of token endpoint.
user_agent: string, The HTTP User-Agent to provide for this application.
revoke_uri: string, URI for revoke endpoint. Defaults to None; a token
can't be revoked if this is None.
id_token: object, The identity of the resource owner.
token_response: dict, the decoded response to the token request. None
if a token hasn't been requested yet. Stored because some providers
(e.g. wordpress.com) include extra fields that clients may want.
Notes:
store: callable, A callable that when passed a Credential
will store the credential back to where it came from.
This is needed to store the latest access_token if it
has expired and been refreshed.
"""
self.access_token = access_token
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = refresh_token
self.store = None
self.token_expiry = token_expiry
self.token_uri = token_uri
self.user_agent = user_agent
self.revoke_uri = revoke_uri
self.id_token = id_token
self.token_response = token_response
# True if the credentials have been revoked or expired and can't be
# refreshed.
self.invalid = False
def authorize(self, http):
"""Authorize an httplib2.Http instance with these credentials.
The modified http.request method will add authentication headers to each
request and will refresh access_tokens when a 401 is received on a
request. In addition the http.request method has a credentials property,
http.request.credentials, which is the Credentials object that authorized
it.
Args:
http: An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = credentials.authorize(h)
You can't create a new OAuth subclass of httplib2.Authentication
because it never gets passed the absolute URI, which is needed for
signing. So instead we have to overload 'request' with a closure
that adds in the Authorization header and then calls the original
version of 'request()'.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
@util.positional(1)
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
if not self.access_token:
logger.info('Attempting refresh to obtain initial access_token')
self._refresh(request_orig)
# Modify the request headers to add the appropriate
# Authorization header.
if headers is None:
headers = {}
self.apply(headers)
if self.user_agent is not None:
if 'user-agent' in headers:
headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = self.user_agent
resp, content = request_orig(uri, method, body, clean_headers(headers),
redirections, connection_type)
if resp.status in REFRESH_STATUS_CODES:
logger.info('Refreshing due to a %s' % str(resp.status))
self._refresh(request_orig)
self.apply(headers)
return request_orig(uri, method, body, clean_headers(headers),
redirections, connection_type)
else:
return (resp, content)
# Replace the request method with our own closure.
http.request = new_request
# Set credentials as a property of the request method.
setattr(http.request, 'credentials', self)
return http
def refresh(self, http):
"""Forces a refresh of the access_token.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
self._refresh(http.request)
def revoke(self, http):
"""Revokes a refresh_token and makes the credentials void.
Args:
http: httplib2.Http, an http object to be used to make the revoke
request.
"""
self._revoke(http.request)
def apply(self, headers):
"""Add the authorization to the headers.
Args:
headers: dict, the headers to add the Authorization header to.
"""
headers['Authorization'] = 'Bearer ' + self.access_token
def to_json(self):
return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
@classmethod
def from_json(cls, s):
"""Instantiate a Credentials object from a JSON description of it. The JSON
should have been produced by calling .to_json() on the object.
Args:
s: string, JSON from to_json().
Returns:
An instance of a Credentials subclass.
"""
data = simplejson.loads(s)
if 'token_expiry' in data and not isinstance(data['token_expiry'],
datetime.datetime):
try:
data['token_expiry'] = datetime.datetime.strptime(
data['token_expiry'], EXPIRY_FORMAT)
except:
data['token_expiry'] = None
retval = cls(
data['access_token'],
data['client_id'],
data['client_secret'],
data['refresh_token'],
data['token_expiry'],
data['token_uri'],
data['user_agent'],
revoke_uri=data.get('revoke_uri', None),
id_token=data.get('id_token', None),
token_response=data.get('token_response', None))
retval.invalid = data['invalid']
return retval
@property
def access_token_expired(self):
"""True if the credential is expired or invalid.
If the token_expiry isn't set, we assume the token doesn't expire.
"""
if self.invalid:
return True
if not self.token_expiry:
return False
now = datetime.datetime.utcnow()
if now >= self.token_expiry:
logger.info('access_token is expired. Now: %s, token_expiry: %s',
now, self.token_expiry)
return True
return False
def set_store(self, store):
"""Set the Storage for the credential.
Args:
store: Storage, an implementation of the Storage object.
This is needed to store the latest access_token if it
has expired and been refreshed. This implementation uses
locking to check for updates before updating the
access_token.
"""
self.store = store
def _updateFromCredential(self, other):
"""Update this Credential from another instance."""
self.__dict__.update(other.__getstate__())
def __getstate__(self):
"""Trim the state down to something that can be pickled."""
d = copy.copy(self.__dict__)
del d['store']
return d
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled."""
self.__dict__.update(state)
self.store = None
def _generate_refresh_request_body(self):
"""Generate the body that will be used in the refresh request."""
body = urllib.urlencode({
'grant_type': 'refresh_token',
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
})
return body
def _generate_refresh_request_headers(self):
"""Generate the headers that will be used in the refresh request."""
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
return headers
def _refresh(self, http_request):
"""Refreshes the access_token.
This method first checks by reading the Storage object if available.
If a refresh is still needed, it holds the Storage lock until the
refresh is completed.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
if not self.store:
self._do_refresh_request(http_request)
else:
self.store.acquire_lock()
try:
new_cred = self.store.locked_get()
if (new_cred and not new_cred.invalid and
new_cred.access_token != self.access_token):
logger.info('Updated access_token read from Storage')
self._updateFromCredential(new_cred)
else:
self._do_refresh_request(http_request)
finally:
self.store.release_lock()
def _do_refresh_request(self, http_request):
"""Refresh the access_token using the refresh_token.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
body = self._generate_refresh_request_body()
headers = self._generate_refresh_request_headers()
logger.info('Refreshing access_token')
resp, content = http_request(
self.token_uri, method='POST', body=body, headers=headers)
if resp.status == 200:
# TODO(jcgregorio) Raise an error if loads fails?
d = simplejson.loads(content)
self.token_response = d
self.access_token = d['access_token']
self.refresh_token = d.get('refresh_token', self.refresh_token)
if 'expires_in' in d:
self.token_expiry = datetime.timedelta(
seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
else:
self.token_expiry = None
if self.store:
self.store.locked_put(self)
else:
# An {'error':...} response body means the token is expired or revoked,
# so we flag the credentials as such.
logger.info('Failed to retrieve access token: %s' % content)
error_msg = 'Invalid response %s.' % resp['status']
try:
d = simplejson.loads(content)
if 'error' in d:
error_msg = d['error']
self.invalid = True
if self.store:
self.store.locked_put(self)
except StandardError:
pass
raise AccessTokenRefreshError(error_msg)
def _revoke(self, http_request):
"""Revokes the refresh_token and deletes the store if available.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the revoke request.
"""
self._do_revoke(http_request, self.refresh_token)
def _do_revoke(self, http_request, token):
"""Revokes the credentials and deletes the store if available.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
token: A string used as the token to be revoked. Can be either an
access_token or refresh_token.
Raises:
TokenRevokeError: If the revoke request does not return with a 200 OK.
"""
logger.info('Revoking token')
query_params = {'token': token}
token_revoke_uri = _update_query_params(self.revoke_uri, query_params)
resp, content = http_request(token_revoke_uri)
if resp.status == 200:
self.invalid = True
else:
error_msg = 'Invalid response %s.' % resp.status
try:
d = simplejson.loads(content)
if 'error' in d:
error_msg = d['error']
except StandardError:
pass
raise TokenRevokeError(error_msg)
if self.store:
self.store.delete()
class AccessTokenCredentials(OAuth2Credentials):
"""Credentials object for OAuth 2.0.
Credentials can be applied to an httplib2.Http object using the
authorize() method, which then signs each request from that object
with the OAuth 2.0 access token. This set of credentials is for the
use case where you have acquired an OAuth 2.0 access_token from
another place such as a JavaScript client or another web
application, and wish to use it from Python. Because only the
access_token is present it can not be refreshed and will in time
expire.
AccessTokenCredentials objects may be safely pickled and unpickled.
Usage:
credentials = AccessTokenCredentials('<an access token>',
'my-user-agent/1.0')
http = httplib2.Http()
http = credentials.authorize(http)
Exceptions:
AccessTokenCredentialsError: raised when the access_token expires or is
revoked.
"""
def __init__(self, access_token, user_agent, revoke_uri=None):
"""Create an instance of OAuth2Credentials
This is one of the few types if Credentials that you should contrust,
Credentials objects are usually instantiated by a Flow.
Args:
access_token: string, access token.
user_agent: string, The HTTP User-Agent to provide for this application.
revoke_uri: string, URI for revoke endpoint. Defaults to None; a token
can't be revoked if this is None.
"""
super(AccessTokenCredentials, self).__init__(
access_token,
None,
None,
None,
None,
None,
user_agent,
revoke_uri=revoke_uri)
@classmethod
def from_json(cls, s):
data = simplejson.loads(s)
retval = AccessTokenCredentials(
data['access_token'],
data['user_agent'])
return retval
def _refresh(self, http_request):
raise AccessTokenCredentialsError(
'The access_token is expired or invalid and can\'t be refreshed.')
def _revoke(self, http_request):
"""Revokes the access_token and deletes the store if available.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the revoke request.
"""
self._do_revoke(http_request, self.access_token)
class AssertionCredentials(OAuth2Credentials):
"""Abstract Credentials object used for OAuth 2.0 assertion grants.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens. It must
be subclassed to generate the appropriate assertion string.
AssertionCredentials objects may be safely pickled and unpickled.
"""
@util.positional(2)
def __init__(self, assertion_type, user_agent=None,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
**unused_kwargs):
"""Constructor for AssertionFlowCredentials.
Args:
assertion_type: string, assertion type that will be declared to the auth
server
user_agent: string, The HTTP User-Agent to provide for this application.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint.
"""
super(AssertionCredentials, self).__init__(
None,
None,
None,
None,
None,
token_uri,
user_agent,
revoke_uri=revoke_uri)
self.assertion_type = assertion_type
def _generate_refresh_request_body(self):
assertion = self._generate_assertion()
body = urllib.urlencode({
'assertion': assertion,
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
})
return body
def _generate_assertion(self):
"""Generate the assertion string that will be used in the access token
request.
"""
_abstract()
def _revoke(self, http_request):
"""Revokes the access_token and deletes the store if available.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the revoke request.
"""
self._do_revoke(http_request, self.access_token)
if HAS_CRYPTO:
# PyOpenSSL and PyCrypto are not prerequisites for oauth2client, so if they are
# missing then don't create the SignedJwtAssertionCredentials or the
# verify_id_token() method.
class SignedJwtAssertionCredentials(AssertionCredentials):
"""Credentials object used for OAuth 2.0 Signed JWT assertion grants.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
SignedJwtAssertionCredentials requires either PyOpenSSL, or PyCrypto 2.6 or
later. For App Engine you may also consider using AppAssertionCredentials.
"""
MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
@util.positional(4)
def __init__(self,
service_account_name,
private_key,
scope,
private_key_password='notasecret',
user_agent=None,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
**kwargs):
"""Constructor for SignedJwtAssertionCredentials.
Args:
service_account_name: string, id for account, usually an email address.
private_key: string, private key in PKCS12 or PEM format.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
private_key_password: string, password for private_key, unused if
private_key is in PEM format.
user_agent: string, HTTP User-Agent to provide for this application.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint.
kwargs: kwargs, Additional parameters to add to the JWT token, for
example [email protected]."""
super(SignedJwtAssertionCredentials, self).__init__(
None,
user_agent=user_agent,
token_uri=token_uri,
revoke_uri=revoke_uri,
)
self.scope = util.scopes_to_string(scope)
# Keep base64 encoded so it can be stored in JSON.
self.private_key = base64.b64encode(private_key)
self.private_key_password = private_key_password
self.service_account_name = service_account_name
self.kwargs = kwargs
@classmethod
def from_json(cls, s):
data = simplejson.loads(s)
retval = SignedJwtAssertionCredentials(
data['service_account_name'],
base64.b64decode(data['private_key']),
data['scope'],
private_key_password=data['private_key_password'],
user_agent=data['user_agent'],
token_uri=data['token_uri'],
**data['kwargs']
)
retval.invalid = data['invalid']
retval.access_token = data['access_token']
return retval
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
now = long(time.time())
payload = {
'aud': self.token_uri,
'scope': self.scope,
'iat': now,
'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS,
'iss': self.service_account_name
}
payload.update(self.kwargs)
logger.debug(str(payload))
private_key = base64.b64decode(self.private_key)
return crypt.make_signed_jwt(crypt.Signer.from_string(
private_key, self.private_key_password), payload)
# Only used in verify_id_token(), which is always calling to the same URI
# for the certs.
_cached_http = httplib2.Http(MemoryCache())
@util.positional(2)
def verify_id_token(id_token, audience, http=None,
cert_uri=ID_TOKEN_VERIFICATON_CERTS):
"""Verifies a signed JWT id_token.
This function requires PyOpenSSL and because of that it does not work on
App Engine.
Args:
id_token: string, A Signed JWT.
audience: string, The audience 'aud' that the token should be for.
http: httplib2.Http, instance to use to make the HTTP request. Callers
should supply an instance that has caching enabled.
cert_uri: string, URI of the certificates in JSON format to
verify the JWT against.
Returns:
The deserialized JSON in the JWT.
Raises:
oauth2client.crypt.AppIdentityError if the JWT fails to verify.
"""
if http is None:
http = _cached_http
resp, content = http.request(cert_uri)
if resp.status == 200:
certs = simplejson.loads(content)
return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)
else:
raise VerifyJwtTokenError('Status code: %d' % resp.status)
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
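# Illustrative example (assumed): the helper re-pads the input before decoding,
# so unpadded JWT segments decode cleanly.
#
#   _urlsafe_b64decode(u'eyJpc3MiOiJhIn0')
#   # -> '{"iss":"a"}'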
def _extract_id_token(id_token):
"""Extract the JSON payload from a JWT.
Does the extraction without checking the signature.
Args:
id_token: string, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload.
"""
segments = id_token.split('.')
if (len(segments) != 3):
raise VerifyJwtTokenError(
'Wrong number of segments in token: %s' % id_token)
return simplejson.loads(_urlsafe_b64decode(segments[1]))
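# Illustrative example (assumed): only the middle segment of a JWT of the form
# 'header.payload.signature' is decoded; the signature is not verified.
#
#   _extract_id_token('xxxx.eyJpc3MiOiJhIn0.yyyy')
#   # -> {u'iss': u'a'}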
def _parse_exchange_token_response(content):
"""Parses response of an exchange token request.
Most providers return JSON but some (e.g. Facebook) return a
url-encoded string.
Args:
content: The body of a response
Returns:
Content as a dictionary object. Note that the dict could be empty,
i.e. {}. That basically indicates a failure.
"""
resp = {}
try:
resp = simplejson.loads(content)
except StandardError:
# different JSON libs raise different exceptions,
# so we just do a catch-all here
resp = dict(parse_qsl(content))
# some providers respond with 'expires', others with 'expires_in'
if resp and 'expires' in resp:
resp['expires_in'] = resp.pop('expires')
return resp
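# Illustrative examples (assumed):
#
#   _parse_exchange_token_response('{"access_token": "t", "expires": 3600}')
#   # -> {u'access_token': u't', u'expires_in': 3600}
#
#   _parse_exchange_token_response('access_token=t&expires=3600')
#   # -> {'access_token': 't', 'expires_in': '3600'}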
@util.positional(4)
def credentials_from_code(client_id, client_secret, scope, code,
redirect_uri='postmessage', http=None,
user_agent=None, token_uri=GOOGLE_TOKEN_URI,
auth_uri=GOOGLE_AUTH_URI,
revoke_uri=GOOGLE_REVOKE_URI):
"""Exchanges an authorization code for an OAuth2Credentials object.
Args:
client_id: string, client identifier.
client_secret: string, client secret.
scope: string or iterable of strings, scope(s) to request.
code: string, An authorization code, most likely passed down from
the client
redirect_uri: string, this is generally set to 'postmessage' to match the
redirect_uri that the client specified
http: httplib2.Http, optional http instance to use to do the fetch
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
Returns:
An OAuth2Credentials object.
Raises:
FlowExchangeError if the authorization code cannot be exchanged for an
access token
"""
flow = OAuth2WebServerFlow(client_id, client_secret, scope,
redirect_uri=redirect_uri, user_agent=user_agent,
auth_uri=auth_uri, token_uri=token_uri,
revoke_uri=revoke_uri)
credentials = flow.step2_exchange(code, http=http)
return credentials
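# Illustrative usage sketch (assumed client id, secret, scope and code values):
#
#   credentials = credentials_from_code(
#       'my-client-id', 'my-client-secret',
#       'https://www.googleapis.com/auth/userinfo.email', code_from_client)
#   http = credentials.authorize(httplib2.Http())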
@util.positional(3)
def credentials_from_clientsecrets_and_code(filename, scope, code,
message = None,
redirect_uri='postmessage',
http=None,
cache=None):
"""Returns OAuth2Credentials from a clientsecrets file and an auth code.
Will create the right kind of Flow based on the contents of the clientsecrets
file or will raise InvalidClientSecretsError for unknown types of Flows.
Args:
filename: string, File name of clientsecrets.
scope: string or iterable of strings, scope(s) to request.
code: string, An authorization code, most likely passed down from
the client
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. If message is provided then
sys.exit will be called in the case of an error. If message is not
provided then clientsecrets.InvalidClientSecretsError will be raised.
redirect_uri: string, this is generally set to 'postmessage' to match the
redirect_uri that the client specified
http: httplib2.Http, optional http instance to use to do the fetch
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns:
An OAuth2Credentials object.
Raises:
FlowExchangeError if the authorization code cannot be exchanged for an
access token
UnknownClientSecretsFlowError if the file describes an unknown kind of Flow.
clientsecrets.InvalidClientSecretsError if the clientsecrets file is
invalid.
"""
flow = flow_from_clientsecrets(filename, scope, message=message, cache=cache,
redirect_uri=redirect_uri)
credentials = flow.step2_exchange(code, http=http)
return credentials
class OAuth2WebServerFlow(Flow):
"""Does the Web Server Flow for OAuth 2.0.
OAuth2WebServerFlow objects may be safely pickled and unpickled.
"""
@util.positional(4)
def __init__(self, client_id, client_secret, scope,
redirect_uri=None,
user_agent=None,
auth_uri=GOOGLE_AUTH_URI,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
**kwargs):
"""Constructor for OAuth2WebServerFlow.
The kwargs argument is used to set extra query parameters on the
auth_uri. For example, the access_type and approval_prompt
query parameters can be set via kwargs.
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server.
user_agent: string, HTTP User-Agent to provide for this application.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
**kwargs: dict, The keyword arguments are all optional and required
parameters for the OAuth calls.
"""
self.client_id = client_id
self.client_secret = client_secret
self.scope = util.scopes_to_string(scope)
self.redirect_uri = redirect_uri
self.user_agent = user_agent
self.auth_uri = auth_uri
self.token_uri = token_uri
self.revoke_uri = revoke_uri
self.params = {
'access_type': 'offline',
'response_type': 'code',
}
self.params.update(kwargs)
@util.positional(1)
def step1_get_authorize_url(self, redirect_uri=None):
"""Returns a URI to redirect to the provider.
Args:
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server. This parameter is deprecated, please move to
passing the redirect_uri in via the constructor.
Returns:
A URI as a string to redirect the user to begin the authorization flow.
"""
if redirect_uri is not None:
logger.warning(('The redirect_uri parameter for '
'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. Please '
'move to passing the redirect_uri in via the constructor.'))
self.redirect_uri = redirect_uri
if self.redirect_uri is None:
raise ValueError('The value of redirect_uri must not be None.')
query_params = {
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'scope': self.scope,
}
query_params.update(self.params)
return _update_query_params(self.auth_uri, query_params)
@util.positional(2)
def step2_exchange(self, code, http=None):
"""Exhanges a code for OAuth2Credentials.
Args:
code: string or dict, either the code as a string, or a dictionary
of the query parameters to the redirect_uri, which contains
the code.
http: httplib2.Http, optional http instance to use to do the fetch
Returns:
An OAuth2Credentials object that can be used to authorize requests.
Raises:
FlowExchangeError if a problem occurred exchanging the code for a
refresh_token.
"""
if not (isinstance(code, str) or isinstance(code, unicode)):
if 'code' not in code:
if 'error' in code:
error_msg = code['error']
else:
error_msg = 'No code was supplied in the query parameters.'
raise FlowExchangeError(error_msg)
else:
code = code['code']
body = urllib.urlencode({
'grant_type': 'authorization_code',
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'redirect_uri': self.redirect_uri,
'scope': self.scope,
})
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
if http is None:
http = httplib2.Http()
resp, content = http.request(self.token_uri, method='POST', body=body,
headers=headers)
d = _parse_exchange_token_response(content)
if resp.status == 200 and 'access_token' in d:
access_token = d['access_token']
refresh_token = d.get('refresh_token', None)
token_expiry = None
if 'expires_in' in d:
token_expiry = datetime.datetime.utcnow() + datetime.timedelta(
seconds=int(d['expires_in']))
if 'id_token' in d:
d['id_token'] = _extract_id_token(d['id_token'])
logger.info('Successfully retrieved access token')
return OAuth2Credentials(access_token, self.client_id,
self.client_secret, refresh_token, token_expiry,
self.token_uri, self.user_agent,
revoke_uri=self.revoke_uri,
id_token=d.get('id_token', None),
token_response=d)
else:
logger.info('Failed to retrieve access token: %s' % content)
if 'error' in d:
# you never know what those providers got to say
error_msg = unicode(d['error'])
else:
error_msg = 'Invalid response: %s.' % str(resp.status)
raise FlowExchangeError(error_msg)
@util.positional(2)
def flow_from_clientsecrets(filename, scope, redirect_uri=None,
message=None, cache=None):
"""Create a Flow from a clientsecrets file.
Will create the right kind of Flow based on the contents of the clientsecrets
file or will raise InvalidClientSecretsError for unknown types of Flows.
Args:
filename: string, File name of client secrets.
scope: string or iterable of strings, scope(s) to request.
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. If message is provided then
sys.exit will be called in the case of an error. If message is not
provided then clientsecrets.InvalidClientSecretsError will be raised.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns:
A Flow object.
Raises:
UnknownClientSecretsFlowError if the file describes an unknown kind of Flow.
clientsecrets.InvalidClientSecretsError if the clientsecrets file is
invalid.
"""
try:
client_type, client_info = clientsecrets.loadfile(filename, cache=cache)
if client_type in (clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED):
constructor_kwargs = {
'redirect_uri': redirect_uri,
'auth_uri': client_info['auth_uri'],
'token_uri': client_info['token_uri'],
}
revoke_uri = client_info.get('revoke_uri')
if revoke_uri is not None:
constructor_kwargs['revoke_uri'] = revoke_uri
return OAuth2WebServerFlow(
client_info['client_id'], client_info['client_secret'],
scope, **constructor_kwargs)
except clientsecrets.InvalidClientSecretsError:
if message:
sys.exit(message)
else:
raise
else:
raise UnknownClientSecretsFlowError(
'This OAuth 2.0 flow is unsupported: %r' % client_type)
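# Illustrative usage sketch (assumed file name and scope):
#
#   flow = flow_from_clientsecrets(
#       'client_secrets.json',
#       scope='https://www.googleapis.com/auth/drive',
#       redirect_uri='urn:ietf:wg:oauth:2.0:oob')
#   auth_url = flow.step1_get_authorize_url()
#   credentials = flow.step2_exchange(code)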
| apache-2.0 |
40023256/2015cdag1man | static/Brython3.1.1-20150328-091302/Lib/_string.py | 625 | 1112 | """string helper module"""
import re
class __loader__(object):
pass
def formatter_field_name_split(fieldname):
"""split the argument as a field name"""
_list=[]
for _name in fieldname:
_parts = _name.split('.')
for _item in _parts:
is_attr=False #fix me
if re.match('\d+', _item):
_list.append((int(_item), is_attr))
else:
_list.append((_item, is_attr))
return _list[0][0], iter(_list[1:])
def formatter_parser(*args,**kw):
"""parse the argument as a format string"""
assert len(args)==1
assert isinstance(args[0], str)
_result=[]
for _match in re.finditer("([^{]*)?(\{[^}]*\})?", args[0]):
_pre, _fmt = _match.groups()
if _fmt is None:
_result.append((_pre, None, None, None))
elif _fmt == '{}':
_result.append((_pre, '', '', None))
else:
_m=re.match("\{([^!]*)!?(.*)?\}", _fmt)
_name=_m.group(1)
_flags=_m.group(2)
_result.append((_pre, _name, _flags, None))
return _result
| gpl-3.0 |
ptrendx/mxnet | tests/nightly/test_kvstore.py | 12 | 10372 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
sys.path.insert(0, "../../python/")
import mxnet as mx
import numpy as np
import numpy.random as rnd
import copy
from mxnet.test_utils import assert_almost_equal
def check_diff_to_scalar(A, x, rank=None):
""" assert A == x"""
assert(np.sum(np.abs((A - x).asnumpy())) == 0), (rank, A.asnumpy(), x)
def compute_expected_2bit_quantization(arr, curr_residual, threshold):
from struct import pack,unpack
def bits2int(bits):
bits = [int(x) for x in bits[::-1]]
x = 0
for i in range(len(bits)):
x += bits[i]*2**i
return x
def as_float32(s):
return unpack("f",pack("I", bits2int(s)))[0]
# str_quant stores the quantized representation as a sequence of bits
str_quant = ''
new_residual = []
decompr = []
arr_npy = arr.asnumpy()
for i, a in np.ndenumerate(arr_npy):
a += curr_residual[i]
if a >= threshold:
str_quant += '11'
new_residual.append(a - threshold)
decompr.append(threshold)
elif a <= (-1*threshold):
str_quant += '10'
new_residual.append(a + threshold)
decompr.append(-1*threshold)
else:
str_quant += '00'
new_residual.append(a)
decompr.append(0)
# pad with extra bits when the bit-string length is not a multiple of 16
if len(str_quant)%16 != 0:
str_quant += '0'*(16 - len(str_quant)%16)
compr = []
# converts the generated bit string into float32 values, 32 bits at a time
i = 0
while i<len(str_quant):
cur_float = str_quant[i+24:i+32] + str_quant[i+16:i+24] + str_quant[i+8:i+16] + str_quant[i:i+8]
compr.append(as_float32(cur_float))
i+=32
return np.array(compr), np.array(new_residual).reshape(arr.shape), np.array(decompr).reshape(arr.shape)
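# Illustrative example (assumed): with threshold 0.5, values >= 0.5 quantize to
# +threshold, values <= -0.5 quantize to -threshold, everything else to 0, and
# the leftover difference is carried forward as residual.
#
#   arr = mx.nd.array([0.6, -0.7, 0.2])
#   compr, residual, decompr = compute_expected_2bit_quantization(
#       arr, np.zeros(3), 0.5)
#   # decompr -> [0.5, -0.5, 0.0], residual -> [0.1, -0.2, 0.2]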
## individual key interface
def test_kvstore(kv_type, stype):
print(kv_type)
kv = mx.kv.create(kv_type)
kv.set_optimizer(mx.optimizer.create('test', rescale_grad=lr))
for k, s in zip(keys, shapes):
kv.init(k, mx.nd.zeros(s))
res = [np.zeros(s) for s in shapes]
for i in range(nrepeat):
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.array(
data[i][j][g], mx.gpu(g)).tostype(stype) for g in range(nworker)])
res = [a + b * lr for a, b in zip(res, [sum(d) for d in data[i]])]
for j in range(len(keys)):
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
err = [np.sum(np.abs(o.asnumpy() - res[j])) for o in out]
err = sum(err) / np.sum(np.abs(res[j]))
assert(err < 1e-6), (err, shapes[j])
def test_compress_kvstore(kv_type, compression='2bit', threshold=0.5):
print(kv_type + ' with ' + compression + ' compression')
rate = 2
kv = mx.kv.create(kv_type)
kv.set_gradient_compression({'type':compression, 'threshold':threshold})
kv.set_optimizer(mx.optimizer.create('test', rescale_grad=rate))
for k, s in zip(keys, shapes):
kv.init(k, mx.nd.zeros(s))
# init one key with 1s so we can check if it was compressed during init
kv.init(gc_init_test_key, mx.nd.ones(shapes[0]))
# use different keys for random tests so that
# we can track residual from start
random_keys = [13, 15, 17]
for k, s in zip(random_keys, shapes):
kv.init(k, mx.nd.zeros(s))
def pull_init_test(kv):
# checks that compression is not applied to init of key
out = [mx.nd.zeros(shapes[0], mx.gpu(g)) for g in range(nworker)]
kv.pull(gc_init_test_key, out=out)
exp = np.ones_like(out[0].asnumpy())
for o in out:
assert_almost_equal(o.asnumpy(), exp)
def pull_before_push(kv):
for i in range(nrepeat):
for j in range(len(keys)):
out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
exp = np.zeros_like(out[0].asnumpy())
for o in out:
assert_almost_equal(o.asnumpy(), exp)
def push_zeros(kv):
for i in range(nrepeat):
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)])
out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
exp = np.zeros_like(out[0].asnumpy())
for o in out:
assert_almost_equal(o.asnumpy(), exp)
def verify_residual(kv, threshold, rate):
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*0.4 for g in range(nworker)])
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j],out=out)
for o in out:
check_diff_to_scalar(o, 0)
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*(threshold-0.3) for g in range(nworker)])
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j],out=out)
curval = threshold * rate * nworker
for o in out:
check_diff_to_scalar(o, curval)
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*(0.2) for g in range(nworker)])
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j],out=out)
for o in out:
check_diff_to_scalar(o, curval)
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*(threshold-0.3) for g in range(nworker)])
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j],out=out)
curval += threshold*rate*nworker
for o in out:
check_diff_to_scalar(o, curval)
# residual would be 0 now
return curval
def check_neg(kv, neg, rate, curval):
for r in range(nrepeat):
curval = curval + rate*nworker*neg
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*neg for g in range(nworker)])
out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
for o in out:
check_diff_to_scalar(o, curval)
# residual would be 0 again
def check_compr_random(kv, threshold):
for k, s in zip(random_keys, shapes):
curr_residual = [np.zeros(s) for g in range(nworker)]
orig_val = [mx.nd.zeros(s, mx.gpu(g)) for g in range(nworker)]
kv.pull(k, out=orig_val)
grads = [mx.nd.random_uniform(-0.6, 0.6, shape=s, ctx=mx.gpu(g)) for g in range(nworker)]
grads_cpy = copy.deepcopy(grads)
kv.push(k, grads)
val = [mx.nd.zeros(s, mx.gpu(g)) for g in range(nworker)]
kv.pull(k, out=val)
diffs = [val[g] - orig_val[g] for g in range(nworker)]
# compute expected by using simulation of operator
# on cpu
sum_dequantized_vals = np.zeros(s)
for g in range(nworker):
compr, curr_residual[g], decompr = compute_expected_2bit_quantization(
grads_cpy[g], curr_residual[g], threshold)
sum_dequantized_vals += (decompr * rate)
for g in range(nworker):
assert_almost_equal(diffs[g].asnumpy(), sum_dequantized_vals)
pull_init_test(kv)
pull_before_push(kv)
push_zeros(kv)
curval = verify_residual(kv, threshold, rate)
check_neg(kv, -1*threshold, rate, curval)
check_compr_random(kv, threshold)
## group keys interface
def test_group_kvstore(kv_type, stype):
print(kv_type)
kv = mx.kv.create(kv_type)
kv.set_optimizer(mx.optimizer.create('test', rescale_grad=lr))
kv.init(keys, [mx.nd.zeros(s) for s in shapes])
res = [np.zeros(s) for s in shapes]
out = [[mx.nd.zeros(s, mx.gpu(g)) for g in range(nworker)] for s in shapes]
for i in range(nrepeat):
kv.push(keys, [[
mx.nd.array(data[i][j][g], mx.gpu(g)).tostype(stype) for g in range(nworker)]
for j in range(len(keys))])
kv.pull(keys, out=out)
res = [a + b * lr for a, b in zip(res, [sum(d) for d in data[i]])]
for a, b in zip(res, out):
err = [np.sum(np.abs(o.asnumpy() - a)) for o in b]
err = sum(err) / np.sum(np.abs(a))
assert(err < 1e-6), (err, a.shape)
if __name__ == "__main__":
keys = [3, 5, 7]
# let the last shape exceed MXNET_KVSTORE_BIGARRAY_BOUND
shapes = [(4, 4), (100, 100), (2000, 2000)]
stypes = ['default', 'row_sparse']
gc_init_test_key = 9
lr = .1
nworker = 4
nrepeat = 10
# generate data
data = [[[np.random.random(s)*2-1 for i in range(nworker)] for s in shapes] for j in range(nrepeat)]
for stype in stypes:
test_kvstore('local_update_cpu', stype)
test_kvstore('local_allreduce_cpu', stype)
test_kvstore('local_allreduce_device', stype)
## compression for local kvstore happens only when reduce is on device
test_compress_kvstore('local_allreduce_device')
for stype in stypes:
test_group_kvstore('local_update_cpu', stype)
test_group_kvstore('local_allreduce_cpu', stype)
test_group_kvstore('local_allreduce_device', stype)
| apache-2.0 |
dannyperez/bolivarcoin | share/seeds/generate-seeds.py | 79 | 4297 | #!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
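# Illustrative examples (assumed):
#
#   name_to_ipv6('1.2.3.4')
#   # -> 16-byte bytearray for the IPv4-mapped address ::ffff:1.2.3.4
#
#   name_to_ipv6('2001:db8::1')
#   # -> 16-byte bytearray for that IPv6 address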
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
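# Illustrative examples (assumed):
#
#   parse_spec('1.2.3.4:8333', 8333)         # -> (<16-byte address>, 8333)
#   parse_spec('[2001:db8::1]:18333', 8333)  # -> (<16-byte address>, 18333)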
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 18333)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| mit |
Panos512/inspire-next | inspirehep/modules/authors/__init__.py | 3 | 1142 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""INSPIRE authors."""
from __future__ import absolute_import, print_function
from .ext import INSPIREAuthors
from .receivers import *
__all__ = ('INSPIREAuthors', )
| gpl-2.0 |
alphagov/notifications-admin | tests/app/main/views/test_notifications.py | 1 | 29605 | import base64
from functools import partial
from unittest.mock import mock_open
import pytest
from flask import url_for
from freezegun import freeze_time
from notifications_python_client.errors import APIError
from PyPDF2.utils import PdfReadError
from tests.conftest import (
SERVICE_ONE_ID,
create_active_caseworking_user,
create_active_user_with_permissions,
create_notification,
normalize_spaces,
)
@pytest.mark.parametrize('key_type, notification_status, expected_status', [
(None, 'created', 'Sending'),
(None, 'sending', 'Sending'),
(None, 'delivered', 'Delivered'),
(None, 'failed', 'Failed'),
(None, 'temporary-failure', 'Phone not accepting messages right now'),
(None, 'permanent-failure', 'Not delivered'),
(None, 'technical-failure', 'Technical failure'),
('team', 'delivered', 'Delivered'),
('live', 'delivered', 'Delivered'),
('test', 'sending', 'Sending (test)'),
('test', 'delivered', 'Delivered (test)'),
('test', 'permanent-failure', 'Not delivered (test)'),
])
@pytest.mark.parametrize('user', [
create_active_user_with_permissions(),
create_active_caseworking_user(),
])
@freeze_time("2016-01-01 11:09:00.061258")
def test_notification_status_page_shows_details(
client_request,
mocker,
mock_has_no_jobs,
service_one,
fake_uuid,
user,
key_type,
notification_status,
expected_status,
):
mocker.patch('app.user_api_client.get_user', return_value=user)
notification = create_notification(notification_status=notification_status, key_type=key_type)
_mock_get_notification = mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=service_one['id'],
notification_id=fake_uuid
)
assert normalize_spaces(page.select('.sms-message-recipient')[0].text) == (
'To: 07123456789'
)
assert normalize_spaces(page.select('.sms-message-wrapper')[0].text) == (
'service one: hello Jo'
)
assert normalize_spaces(page.select('.ajax-block-container p')[0].text) == (
expected_status
)
_mock_get_notification.assert_called_with(
service_one['id'],
fake_uuid
)
@pytest.mark.parametrize('notification_type, notification_status, expected_class', [
('sms', 'failed', 'error'),
('email', 'failed', 'error'),
('sms', 'sent', 'sent-international'),
('email', 'sent', None),
('sms', 'created', 'default'),
('email', 'created', 'default'),
])
@freeze_time("2016-01-01 11:09:00.061258")
def test_notification_status_page_formats_email_and_sms_status_correctly(
client_request,
mocker,
mock_has_no_jobs,
service_one,
fake_uuid,
active_user_with_permissions,
notification_type,
notification_status,
expected_class,
):
mocker.patch('app.user_api_client.get_user', return_value=active_user_with_permissions)
notification = create_notification(notification_status=notification_status, template_type=notification_type)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=service_one['id'],
notification_id=fake_uuid
)
assert page.select_one(f'.ajax-block-container p.notification-status.{expected_class}')
@pytest.mark.parametrize('template_redaction_setting, expected_content', [
(False, 'service one: hello Jo'),
(True, 'service one: hello hidden'),
])
@freeze_time("2016-01-01 11:09:00.061258")
def test_notification_status_page_respects_redaction(
client_request,
mocker,
service_one,
fake_uuid,
template_redaction_setting,
expected_content,
):
_mock_get_notification = mocker.patch(
'app.notification_api_client.get_notification',
return_value=create_notification(redact_personalisation=template_redaction_setting)
)
page = client_request.get(
'main.view_notification',
service_id=service_one['id'],
notification_id=fake_uuid
)
assert normalize_spaces(page.select('.sms-message-wrapper')[0].text) == expected_content
_mock_get_notification.assert_called_with(
service_one['id'],
fake_uuid,
)
@pytest.mark.parametrize('extra_args, expected_back_link', [
(
{},
partial(url_for, 'main.view_notifications', message_type='sms', status='sending,delivered,failed'),
),
(
{'from_job': 'job_id'},
partial(url_for, 'main.view_job', job_id='job_id'),
),
(
{'from_uploaded_letters': '2020-02-02'},
partial(url_for, 'main.uploaded_letters', letter_print_day='2020-02-02'),
),
(
{'help': '0'},
None,
),
(
{'help': '1'},
None,
),
(
{'help': '2'},
None,
),
])
def test_notification_status_shows_expected_back_link(
client_request,
mocker,
mock_get_notification,
fake_uuid,
extra_args,
expected_back_link,
):
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
**extra_args
)
back_link = page.select_one('.govuk-back-link')
if expected_back_link:
assert back_link['href'] == expected_back_link(service_id=SERVICE_ONE_ID)
else:
assert back_link is None
@pytest.mark.parametrize('time_of_viewing_page, expected_message', (
('2012-01-01 01:01', (
"‘sample template’ was sent by Test User today at 1:01am"
)),
('2012-01-02 01:01', (
"‘sample template’ was sent by Test User yesterday at 1:01am"
)),
('2012-01-03 01:01', (
"‘sample template’ was sent by Test User on 1 January at 1:01am"
)),
('2013-01-03 01:01', (
"‘sample template’ was sent by Test User on 1 January 2012 at 1:01am"
)),
))
def test_notification_page_doesnt_link_to_template_in_tour(
mocker,
client_request,
fake_uuid,
mock_get_notification,
time_of_viewing_page,
expected_message,
):
with freeze_time('2012-01-01 01:01'):
notification = create_notification()
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
with freeze_time(time_of_viewing_page):
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
help=3,
)
assert normalize_spaces(page.select('main p:nth-of-type(1)')[0].text) == (
expected_message
)
assert len(page.select('main p:nth-of-type(1) a')) == 0
@freeze_time("2016-01-01 01:01")
def test_notification_page_shows_page_for_letter_notification(
client_request,
mocker,
fake_uuid,
):
count_of_pages = 3
notification = create_notification(notification_status='created', template_type='letter', postage='second')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mock_page_count = mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=count_of_pages
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p:nth-of-type(1)')[0].text) == (
"‘sample template’ was sent by Test User today at 1:01am"
)
assert normalize_spaces(page.select('main p:nth-of-type(2)')[0].text) == (
'Printing starts today at 5:30pm'
)
assert normalize_spaces(page.select('main p:nth-of-type(3)')[0].text) == (
'Estimated delivery date: Wednesday 6 January'
)
assert len(page.select('.letter-postage')) == 1
assert normalize_spaces(page.select_one('.letter-postage').text) == (
'Postage: second class'
)
assert page.select_one('.letter-postage')['class'] == [
'letter-postage', 'letter-postage-second'
]
assert page.select('p.notification-status') == []
letter_images = page.select('main img')
assert len(letter_images) == count_of_pages
for index in range(count_of_pages):
assert page.select('img')[index]['src'].endswith(
'.png?page={}'.format(index + 1)
)
assert len(mock_page_count.call_args_list) == 1
assert mock_page_count.call_args_list[0][0][0]['name'] == 'sample template'
assert mock_page_count.call_args_list[0][1]['values'] == {'name': 'Jo'}
@freeze_time("2020-01-01 00:00")
def test_notification_page_shows_uploaded_letter(
client_request,
mocker,
fake_uuid,
):
mocker.patch(
'app.main.views.notifications.get_letter_file_data',
return_value=(b'foo', {
'message': '',
'invalid_pages': '[]',
'page_count': '1'
})
)
mocker.patch(
'app.main.views.notifications.pdf_page_count',
return_value=1
)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1,
)
notification = create_notification(
notification_status='created',
template_type='letter',
is_precompiled_letter=True,
sent_one_off=True,
)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p:nth-of-type(1)')[0].text) == (
'Uploaded by Test User yesterday at midnight'
)
assert normalize_spaces(page.select('main p:nth-of-type(2)')[0].text) == (
'Printing starts today at 5:30pm'
)
@freeze_time("2016-01-01 01:01")
@pytest.mark.parametrize('is_precompiled_letter, expected_p1, expected_p2, expected_postage', (
(
True,
'Provided as PDF today at 1:01am',
'This letter passed our checks, but we will not print it because you used a test key.',
'Postage: second class'
),
(
False,
'‘sample template’ was sent today at 1:01am',
'We will not print this letter because you used a test key.',
'Postage: second class',
),
))
def test_notification_page_shows_page_for_letter_sent_with_test_key(
client_request,
mocker,
fake_uuid,
is_precompiled_letter,
expected_p1,
expected_p2,
expected_postage,
):
if is_precompiled_letter:
mocker.patch(
'app.main.views.notifications.get_letter_file_data',
return_value=(b'foo', {
'message': '',
'invalid_pages': '[]',
'page_count': '1'
})
)
mocker.patch(
'app.main.views.notifications.pdf_page_count',
return_value=1
)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1,
)
notification = create_notification(
notification_status='created',
template_type='letter',
is_precompiled_letter=is_precompiled_letter,
postage='second',
key_type='test',
sent_one_off=False,
)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p:nth-of-type(1)')[0].text) == (
expected_p1
)
assert normalize_spaces(page.select('main p:nth-of-type(2)')[0].text) == (
expected_p2
)
assert normalize_spaces(
page.select_one('.letter-postage').text
) == expected_postage
assert page.select('p.notification-status') == []
def test_notification_page_shows_validation_failed_precompiled_letter(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter',
notification_status='validation-failed',
is_precompiled_letter=True
)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
metadata = {"page_count": "1", "status": "validation-failed",
"invalid_pages": "[1]",
"message": "content-outside-printable-area"}
mocker.patch('app.main.views.notifications.get_letter_file_data',
return_value=("some letter content", metadata))
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1,
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
error_message = page.find('p', class_='notification-status-cancelled').text
assert normalize_spaces(error_message) == (
'Validation failed because content is outside the printable area on page 1.'
'Files must meet our letter specification.'
)
assert not page.select('p.notification-status')
assert page.select_one('main img')['src'].endswith('.png?page=1')
assert not page.select('.letter-postage')
@pytest.mark.parametrize('notification_status, expected_message', (
(
'permanent-failure',
'Permanent failure – The provider cannot print the letter. Your letter will not be dispatched.',
),
(
'cancelled',
'Cancelled 1 January at 1:02am',
),
(
'technical-failure',
'Technical failure – Notify will resend once the team have fixed the problem',
),
))
@freeze_time("2016-01-01 01:01")
def test_notification_page_shows_cancelled_or_failed_letter(
client_request,
mocker,
fake_uuid,
notification_status,
expected_message,
):
notification = create_notification(template_type='letter', notification_status=notification_status)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1,
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p')[0].text) == (
"‘sample template’ was sent by Test User today at 1:01am"
)
assert normalize_spaces(page.select('main p')[1].text) == (
expected_message
)
assert not page.select('p.notification-status')
assert page.select_one('main img')['src'].endswith('.png?page=1')
@pytest.mark.parametrize('notification_type', ['email', 'sms'])
@freeze_time('2016-01-01 15:00')
def test_notification_page_does_not_show_cancel_link_for_sms_or_email_notifications(
client_request,
mocker,
fake_uuid,
notification_type,
):
notification = create_notification(template_type=notification_type, notification_status='created')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert 'Cancel sending this letter' not in normalize_spaces(page.text)
@freeze_time('2016-01-01 15:00')
def test_notification_page_shows_cancel_link_for_letter_which_can_be_cancelled(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter', notification_status='created')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert 'Cancel sending this letter' in normalize_spaces(page.text)
@freeze_time('2016-01-01 15:00')
def test_notification_page_does_not_show_cancel_link_for_letter_which_cannot_be_cancelled(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert 'Cancel sending this letter' not in normalize_spaces(page.text)
@pytest.mark.parametrize('postage, expected_postage_text, expected_class_value, expected_delivery', (
(
'first',
'Postage: first class',
'letter-postage-first',
'Estimated delivery date: Tuesday 5 January',
),
(
'europe',
'Postage: international',
'letter-postage-international',
'Estimated delivery date: Friday 8 January',
),
(
'rest-of-world',
'Postage: international',
'letter-postage-international',
'Estimated delivery date: Monday 11 January',
),
))
@freeze_time("2016-01-01 18:00")
def test_notification_page_shows_page_for_other_postage_classes(
client_request,
mocker,
fake_uuid,
postage,
expected_postage_text,
expected_class_value,
expected_delivery,
):
notification = create_notification(
notification_status='pending-virus-check',
template_type='letter',
postage=postage,
)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch('app.main.views.notifications.get_page_count_for_letter', return_value=3)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p:nth-of-type(2)')[0].text) == 'Printing starts tomorrow at 5:30pm'
assert normalize_spaces(page.select('main p:nth-of-type(3)')[0].text) == (
expected_delivery
)
assert normalize_spaces(page.select_one('.letter-postage').text) == (
expected_postage_text
)
assert page.select_one('.letter-postage')['class'] == [
'letter-postage', expected_class_value
]
@pytest.mark.parametrize('filetype', [
'pdf', 'png'
])
@pytest.mark.parametrize('user', [
create_active_user_with_permissions(),
create_active_caseworking_user(),
])
def test_should_show_image_of_letter_notification(
logged_in_client,
fake_uuid,
mocker,
filetype,
user,
):
mocker.patch('app.user_api_client.get_user', return_value=user)
notification = create_notification(template_type='letter')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.notification_api_client.get_notification_letter_preview',
return_value={
'content': base64.b64encode(b'foo').decode('utf-8')
}
)
response = logged_in_client.get(url_for(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype=filetype
))
assert response.status_code == 200
assert response.get_data(as_text=True) == 'foo'
def test_should_show_image_of_letter_notification_that_failed_validation(
logged_in_client,
fake_uuid,
mocker
):
notification = create_notification(template_type='letter', notification_status='validation-failed')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
metadata = {
'message': 'content-outside-printable-area',
'invalid_pages': '[1]',
'page_count': '1'
}
mocker.patch(
'app.main.views.notifications.notification_api_client.get_notification_letter_preview',
return_value={
'content': base64.b64encode(b'foo').decode('utf-8'),
'metadata': metadata
}
)
response = logged_in_client.get(url_for(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype='png',
with_metadata=True
))
assert response.status_code == 200
assert response.get_data(as_text=True) == 'foo', metadata
def test_should_show_preview_error_image_letter_notification_on_preview_error(
logged_in_client,
fake_uuid,
mocker,
):
notification = create_notification(template_type='letter')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.notification_api_client.get_notification_letter_preview',
side_effect=APIError
)
mocker.patch("builtins.open", mock_open(read_data=b"preview error image"))
response = logged_in_client.get(url_for(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype='png'
))
assert response.status_code == 200
assert response.get_data(as_text=True) == 'preview error image'
def test_notification_page_shows_error_message_if_precompiled_letter_cannot_be_opened(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(
notification_status='validation-failed',
template_type='letter',
is_precompiled_letter=True)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_letter_file_data',
side_effect=PdfReadError()
)
mocker.patch(
'app.main.views.notifications.pdf_page_count',
side_effect=PdfReadError()
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
error_message = page.find('p', class_='notification-status-cancelled').text
assert normalize_spaces(error_message) == \
"Validation failed – There’s a problem with your letter. Notify cannot read this PDF."
def test_should_404_for_unknown_extension(
client_request,
fake_uuid,
):
client_request.get(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype='docx',
_expected_status=404,
)
@pytest.mark.parametrize('service_permissions, template_type, link_expected', [
([], '', False),
(['inbound_sms'], 'email', False),
(['inbound_sms'], 'letter', False),
(['inbound_sms'], 'sms', True),
])
def test_notification_page_has_link_to_send_another_for_sms(
client_request,
mocker,
fake_uuid,
service_one,
service_permissions,
template_type,
link_expected,
):
service_one['permissions'] = service_permissions
notification = create_notification(template_type=template_type)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
last_paragraph = page.select('main p')[-1]
conversation_link = url_for(
'.conversation',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
_anchor='n{}'.format(fake_uuid),
)
if link_expected:
assert normalize_spaces(last_paragraph.text) == (
'See all text messages sent to this phone number'
)
assert last_paragraph.select_one('a')['href'] == conversation_link
else:
assert conversation_link not in str(page.select_one('main'))
@pytest.mark.parametrize('template_type, expected_link', [
('email', lambda notification_id: None),
('sms', lambda notification_id: None),
('letter', partial(
url_for,
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
filetype='pdf'
)),
])
def test_notification_page_has_link_to_download_letter(
client_request,
mocker,
fake_uuid,
service_one,
template_type,
expected_link,
):
notification = create_notification(template_type=template_type)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
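    # select_one returns None when there is no download link, so subscripting it
    # raises TypeError; that case is treated as there being no link on the page.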
try:
download_link = page.select_one('a[download]')['href']
except TypeError:
download_link = None
assert download_link == expected_link(notification_id=fake_uuid)
@pytest.mark.parametrize('is_precompiled_letter, has_template_link', [
(True, False),
(False, True),
])
def test_notification_page_has_expected_template_link_for_letter(
client_request,
mocker,
fake_uuid,
service_one,
is_precompiled_letter,
has_template_link
):
if is_precompiled_letter:
mocker.patch(
'app.main.views.notifications.get_letter_file_data',
side_effect=[(b'foo', {"message": "", "invalid_pages": "[]", "page_count": "1"}), b'foo']
)
mocker.patch(
'app.main.views.notifications.pdf_page_count',
return_value=1
)
notification = create_notification(template_type='letter', is_precompiled_letter=is_precompiled_letter)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
link = page.select_one('main > p:nth-of-type(1) > a')
if has_template_link:
assert link
else:
assert link is None
def test_should_show_image_of_precompiled_letter_notification(
logged_in_client,
fake_uuid,
mocker,
):
notification = create_notification(template_type='letter', is_precompiled_letter=True)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mock_pdf_page_count = mocker.patch(
'app.main.views.notifications.pdf_page_count',
return_value=1
)
mocker.patch(
'app.main.views.notifications.notification_api_client.get_notification_letter_preview',
return_value={
'content': base64.b64encode(b'foo').decode('utf-8')
}
)
response = logged_in_client.get(url_for(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype="png"
))
assert response.status_code == 200
assert response.get_data(as_text=True) == 'foo'
    mock_pdf_page_count.assert_called_once()
@freeze_time('2016-01-01 15:00')
def test_show_cancel_letter_confirmation(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter', notification_status='created')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.cancel_letter',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
flash_message = normalize_spaces(page.find('div', class_='banner-dangerous').text)
assert 'Are you sure you want to cancel sending this letter?' in flash_message
@freeze_time('2016-01-01 15:00')
def test_cancelling_a_letter_calls_the_api(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter', notification_status='created')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
cancel_endpoint = mocker.patch(
'app.main.views.notifications.notification_api_client.update_notification_to_cancelled'
)
client_request.post(
'main.cancel_letter',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
_follow_redirects=True,
_expected_redirect=None,
)
assert cancel_endpoint.called
@pytest.mark.parametrize('notification_type', ['sms', 'email'])
def test_should_show_reply_to_from_notification(
mocker,
fake_uuid,
notification_type,
client_request,
):
notification = create_notification(reply_to_text='reply to info', template_type=notification_type)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert 'reply to info' in page.text
| mit |
tensorflow/models | research/delf/delf/python/examples/extract_boxes.py | 1 | 7510 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from delf import box_io
from delf import utils
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
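# For example (illustrative): with scores [0.9, 0.2] and score_threshold 0.5,
# only the first box, its score and its class index are returned.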
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
for i, box in enumerate(boxes):
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Create output directories if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.io.gfile.exists(
cmd_args.output_viz_dir):
tf.io.gfile.makedirs(cmd_args.output_viz_dir)
detector_fn = detector.MakeDetector(cmd_args.detector_path)
start = time.time()
for i, image_path in enumerate(image_paths):
# Report progress once in a while.
if i == 0:
print('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {num_images}, last '
f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds')
start = time.time()
    # If the boxes file already exists, skip computation for this image.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir, out_boxes_filename)
if tf.io.gfile.exists(out_boxes_fullpath):
print(f'Skipping {image_path}')
continue
im = np.expand_dims(np.array(utils.RgbLoader(image_paths[i])), 0)
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0], scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir, out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
DG-i/openshift-ansible | roles/lib_openshift/src/ansible/oc_edit.py | 42 | 1774 | # pylint: skip-file
# flake8: noqa
def main():
'''
ansible oc module for editing objects
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kind=dict(required=True,
type='str',
choices=['dc', 'deploymentconfig',
'rc', 'replicationcontroller',
'svc', 'service',
'scc', 'securitycontextconstraints',
'ns', 'namespace', 'project', 'projects',
'is', 'imagestream',
'istag', 'imagestreamtag',
'bc', 'buildconfig',
'routes',
'node',
'secret',
'pv', 'persistentvolume']),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, required=True, type='dict'),
force=dict(default=False, type='bool'),
separator=dict(default='.', type='str'),
),
supports_check_mode=True,
)
rval = Edit.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
| apache-2.0 |
demon-ru/iml-crm | addons/hw_posbox_upgrade/controllers/main.py | 172 | 4161 | # -*- coding: utf-8 -*-
import logging
import os
import time
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import threading
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
upgrade_template = """
<!DOCTYPE HTML>
<html>
<head>
<title>Odoo's PosBox - Software Upgrade</title>
<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>
<script>
$(function(){
var upgrading = false;
$('#upgrade').click(function(){
console.log('click');
if(!upgrading){
upgrading = true;
$('#upgrade').text('Upgrading, Please Wait');
$.ajax({
url:'/hw_proxy/perform_upgrade/'
}).then(function(status){
$('#upgrade').html('Upgrade Successful<br \\>Click to Restart the PosBox');
$('#upgrade').off('click');
$('#upgrade').click(function(){
$.ajax({ url:'/hw_proxy/perform_restart' })
$('#upgrade').text('Restarting');
$('#upgrade').off('click');
setTimeout(function(){
window.location = '/'
},30*1000);
});
},function(){
$('#upgrade').text('Upgrade Failed');
});
}
});
});
</script>
<style>
body {
width: 480px;
margin: 60px auto;
font-family: sans-serif;
text-align: justify;
color: #6B6B6B;
}
.centering{
text-align: center;
}
#upgrade {
padding: 20px;
background: rgb(121, 197, 107);
color: white;
border-radius: 3px;
text-align: center;
margin: 30px;
text-decoration: none;
display: inline-block;
}
</style>
</head>
<body>
<h1>PosBox Software Upgrade</h1>
<p>
This tool will help you perform an upgrade of the PosBox's software.
However the preferred method to upgrade the posbox is to flash the sd-card with
the <a href='http://nightly.openerp.com/trunk/posbox/'>latest image</a>. The upgrade
        procedure is explained in the <a href='/hw_proxy/static/doc/manual.pdf'>PosBox manual</a>
</p>
<p>
To upgrade the posbox, click on the upgrade button. The upgrade will take a few minutes. <b>Do not reboot</b> the PosBox during the upgrade.
</p>
<div class='centering'>
<a href='#' id='upgrade'>Upgrade</a>
</div>
</body>
</html>
"""
class PosboxUpgrader(hw_proxy.Proxy):
def __init__(self):
super(PosboxUpgrader,self).__init__()
self.upgrading = threading.Lock()
self.last_upgrade = 0
@http.route('/hw_proxy/upgrade', type='http', auth='none', )
def upgrade(self):
return upgrade_template
@http.route('/hw_proxy/perform_upgrade', type='http', auth='none')
def perform_upgrade(self):
self.upgrading.acquire()
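        # Throttle repeated requests: if an upgrade finished less than 30 seconds
        # ago, report UPTODATE instead of running the update script again.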
if time.time() - self.last_upgrade < 30:
self.upgrading.release()
return 'UPTODATE'
else:
os.system('/bin/bash /home/pi/openerp/update.sh')
self.last_upgrade = time.time()
self.upgrading.release()
return 'SUCCESS'
@http.route('/hw_proxy/perform_restart', type='http', auth='none')
def perform_restart(self):
self.upgrading.acquire()
if time.time() - self.last_upgrade < 30:
self.upgrading.release()
return 'RESTARTED'
else:
os.system('/bin/bash /home/pi/openerp/restart.sh')
self.last_upgrade = time.time()
self.upgrading.release()
return 'SUCCESS'
| agpl-3.0 |
mindm/2017Challenges | challenge_6/python/slandau3/ranges.py | 5 | 1713 | #!/usr/bin/env python3
# @author Slandau3
def ranges(input: list) -> list:
# The patterns_found list keeps track of the definite ranges we have completed
patterns_found = []
# pattern_in_progress keeps track of a pattern we are tracing
pattern_in_progress = []
for i in input:
        if len(pattern_in_progress) == 0: # Add the beginning of the input to the pattern_in_progress list
pattern_in_progress.append(i)
elif i == pattern_in_progress[-1]+1:
            # If the integer we are currently looking at is equal to the previous
            # number in pattern_in_progress plus 1, the range continues
pattern_in_progress.append(i)
else:
            # If you're here then we found an integer that is not the previous integer + 1
if pattern_in_progress[0] == pattern_in_progress[-1]: # Case for two "rangeless" integers in order
# ex: 1, 2, 3, 14, 25, 30, 31, 32
pattern_in_progress = [i]
continue # ends this iteration of the loop
# The fact that we have gotten this far means we are ready to add the range to the patterns_found list
patterns_found.append("{num1}->{num2}".format(num1=pattern_in_progress[0], num2=pattern_in_progress[-1]))
pattern_in_progress = [i]
if len(pattern_in_progress) > 1:
# This if statement ensures that a range that was in progress when the loop ended is added to
# the patterns_found list.
patterns_found.append("{num1}->{num2}".format(num1=pattern_in_progress[0], num2=pattern_in_progress[-1]))
return patterns_found
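# Illustrative example (not part of the original solution):
#   ranges([1, 2, 3, 14, 25, 30, 31, 32]) returns ['1->3', '30->32']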
if __name__ == '__main__':
ranges(list(input())) | mit |
bentilly/heroes | lib/werkzeug/contrib/limiter.py | 295 | 1333 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those directly stream into the memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
Default is 10MB
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
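# Minimal usage sketch (assumes some existing WSGI callable named `application`;
# the 5MB limit is just an example value):
#   application = StreamLimitMiddleware(application, maximum_size=1024 * 1024 * 5)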
| apache-2.0 |
vrenkens/Nabu-asr | nabu/neuralnetworks/components/layer.py | 2 | 3940 | '''@file layer.py
Neural network layers '''
import tensorflow as tf
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn
from nabu.neuralnetworks.components import ops
def blstm(
inputs,
sequence_length,
num_units,
layer_norm=False,
scope=None):
'''
a BLSTM layer
args:
inputs: the input to the layer as a
[batch_size, max_length, dim] tensor
sequence_length: the length of the input sequences as a
[batch_size] tensor
num_units: The number of units in the one directon
layer_norm: whether layer normalization should be applied
scope: The variable scope sets the namespace under which
the variables created during this call will be stored.
returns:
the blstm outputs
'''
with tf.variable_scope(scope or 'BLSTM'):
#create the lstm cell that will be used for the forward and backward
#pass
lstm_cell_fw = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units=num_units,
layer_norm=layer_norm,
reuse=tf.get_variable_scope().reuse)
lstm_cell_bw = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units,
layer_norm=layer_norm,
reuse=tf.get_variable_scope().reuse)
#do the forward computation
outputs_tupple, _ = bidirectional_dynamic_rnn(
lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32,
sequence_length=sequence_length)
outputs = tf.concat(outputs_tupple, 2)
return outputs
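# Usage sketch (illustrative only): for a [batch_size, max_length, dim] tensor
# `feats` and a [batch_size] length vector `lengths`,
#   outputs = blstm(feats, lengths, num_units=128)
# returns a [batch_size, max_length, 256] tensor, since the forward and backward
# outputs are concatenated along the feature axis.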
def pblstm(
inputs,
sequence_length,
num_units,
num_steps=2,
layer_norm=False,
scope=None):
'''
a Pyramidal BLSTM layer
args:
inputs: the input to the layer as a
[batch_size, max_length, dim] tensor
sequence_length: the length of the input sequences as a
[batch_size] tensor
num_units: The number of units in the one directon
num_steps: the number of time steps to concatenate
layer_norm: whether layer normalization should be applied
scope: The variable scope sets the namespace under which
the variables created during this call will be stored.
returns:
- the PBLSTM outputs
- the new sequence lengths
'''
with tf.variable_scope(scope or 'PBLSTM'):
#apply blstm layer
outputs = blstm(
inputs=inputs,
sequence_length=sequence_length,
num_units=num_units,
layer_norm=layer_norm
)
#stack the outputs
outputs, output_seq_lengths = ops.pyramid_stack(
outputs,
sequence_length,
num_steps)
return outputs, output_seq_lengths
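# Note (illustrative): because the outputs of num_steps consecutive frames are
# stacked, the returned sequence lengths are roughly the input lengths divided
# by num_steps.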
def projected_subsampling(inputs, input_seq_lengths, num_steps, name=None):
'''
    apply projected subsampling: concatenate num_steps timesteps and project
    the stacked frames back to the input dimensionality
    args:
        inputs: a [batch_size x max_length x dim] input tensor
        input_seq_lengths: the input sequence lengths as a [batch_size] vector
        num_steps: the number of steps to concatenate
name: the name of the operation
returns:
        - a [batch_size x ceil(max_length/num_steps) x dim] output tensor
- the output sequence lengths as a [batch_size] vector
'''
with tf.variable_scope(name or 'subsampling'):
input_dim = int(inputs.get_shape()[2])
#concatenate 2 timesteps
stacked_inputs, output_seq_lengths = ops.pyramid_stack(
inputs,
input_seq_lengths,
num_steps)
#project back to the input dimension
outputs = tf.contrib.layers.linear(stacked_inputs, input_dim)
return outputs, output_seq_lengths
| mit |
dohoangkhiem/ansible-modules-extras | cloud/rackspace/rax_mon_entity.py | 123 | 6171 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_mon_entity
short_description: Create or delete a Rackspace Cloud Monitoring entity
description:
- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
to monitor. Entities associate checks and alarms with a target system and
provide a convenient, centralized place to store IP addresses. Rackspace
monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
label:
description:
- Defines a name for this entity. Must be a non-empty string between 1 and
255 characters long.
required: true
state:
description:
- Ensure that an entity with this C(name) exists or does not exist.
choices: ["present", "absent"]
agent_id:
description:
- Rackspace monitoring agent on the target device to which this entity is
bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
named_ip_addresses:
description:
- Hash of IP addresses that may be referenced by name by rax_mon_checks
added to this entity. Must be a dictionary of with keys that are names
between 1 and 64 characters long, and values that are valid IPv4 or IPv6
addresses.
metadata:
description:
- Hash of arbitrary C(name), C(value) pairs that are passed to associated
rax_mon_alarms. Names and values must all be between 1 and 255 characters
long.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Entity example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Ensure an entity exists
rax_mon_entity:
credentials: ~/.rax_pub
state: present
label: my_entity
named_ip_addresses:
web_box: 192.168.0.10
db_box: 192.168.0.11
meta:
hurf: durf
register: the_entity
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
metadata):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for entity in cm.list_entities():
if label == entity.label:
existing.append(entity)
entity = None
if existing:
entity = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing entities have the label %s.' %
(len(existing), label))
if entity:
if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
should_delete = should_create = True
# Change an existing Entity, unless there's nothing to do.
should_update = agent_id and agent_id != entity.agent_id or \
(metadata and metadata != entity.metadata)
if should_update and not should_delete:
entity.update(agent_id, metadata)
changed = True
if should_delete:
entity.delete()
else:
should_create = True
if should_create:
# Create a new Entity.
entity = cm.create_entity(label=label, agent=agent_id,
ip_addresses=named_ip_addresses,
metadata=metadata)
changed = True
else:
# Delete the existing Entities.
for e in existing:
e.delete()
changed = True
if entity:
entity_dict = {
"id": entity.id,
"name": entity.name,
"agent_id": entity.agent_id,
}
module.exit_json(changed=changed, entity=entity_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
agent_id=dict(),
named_ip_addresses=dict(type='dict', default={}),
metadata=dict(type='dict', default={})
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
agent_id = module.params.get('agent_id')
named_ip_addresses = module.params.get('named_ip_addresses')
metadata = module.params.get('metadata')
setup_rax_module(module, pyrax)
cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
# Import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# Invoke the module.
main()
| gpl-3.0 |
marcoscaceres/AlarmAPI | node_modules/grunt-contrib-uglify/node_modules/gzip-js/test/zipTest.py | 182 | 1836 | import os
from helpers import run_cmd
from colorama import Fore
defaultTestDir = 'test-files'
defaultOutDir = 'test-outs'
"""
Run a single test
@param tFile- required; the full path to the file to run
@param level- optional (default: all); the compression level [1-9]
@return True if all tests passed; False if at least one test failed
"""
def runTest(tFile, level=None, outDir=defaultOutDir):
passed = True
if level == None:
for x in range(1, 10):
if runTest(tFile, x, outDir) == False:
passed = False
return passed
out1 = os.path.join(outDir, '%(file)s.%(level)d.gz' % {'file': os.path.basename(tFile), 'level' : level})
out2 = os.path.join(outDir, '%(file)s.%(level)d.out.gz' % {'file': os.path.basename(tFile), 'level' : level})
run_cmd('gzip -c -%(level)d %(file)s > %(outfile)s' % {'level' : level, 'file' : tFile, 'outfile' : out1})
run_cmd('../bin/gzip.js --level %(level)d --file %(file)s --output %(output)s' % {'level' : level, 'file' : tFile, 'output' : out2})
result = run_cmd('diff %(file1)s %(file2)s' % {'file1' : out1, 'file2' : out2})
if result['returncode'] == 0:
status = Fore.GREEN + 'PASSED' + Fore.RESET
else:
passed = False
status = Fore.RED + 'FAILED' + Fore.RESET
print 'Level %(level)d: %(status)s' % {'level' : level, 'status' : status}
return passed
"""
Runs all tests on the given level. This iterates through the testDir directory defined above.
@param level- The level to run on [1-9] (default: None, runs on all levels)
@return True if all levels passed, False if at least one failed
"""
def runAll(level=None, testDir=defaultTestDir):
passed = True
for tFile in os.listdir(testDir):
fullPath = os.path.join(testDir, tFile)
print Fore.YELLOW + tFile + Fore.RESET
if runTest(fullPath, level) == False:
passed = False
print ''
return passed
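# Example usage (illustrative): run every compression level against the default
# test directory, or just a single level:
#   runAll()
#   runAll(level=6)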
| mit |
AICP/external_chromium_org | build/android/pylib/instrumentation/test_jar.py | 11 | 9038 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper class for instrumenation test jar."""
# pylint: disable=W0702
import collections
import logging
import os
import pickle
import re
import sys
from pylib import cmd_helper
from pylib import constants
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT,
'build', 'util', 'lib', 'common'))
import unittest_util # pylint: disable=F0401
# If you change the cached output of proguard, increment this number
PICKLE_FORMAT_VERSION = 1
class TestJar(object):
_ANNOTATIONS = frozenset(
['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest',
'FlakyTest', 'DisabledTest', 'Manual', 'PerfTest', 'HostDrivenTest'])
_DEFAULT_ANNOTATION = 'SmallTest'
_PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
_PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
_PROGUARD_ANNOTATION_RE = re.compile(r'\s*?- Annotation \[L(\S*);\]:$')
_PROGUARD_ANNOTATION_CONST_RE = (
re.compile(r'\s*?- Constant element value.*$'))
_PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'\s*?- \S+? \[(.*)\]$')
def __init__(self, jar_path):
if not os.path.exists(jar_path):
raise Exception('%s not found, please build it' % jar_path)
self._PROGUARD_PATH = os.path.join(constants.ANDROID_SDK_ROOT,
'tools/proguard/lib/proguard.jar')
if not os.path.exists(self._PROGUARD_PATH):
self._PROGUARD_PATH = os.path.join(os.environ['ANDROID_BUILD_TOP'],
'external/proguard/lib/proguard.jar')
self._jar_path = jar_path
self._annotation_map = collections.defaultdict(list)
self._pickled_proguard_name = self._jar_path + '-proguard.pickle'
self._test_methods = []
if not self._GetCachedProguardData():
self._GetProguardData()
def _GetCachedProguardData(self):
if (os.path.exists(self._pickled_proguard_name) and
(os.path.getmtime(self._pickled_proguard_name) >
os.path.getmtime(self._jar_path))):
logging.info('Loading cached proguard output from %s',
self._pickled_proguard_name)
try:
with open(self._pickled_proguard_name, 'r') as r:
d = pickle.loads(r.read())
if d['VERSION'] == PICKLE_FORMAT_VERSION:
self._annotation_map = d['ANNOTATION_MAP']
self._test_methods = d['TEST_METHODS']
return True
except:
logging.warning('PICKLE_FORMAT_VERSION has changed, ignoring cache')
return False
def _GetProguardData(self):
proguard_output = cmd_helper.GetCmdOutput(['java', '-jar',
self._PROGUARD_PATH,
'-injars', self._jar_path,
'-dontshrink',
'-dontoptimize',
'-dontobfuscate',
'-dontpreverify',
'-dump',
]).split('\n')
clazz = None
method = None
annotation = None
has_value = False
qualified_method = None
for line in proguard_output:
m = self._PROGUARD_CLASS_RE.match(line)
if m:
clazz = m.group(1).replace('/', '.') # Change package delim.
annotation = None
continue
m = self._PROGUARD_METHOD_RE.match(line)
if m:
method = m.group(1)
annotation = None
qualified_method = clazz + '#' + method
if method.startswith('test') and clazz.endswith('Test'):
self._test_methods += [qualified_method]
continue
if not qualified_method:
# Ignore non-method annotations.
continue
m = self._PROGUARD_ANNOTATION_RE.match(line)
if m:
annotation = m.group(1).split('/')[-1] # Ignore the annotation package.
self._annotation_map[qualified_method].append(annotation)
has_value = False
continue
if annotation:
if not has_value:
m = self._PROGUARD_ANNOTATION_CONST_RE.match(line)
if m:
has_value = True
else:
m = self._PROGUARD_ANNOTATION_VALUE_RE.match(line)
if m:
value = m.group(1)
self._annotation_map[qualified_method].append(
annotation + ':' + value)
has_value = False
logging.info('Storing proguard output to %s', self._pickled_proguard_name)
d = {'VERSION': PICKLE_FORMAT_VERSION,
'ANNOTATION_MAP': self._annotation_map,
'TEST_METHODS': self._test_methods}
with open(self._pickled_proguard_name, 'w') as f:
f.write(pickle.dumps(d))
def _GetAnnotationMap(self):
return self._annotation_map
@staticmethod
def _IsTestMethod(test):
class_name, method = test.split('#')
return class_name.endswith('Test') and method.startswith('test')
def GetTestAnnotations(self, test):
"""Returns a list of all annotations for the given |test|. May be empty."""
if not self._IsTestMethod(test):
return []
return self._GetAnnotationMap()[test]
@staticmethod
def _AnnotationsMatchFilters(annotation_filter_list, annotations):
"""Checks if annotations match any of the filters."""
if not annotation_filter_list:
return True
for annotation_filter in annotation_filter_list:
filters = annotation_filter.split('=')
if len(filters) == 2:
key = filters[0]
value_list = filters[1].split(',')
for value in value_list:
if key + ':' + value in annotations:
return True
elif annotation_filter in annotations:
return True
return False
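  # For example (illustrative): the filter 'SmallTest' matches a test annotated
  # with SmallTest, while 'Feature=Sync,Bookmarks' matches annotations stored as
  # 'Feature:Sync' or 'Feature:Bookmarks'.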
def GetAnnotatedTests(self, annotation_filter_list):
"""Returns a list of all tests that match the given annotation filters."""
return [test for test, annotations in self._GetAnnotationMap().iteritems()
if self._IsTestMethod(test) and self._AnnotationsMatchFilters(
annotation_filter_list, annotations)]
def GetTestMethods(self):
"""Returns a list of all test methods in this apk as Class#testMethod."""
return self._test_methods
def _GetTestsMissingAnnotation(self):
"""Get a list of test methods with no known annotations."""
tests_missing_annotations = []
for test_method in self.GetTestMethods():
annotations_ = frozenset(self.GetTestAnnotations(test_method))
if (annotations_.isdisjoint(self._ANNOTATIONS) and
not self.IsHostDrivenTest(test_method)):
tests_missing_annotations.append(test_method)
return sorted(tests_missing_annotations)
def GetAllMatchingTests(self, annotation_filter_list,
exclude_annotation_list, test_filter):
"""Get a list of tests matching any of the annotations and the filter.
Args:
annotation_filter_list: List of test annotations. A test must have at
least one of these annotations. A test without any annotations is
considered to be SmallTest.
exclude_annotation_list: List of test annotations. A test must not have
any of these annotations.
test_filter: Filter used for partial matching on the test method names.
Returns:
List of all matching tests.
"""
if annotation_filter_list:
available_tests = self.GetAnnotatedTests(annotation_filter_list)
# Include un-annotated tests in SmallTest.
if annotation_filter_list.count(self._DEFAULT_ANNOTATION) > 0:
for test in self._GetTestsMissingAnnotation():
logging.warning(
'%s has no annotations. Assuming "%s".', test,
self._DEFAULT_ANNOTATION)
available_tests.append(test)
if exclude_annotation_list:
excluded_tests = self.GetAnnotatedTests(exclude_annotation_list)
available_tests = list(set(available_tests) - set(excluded_tests))
else:
available_tests = [m for m in self.GetTestMethods()
if not self.IsHostDrivenTest(m)]
tests = []
if test_filter:
# |available_tests| are in adb instrument format: package.path.class#test.
# Maps a 'class.test' name to each 'package.path.class#test' name.
sanitized_test_names = dict([
(t.split('.')[-1].replace('#', '.'), t) for t in available_tests])
# Filters 'class.test' names and populates |tests| with the corresponding
# 'package.path.class#test' names.
tests = [
sanitized_test_names[t] for t in unittest_util.FilterTestNames(
sanitized_test_names.keys(), test_filter.replace('#', '.'))]
else:
tests = available_tests
return tests
@staticmethod
def IsHostDrivenTest(test):
return 'pythonDrivenTests' in test
| bsd-3-clause |
carloshwa/apps-android-wikipedia | scripts/make-templates.py | 2 | 6090 | #!/usr/bin/env python2
# coding=utf-8
import copy
import os
import json
import unicodecsv as csv
import codecs
from urllib2 import urlopen
from jinja2 import Environment, FileSystemLoader
CHINESE_WIKI_LANG = "zh"
SIMPLIFIED_CHINESE_LANG = "zh-hans"
TRADITIONAL_CHINESE_LANG = "zh-hant"
# Wikis that cause problems and hence we pretend
# do not exist.
# - "got" -> Gothic runes wiki. The name of got in got
# contains characters outside the Unicode BMP. Android
# hard crashes on these. Let's ignore these fellas
# for now.
OSTRICH_WIKIS = [u"got"]
# Represents a single wiki, along with arbitrary properties of that wiki
# Simple data container object
class Wiki(object):
def __init__(self, lang):
self.lang = lang
self.props = {}
# Represents a list of wikis plus their properties.
# Encapsulates rendering code as well
class WikiList(object):
def __init__(self, wikis):
self.wikis = wikis
self.template_env = Environment(loader=FileSystemLoader(
os.path.join(os.path.dirname(os.path.realpath(__file__)), u"templates")
))
def render(self, template, class_name, **kwargs):
data = {
u"class_name": class_name,
u"wikis": self.wikis
}
data.update(kwargs)
rendered = self.template_env.get_template(template).render(**data)
out = codecs.open(class_name + u".java", u"w", u"utf-8")
out.write(rendered)
out.close()
def build_wiki(lang, english_name, local_name, total_pages=0):
wiki = Wiki(lang)
wiki.props["english_name"] = english_name
wiki.props["local_name"] = local_name
wiki.props["total_pages"] = total_pages
return wiki
def list_from_wikistats():
URL = u"https://wikistats.wmflabs.org/api.php?action=dump&table=wikipedias&format=csv&s=good"
print(u"Fetching languages")
data = csv.reader(urlopen(URL))
wikis = []
is_first = True
for row in data:
if is_first:
is_first = False
continue # skip headers
wiki = build_wiki(lang=row[2], english_name=row[1], local_name=row[10],
total_pages=row[3])
wikis.append(wiki)
return wikis
# Remove unsupported wikis.
def filter_supported_wikis(wikis):
return [wiki for wiki in wikis if wiki.lang not in OSTRICH_WIKIS]
# Apply manual tweaks to the list of wikis before they're populated.
def preprocess_wikis(wikis):
# Add TestWiki.
wikis.append(build_wiki(lang="test", english_name="Test", local_name="Test",
total_pages=0))
return wikis
# Apply manual tweaks to the list of wikis after they're populated.
def postprocess_wikis(wiki_list):
# Add Simplified and Traditional Chinese dialects.
chineseWiki = next((wiki for wiki in wiki_list.wikis if wiki.lang == CHINESE_WIKI_LANG), None)
chineseWikiIndex = wiki_list.wikis.index(chineseWiki)
simplifiedWiki = copy.deepcopy(chineseWiki)
simplifiedWiki.lang = SIMPLIFIED_CHINESE_LANG
simplifiedWiki.props["english_name"] = "Simplified Chinese"
simplifiedWiki.props["local_name"] = "简体"
wiki_list.wikis.insert(chineseWikiIndex + 1, simplifiedWiki)
traditionalWiki = copy.deepcopy(chineseWiki)
traditionalWiki.lang = TRADITIONAL_CHINESE_LANG
traditionalWiki.props["english_name"] = "Traditional Chinese"
traditionalWiki.props["local_name"] = "繁體"
wiki_list.wikis.insert(chineseWikiIndex + 2, traditionalWiki)
return wiki_list
# Populate the aliases for "Special:" and "File:" in all wikis
def populate_aliases(wikis):
for wiki in wikis.wikis:
print(u"Fetching Special Page and File alias for %s" % wiki.lang)
url = u"https://%s.wikipedia.org/w/api.php" % wiki.lang + \
u"?action=query&meta=siteinfo&format=json&siprop=namespaces"
data = json.load(urlopen(url))
# according to https://www.mediawiki.org/wiki/Manual:Namespace
# -1 seems to be the ID for Special Pages
wiki.props[u"special_alias"] = data[u"query"][u"namespaces"][u"-1"][u"*"]
# 6 is the ID for File pages
wiki.props[u"file_alias"] = data[u"query"][u"namespaces"][u"6"][u"*"]
return wikis
# Populates data on names of main page in each wiki
def populate_main_pages(wikis):
for wiki in wikis.wikis:
print(u"Fetching Main Page for %s" % wiki.lang)
url = u"https://%s.wikipedia.org/w/api.php" % wiki.lang + \
u"?action=query&meta=siteinfo&format=json&siprop=general"
data = json.load(urlopen(url))
wiki.props[u"main_page_name"] = data[u"query"][u"general"][u"mainpage"]
return wikis
# Returns a function that renders a particular template when passed
# a WikiList object
def render_template(template, filename, **kwargs):
def _actual_render(wikis):
wikis.render(template, filename, **kwargs)
return wikis
return _actual_render
# Render things into a simple key value JSON dict
# Useful for the iOS side of things
def render_simple_json(key, filename):
def _actual_render(wikis):
data = dict([(wiki.lang, wiki.props[key]) for wiki in wikis.wikis])
out = codecs.open(filename, u"w", u"utf-8")
out.write(json.dumps(data))
out.close()
return wikis
return _actual_render
# Kinda like reduce(), but special-cases the first function: it's called with no
# arguments to produce the initial value
def chain(*funcs):
res = funcs[0]()
for func in funcs[1:]:
res = func(res)
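# For example (illustrative): chain(f, g, h) is equivalent to h(g(f())).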
chain(
list_from_wikistats,
filter_supported_wikis,
preprocess_wikis,
WikiList,
populate_aliases,
populate_main_pages,
postprocess_wikis,
render_template(u"basichash.java.jinja", u"SpecialAliasData", key=u"special_alias"),
render_template(u"basichash.java.jinja", u"FileAliasData", key=u"file_alias"),
render_simple_json(u"special_alias", u"specialalias.json"),
render_simple_json(u"file_alias", u"filealias.json"),
render_template(u"basichash.java.jinja", u"MainPageNameData", key=u"main_page_name"),
render_simple_json(u"main_page_name", u"mainpages.json")
)
| apache-2.0 |
shsingh/ansible | test/units/modules/network/fortios/test_fortios_log_syslogd_override_filter.py | 21 | 9346 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_log_syslogd_override_filter
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_log_syslogd_override_filter.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_log_syslogd_override_filter_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_override_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_override_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_log_syslogd_override_filter_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_override_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_override_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_log_syslogd_override_filter_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_override_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_override_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_log_syslogd_override_filter_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_override_filter': {
'random_attribute_not_valid': 'tag',
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_override_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
fredwilliam/PMO | apps/hq/middleware/hq.py | 3 | 2738 | from __future__ import absolute_import
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from hq.authentication import get_username_password
from hq.utils import get_dates
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
# this keeps a thread-local cache of stuff. we're gonna stick some HQ
# stuff inside so that we have access to the user and domain from things
# that don't have a handle to the request object
_thread_locals = local()
def get_current_user():
"""Get the current (thread-specific) user"""
return getattr(_thread_locals, 'user', None)
def get_current_domain():
"""Get the current (thread-specific) user"""
return getattr(_thread_locals, 'domain', None)
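# Illustrative usage sketch (hypothetical caller code, not part of this module):
# anything running inside a request handled by HqMiddleware can do
#
#   from hq.middleware.hq import get_current_user, get_current_domain
#   user = get_current_user()        # None outside a request
#   domain = get_current_domain()    # None if no domain has been selected
#
# without needing a handle to the request object.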
class HqMiddleware(object):
'''Middleware for CommCare HQ. Right now the only thing this does is
set some stuff in the thread locals (user and domain) if they exist
as well as do some custom authentication for unsalted passwords and
set convenience accessors for passed in dates in urlparams.'''
def process_request(self, request):
_thread_locals.user = getattr(request, 'user', None)
if request.user and not request.user.is_anonymous():
self._set_local_vars(request, request.user)
else:
# attempt our custom authentication only if regular auth fails
# (and request.user == anonymousUser
username, password = get_username_password(request)
if username and password:
user = authenticate(username=username, password=password)
if user is not None:
request.user = user
self._set_local_vars(request, user)
# do the same for start and end dates. at some point our views
# can just start accessing these properties on the request assuming
# our middleware is running
try:
            startdate, enddate = get_dates(request)
request.startdate = startdate
request.enddate = enddate
except Exception:
request.startdate = None
request.enddate = None
return None
def _set_local_vars(self, request, user):
"""Sets the User and Domain objects in the threadlocals, if
they exist"""
try:
# set the domain in the thread locals
# so it can be accessed in places other
# than views.
_thread_locals.domain = request.user.selected_domain
except Exception:
# likely means that there's no selected user or
# domain, just let it go.
pass
| bsd-3-clause |
sebalix/OpenUpgrade | openerp/tools/float_utils.py | 312 | 10296 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP'):
"""Return ``value`` rounded to ``precision_digits`` decimal digits,
minimizing IEEE-754 floating point representation errors, and applying
the tie-breaking rule selected with ``rounding_method``, by default
HALF-UP (away from zero).
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
    :param rounding_method: the rounding method used: 'HALF-UP' or 'UP', the first
        one rounding to the closest number with the rule that number>=0.5 is
        rounded up to 1, and the latter one always rounding up.
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP (for normal rounding)
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
    # Due to IEEE 754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
# the order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
if rounding_method == 'HALF-UP':
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
# TIE-BREAKING: UP (for ceiling operations)
# When rounding the value up, we instead subtract the epsilon value
    # as the approximation of the real value may be slightly *above* the
# tie limit, this would result in incorrectly rounding up to the next number
# The math.ceil operation is applied on the absolute value in order to
# round "away from zero" and not "towards infinity", then the sign is
# restored.
elif rounding_method == 'UP':
sign = cmp(normalized_value, 0)
normalized_value -= sign*epsilon
rounded_value = math.ceil(abs(normalized_value))*sign # ceil to integer
result = rounded_value * rounding_factor # de-normalize
return result
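# A few expected values for float_round, sketching the behaviour documented above
# (HALF-UP by default, 'UP' for ceiling-style rounding):
#
#   float_round(2.675, precision_digits=2)                        -> 2.68
#       (2.675 is stored as 2.6749999..., the epsilon tips the tie up)
#   float_round(1.3, precision_rounding=0.5)                      -> 1.5
#       (normalize / round / de-normalize supports arbitrary steps)
#   float_round(-0.5, precision_digits=0)                         -> -1.0
#       (HALF-UP ties round away from zero)
#   float_round(7.668, precision_digits=2, rounding_method='UP')  -> 7.67
#       (UP always rounds away from zero to the next step)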
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
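# Sketch of float_is_zero at 2-digit precision (epsilon = 0.01):
#
#   float_is_zero(0.002, precision_digits=2)  -> True   (rounds to 0.0)
#   float_is_zero(0.006, precision_digits=2)  -> False  (rounds to 0.01)
#
# which is exactly why float_is_zero(a - b) and float_compare(a, b) == 0 can
# disagree, as the docstring warns.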
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
non-zero difference!
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Example: 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0
However 0.006 and 0.002 are considered different (this method returns 1)
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
return -1 if delta < 0.0 else 1
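# Sketch of float_compare at 2-digit precision:
#
#   float_compare(1.432, 1.431, precision_digits=2)  -> 0   (both round to 1.43)
#   float_compare(0.006, 0.002, precision_digits=2)  -> 1   (0.01 vs 0.0)
#   float_compare(2.674, 2.675, precision_digits=2)  -> -1  (2.67 vs 2.68)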
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
       given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:meth:`~.float_round`), but only to produce a suitable
string representation for a float.
:param int precision_digits: number of fractional digits to
include in the output
"""
    # Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print '###!!! Rounding error: got %s , expected %s' % (result, expected)
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for i in xrange(len(fractions)):
frac, exp, prec = fractions[i], expecteds[i], precisions[i]
for sign in [-1,1]:
for x in xrange(0,10000,97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
stop = time.time()
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
# with decimal:
# 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
| agpl-3.0 |
imk1/IMKTFBindingCode | getCellTypeSpecificDomainBoundaries.py | 1 | 2787 | import sys
import argparse
import gzip
import math
def parseArgument():
# Parse the input
parser =\
argparse.ArgumentParser(description = "Get domain boundaries that are present in the 1st cell type but not in the 2nd")
parser.add_argument("--cellTypeOneDomainsFileName", required=True,
help='Domain boundaries from 1st cell type, contains header, gzipped, chromosomes do not start with chr')
parser.add_argument("--cellTypeTwoDomainsFileName", required=True,
help='Domain boundaries from 2nd cell type, contains header, gzipped, chromosomes do not start with chr')
parser.add_argument("--outputFileName", required=True, help='Name of file where cell-type-specific domains will be recorded')
options = parser.parse_args();
return options
def getTopCorner(coordinateLine):
# Get the top corner of a domain
coordinateLineElements = coordinateLine.split("\t")
domainTopCorner = (coordinateLineElements[0], int(coordinateLineElements[2]), int(coordinateLineElements[5]))
return domainTopCorner
def getDomainPreserved(topCorner, topCornerTwo):
# Determine whether two domains are close enough
if topCorner[0] != topCornerTwo[0]:
# The domains are not close enough
return False
else:
domainDistance = math.sqrt(math.pow((topCorner[1] - topCornerTwo[1]), 2) + math.pow((topCorner[2] - topCornerTwo[2]), 2))
distanceBound = 50000
if distanceBound > 0.2 * abs(topCorner[1] - topCorner[2]):
# Make the upper bound on the distance 1/5 of the domain size
distanceBound = 0.2 * abs(topCorner[1] - topCorner[2])
if domainDistance <= distanceBound:
# The domain is preserved
return True
return False
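# Worked example of the preservation test above (values are illustrative only):
# for a 100 kb domain with topCorner = ('1', 500000, 600000) the bound is
# min(50000, 0.2 * 100000) = 20000, so a corner ('1', 510000, 612000) in the other
# cell type gives sqrt(10000**2 + 12000**2) ~ 15620 <= 20000 and the domain counts
# as preserved, while ('2', 510000, 612000) fails immediately on the chromosome check.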
def getCellTypeSpecificDomainBoundaries(options):
# Get domain boundaries that are present in the 1st cell type but not in the 2nd
cellTypeOneDomainsFile = gzip.open(options.cellTypeOneDomainsFileName)
cellTypeOneDomainsFile.readline() # Remove the header
cellTypeTwoDomainsFile = gzip.open(options.cellTypeTwoDomainsFileName)
cellTypeTwoDomainsFile.readline() # Remove the header
cellTypeTwoDomainTopCorners = [getTopCorner(line.strip()) for line in cellTypeTwoDomainsFile.readlines()]
cellTypeTwoDomainsFile.close()
outputFile = open(options.outputFileName, 'w+')
for line in cellTypeOneDomainsFile:
# Iterate through the domains of the 1st cell type and record the domains that are not in the 2nd cell type
topCorner = getTopCorner(line.strip())
domainPreservedList = [getDomainPreserved(topCorner, topCornerTwo) for topCornerTwo in cellTypeTwoDomainTopCorners]
if True not in domainPreservedList:
# The domain is cell-type-specific, so record it
outputFile.write(line)
cellTypeOneDomainsFile.close()
outputFile.close()
if __name__=="__main__":
options = parseArgument()
getCellTypeSpecificDomainBoundaries(options)
| mit |
guorendong/iridium-browser-ubuntu | native_client/pnacl/driver/shelltools.py | 8 | 2070 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from driver_log import Log
import types
######################################################################
#
# Shell Utilities
#
######################################################################
class shell(object):
@staticmethod
def unescape(s):
w = shell.split(s)
if len(w) == 0:
return ''
if len(w) == 1:
return w[0]
# String was not properly escaped in the first place?
assert(False)
# TODO(pdox): Simplify this function by moving more of it into unescape
@staticmethod
def split(s):
"""Split a shell-style string up into a list of distinct arguments.
For example: split('cmd -arg1 -arg2="a b c"')
Returns ['cmd', '-arg1', '-arg2=a b c']
"""
assert(isinstance(s, types.StringTypes))
out = []
inspace = True
inquote = False
buf = ''
i = 0
while i < len(s):
if s[i] == '"':
inspace = False
inquote = not inquote
elif s[i] == ' ' and not inquote:
if not inspace:
out.append(buf)
buf = ''
inspace = True
elif s[i] == '\\':
if not i+1 < len(s):
Log.Fatal('Unterminated \\ escape sequence')
inspace = False
i += 1
buf += s[i]
else:
inspace = False
buf += s[i]
i += 1
if inquote:
Log.Fatal('Unterminated quote')
if not inspace:
out.append(buf)
return out
@staticmethod
def join(args):
"""Turn a list into a shell-style string For example:
shell.join([ 'a', 'b', 'c d e' ]) = 'a b "c d e"'
"""
return ' '.join([ shell.escape(a) for a in args ])
@staticmethod
def escape(s):
"""Shell-escape special characters in a string
Surround with quotes if necessary
"""
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
if ' ' in s:
s = '"' + s + '"'
return s
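# Round-trip sketch for the helpers above (illustrative values):
#
#   shell.escape('a b')                   -> '"a b"'
#   shell.unescape('"a b"')               -> 'a b'
#   shell.split(shell.join(['a', 'b c'])) -> ['a', 'b c']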
| bsd-3-clause |
Drooids/odoo | addons/account_asset/wizard/account_asset_change_duration.py | 258 | 5021 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class asset_modify(osv.osv_memory):
_name = 'asset.modify'
_description = 'Modify Asset'
def _get_asset_method_time(self, cr, uid, ids, field_name, arg, context=None):
if ids and len(ids) == 1 and context.get('active_id'):
asset = self.pool['account.asset.asset'].browse(cr, uid, context.get('active_id'), context=context)
return {ids[0]: asset.method_time}
else:
return dict.fromkeys(ids, False)
_columns = {
'name': fields.char('Reason', required=True),
'method_number': fields.integer('Number of Depreciations', required=True),
'method_period': fields.integer('Period Length'),
'method_end': fields.date('Ending date'),
'note': fields.text('Notes'),
'asset_method_time': fields.function(_get_asset_method_time, type='char', string='Asset Method Time', readonly=True),
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields with their default values.
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
res = super(asset_modify, self).default_get(cr, uid, fields, context=context)
asset_id = context.get('active_id', False)
asset = asset_obj.browse(cr, uid, asset_id, context=context)
if 'name' in fields:
res.update({'name': asset.name})
if 'method_number' in fields and asset.method_time == 'number':
res.update({'method_number': asset.method_number})
if 'method_period' in fields:
res.update({'method_period': asset.method_period})
if 'method_end' in fields and asset.method_time == 'end':
res.update({'method_end': asset.method_end})
if context.get('active_id'):
res['asset_method_time'] = self._get_asset_method_time(cr, uid, [0], 'asset_method_time', [], context=context)[0]
return res
def modify(self, cr, uid, ids, context=None):
""" Modifies the duration of asset for calculating depreciation
and maintains the history of old values.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of Ids
@param context: A standard dictionary
@return: Close the wizard.
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
history_obj = self.pool.get('account.asset.history')
asset_id = context.get('active_id', False)
asset = asset_obj.browse(cr, uid, asset_id, context=context)
data = self.browse(cr, uid, ids[0], context=context)
history_vals = {
'asset_id': asset_id,
'name': data.name,
'method_time': asset.method_time,
'method_number': asset.method_number,
'method_period': asset.method_period,
'method_end': asset.method_end,
'user_id': uid,
'date': time.strftime('%Y-%m-%d'),
'note': data.note,
}
history_obj.create(cr, uid, history_vals, context=context)
asset_vals = {
'method_number': data.method_number,
'method_period': data.method_period,
'method_end': data.method_end,
}
asset_obj.write(cr, uid, [asset_id], asset_vals, context=context)
asset_obj.compute_depreciation_board(cr, uid, [asset_id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
if1live/pelican-plugins | extract_toc/extract_toc.py | 28 | 1805 | # -*- coding: utf-8 -*-
"""
Extract Table of Content
========================
A Pelican plugin to extract table of contents (ToC) from `article.content` and
place it in its own `article.toc` variable for use in templates.
"""
from os import path
from bs4 import BeautifulSoup
from pelican import signals, readers, contents
try:
from pandoc_reader import PandocReader
except ImportError:
PandocReader = False
def extract_toc(content):
if isinstance(content, contents.Static):
return
soup = BeautifulSoup(content._content,'html.parser')
filename = content.source_path
extension = path.splitext(filename)[1][1:]
toc = None
# default Markdown reader
if not toc and readers.MarkdownReader.enabled and extension in readers.MarkdownReader.file_extensions:
toc = soup.find('div', class_='toc')
if toc: toc.extract()
# default reStructuredText reader
if not toc and readers.RstReader.enabled and extension in readers.RstReader.file_extensions:
toc = soup.find('div', class_='contents topic')
if toc: toc.extract()
if toc:
tag=BeautifulSoup(str(toc), 'html.parser')
tag.div['class']='toc'
tag.div['id']=''
p=tag.find('p', class_='topic-title first')
if p:p.extract()
toc=tag
# Pandoc reader (markdown and other formats)
if not toc and PandocReader and PandocReader.enabled and extension in PandocReader.file_extensions:
toc = soup.find('nav', id='TOC')
if toc:
toc.extract()
content._content = soup.decode()
content.toc = toc.decode()
if content.toc.startswith('<html>'):
content.toc = content.toc[12:-14]
def register():
signals.content_object_init.connect(extract_toc)
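# Template usage sketch (hypothetical theme snippet, not part of this plugin):
# once extract_toc has run, a theme can render the extracted markup, e.g. in
# article.html:
#
#   {% if article.toc %}
#     <nav class="toc">{{ article.toc }}</nav>
#   {% endif %}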
| agpl-3.0 |
hujiajie/pa-chromium | chrome/test/functional/omnibox.py | 65 | 15428 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import re
import shutil
import tempfile
import urlparse
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class OmniboxTest(pyauto.PyUITest):
"""Test cases for the omnibox."""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
import time
while True:
self.pprint(self.GetOmniboxInfo().omniboxdict)
time.sleep(1)
def testFocusOnStartup(self):
"""Verify that the omnibox has focus on startup."""
self.WaitUntilOmniboxReadyHack()
self.assertTrue(self.GetOmniboxInfo().Properties('has_focus'))
def testHistoryResult(self):
"""Verify that the omnibox can fetch items from the history."""
url = self.GetFileURLForDataPath('title2.html')
title = 'Title Of Awesomeness'
self.AppendTab(pyauto.GURL(url))
def _VerifyHistoryResult(query_list, description, windex=0):
"""Verify result matching given description for given list of queries."""
for query_text in query_list:
matches = test_utils.GetOmniboxMatchesFor(
self, query_text, windex=windex,
attr_dict={'description': description})
self.assertTrue(matches)
self.assertEqual(1, len(matches))
item = matches[0]
self.assertEqual(url, item['destination_url'])
# Query using URL & title.
_VerifyHistoryResult([url, title], title)
# Verify results in another tab.
self.AppendTab(pyauto.GURL())
_VerifyHistoryResult([url, title], title)
# Verify results in another window.
self.OpenNewBrowserWindow(True)
self.WaitUntilOmniboxReadyHack(windex=1)
_VerifyHistoryResult([url, title], title, windex=1)
# Verify results in an incognito window.
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.WaitUntilOmniboxReadyHack(windex=2)
_VerifyHistoryResult([url, title], title, windex=2)
def _VerifyOmniboxURLMatches(self, url, description, windex=0):
"""Verify URL match results from the omnibox.
Args:
url: The URL to use.
description: The string description within the history page and Google
search to match against.
windex: The window index to work on. Defaults to 0 (first window).
"""
matches_description = test_utils.GetOmniboxMatchesFor(
self, url, windex=windex, attr_dict={'description': description})
self.assertEqual(1, len(matches_description))
if description == 'Google Search':
self.assertTrue(re.match('http://www.google.com/search.+',
matches_description[0]['destination_url']))
else:
self.assertEqual(url, matches_description[0]['destination_url'])
def testFetchHistoryResultItems(self):
"""Verify omnibox fetches history items in 2nd tab, window and incognito."""
url = self.GetFileURLForDataPath('title2.html')
title = 'Title Of Awesomeness'
desc = 'Google Search'
# Fetch history page item in the second tab.
self.AppendTab(pyauto.GURL(url))
self._VerifyOmniboxURLMatches(url, title)
# Fetch history page items in the second window.
self.OpenNewBrowserWindow(True)
self.NavigateToURL(url, 1, 0)
self._VerifyOmniboxURLMatches(url, title, windex=1)
# Fetch google search items in incognito window.
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 2, 0)
self._VerifyOmniboxURLMatches(url, desc, windex=2)
def testSelect(self):
"""Verify omnibox popup selection."""
url1 = self.GetFileURLForDataPath('title2.html')
url2 = self.GetFileURLForDataPath('title1.html')
title1 = 'Title Of Awesomeness'
self.NavigateToURL(url1)
self.NavigateToURL(url2)
matches = test_utils.GetOmniboxMatchesFor(self, 'file://')
self.assertTrue(matches)
# Find the index of match for |url1|.
index = None
for i, match in enumerate(matches):
if match['description'] == title1:
index = i
self.assertTrue(index is not None)
self.OmniboxMovePopupSelection(index) # Select |url1| line in popup.
self.assertEqual(url1, self.GetOmniboxInfo().Text())
self.OmniboxAcceptInput()
self.assertEqual(title1, self.GetActiveTabTitle())
def testInlineAutoComplete(self):
"""Verify inline autocomplete for a pre-visited URL."""
self.NavigateToURL('http://www.google.com')
matches = test_utils.GetOmniboxMatchesFor(self, 'goog')
self.assertTrue(matches)
# Omnibox should suggest auto completed URL as the first item.
matches_description = matches[0]
self.assertTrue('www.google.com' in matches_description['contents'])
self.assertEqual('history-url', matches_description['type'])
# The URL should be inline-autocompleted in the omnibox.
self.assertTrue('google.com' in self.GetOmniboxInfo().Text())
def testCrazyFilenames(self):
"""Test omnibox query with filenames containing special chars.
The files are created on the fly and cleaned after use.
"""
filename = os.path.join(self.DataDir(), 'downloads', 'crazy_filenames.txt')
zip_names = self.EvalDataFrom(filename)
# We got .zip filenames. Change them to .html.
crazy_filenames = [x.replace('.zip', '.html') for x in zip_names]
title = 'given title'
def _CreateFile(name):
"""Create the given html file."""
fp = open(name, 'w') # |name| could be unicode.
print >>fp, '<html><title>%s</title><body>' % title
print >>fp, 'This is a junk file named <h2>%s</h2>' % repr(name)
print >>fp, '</body></html>'
fp.close()
crazy_fileurls = []
# Temp dir for hosting crazy filenames.
temp_dir = tempfile.mkdtemp(prefix='omnibox')
# Windows has a dual nature dealing with unicode filenames.
# While the files are internally saved as unicode, there's a non-unicode
# aware API that returns a locale-dependent coding on the true unicode
# filenames. This messes up things.
# Filesystem-interfacing functions like os.listdir() need to
# be given unicode strings to "do the right thing" on win.
# Ref: http://boodebr.org/main/python/all-about-python-and-unicode
try:
for filename in crazy_filenames: # |filename| is unicode.
file_path = os.path.join(temp_dir, filename.encode('utf-8'))
_CreateFile(os.path.join(temp_dir, filename))
file_url = self.GetFileURLForPath(file_path)
crazy_fileurls.append(file_url)
self.NavigateToURL(file_url)
# Verify omnibox queries.
for file_url in crazy_fileurls:
matches = test_utils.GetOmniboxMatchesFor(self,
file_url, attr_dict={'type': 'url-what-you-typed',
'description': title})
self.assertTrue(matches)
self.assertEqual(1, len(matches))
self.assertTrue(os.path.basename(file_url) in
matches[0]['destination_url'])
finally:
shutil.rmtree(unicode(temp_dir)) # Unicode so that Win treats nicely.
def testSuggest(self):
"""Verify suggested results in omnibox."""
matches = test_utils.GetOmniboxMatchesFor(self, 'apple')
self.assertTrue(matches)
self.assertTrue([x for x in matches if x['type'] == 'search-suggest'])
def testDifferentTypesOfResults(self):
"""Verify different types of results from omnibox.
This includes history result, bookmark result, suggest results.
"""
url = 'http://www.google.com/'
title = 'Google'
search_string = 'google'
self.AddBookmarkURL( # Add a bookmark.
self.GetBookmarkModel().BookmarkBar()['id'], 0, title, url)
self.NavigateToURL(url) # Build up history.
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
self.assertTrue(matches)
# Verify starred result (indicating bookmarked url).
self.assertTrue([x for x in matches if x['starred'] == True])
for item_type in ('history-url', 'search-what-you-typed',
'search-suggest',):
self.assertTrue([x for x in matches if x['type'] == item_type])
def testSuggestPref(self):
"""Verify no suggests for omnibox when suggested-services disabled."""
search_string = 'apple'
self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kSearchSuggestEnabled))
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
self.assertTrue(matches)
self.assertTrue([x for x in matches if x['type'] == 'search-suggest'])
# Disable suggest-service.
self.SetPrefs(pyauto.kSearchSuggestEnabled, False)
self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kSearchSuggestEnabled))
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
self.assertTrue(matches)
# Verify there are no suggest results.
self.assertFalse([x for x in matches if x['type'] == 'search-suggest'])
def testAutoCompleteForSearch(self):
"""Verify omnibox autocomplete for search."""
search_string = 'youtu'
verify_string = 'youtube'
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
# Retrieve last contents element.
matches_description = matches[-1]['contents'].split()
self.assertEqual(verify_string, matches_description[0])
def _GotContentHistory(self, search_text, url):
"""Check if omnibox returns a previously-visited page for given search text.
Args:
search_text: The string search text.
url: The string URL to look for in the omnibox matches.
Returns:
True, if the omnibox returns the previously-visited page for the given
search text, or False otherwise.
"""
# Omnibox doesn't change results if searching the same text repeatedly.
    # So we set the omnibox text to '' before each repeated search.
self.SetOmniboxText('')
matches = test_utils.GetOmniboxMatchesFor(self, search_text)
matches_description = [x for x in matches if x['destination_url'] == url]
return 1 == len(matches_description)
def testContentHistory(self):
"""Verify omnibox results when entering page content.
Test verifies that visited page shows up in omnibox on entering page
content.
"""
url = self.GetFileURLForPath(
os.path.join(self.DataDir(), 'find_in_page', 'largepage.html'))
self.NavigateToURL(url)
self.assertTrue(self.WaitUntil(
lambda: self._GotContentHistory('British throne', url)))
def testOmniboxSearchHistory(self):
"""Verify page navigation/search from omnibox are added to the history."""
url = self.GetFileURLForDataPath('title2.html')
self.NavigateToURL(url)
self.AppendTab(pyauto.GURL('about:blank'))
self.SetOmniboxText('java')
self.WaitUntilOmniboxQueryDone()
self.OmniboxAcceptInput()
history = self.GetHistoryInfo().History()
self.assertEqual(2, len(history))
self.assertEqual(url, history[1]['url'])
self.assertEqual('java - Google Search', history[0]['title'])
def _VerifyHasBookmarkResult(self, matches):
"""Verify that we have a bookmark result.
Args:
matches: A list of match items, as returned by
test_utils.GetOmniboxMatchesFor().
"""
matches_starred = [result for result in matches if result['starred']]
self.assertTrue(matches_starred)
self.assertEqual(1, len(matches_starred))
def _CheckBookmarkResultForVariousInputs(self, url, title, windex=0):
"""Check if we get the bookmark for complete and partial inputs.
Args:
url: A string URL.
title: A string title for the given URL.
windex: The window index to use. Defaults to 0 (first window).
"""
# Check if the complete URL would get the bookmark.
url_matches = test_utils.GetOmniboxMatchesFor(self, url, windex=windex)
self._VerifyHasBookmarkResult(url_matches)
# Check if the complete title would get the bookmark.
title_matches = test_utils.GetOmniboxMatchesFor(self, title, windex=windex)
self._VerifyHasBookmarkResult(title_matches)
# Check if the partial URL would get the bookmark.
split_url = urlparse.urlsplit(url)
partial_url = test_utils.GetOmniboxMatchesFor(
self, split_url.scheme, windex=windex)
self._VerifyHasBookmarkResult(partial_url)
# Check if the partial title would get the bookmark.
split_title = title.split()
search_term = split_title[len(split_title) - 1]
partial_title = test_utils.GetOmniboxMatchesFor(
self, search_term, windex=windex)
self._VerifyHasBookmarkResult(partial_title)
def testBookmarkResultInNewTabAndWindow(self):
"""Verify omnibox finds bookmarks in search options of new tabs/windows."""
url = self.GetFileURLForDataPath('title2.html')
self.NavigateToURL(url)
title = 'This is Awesomeness'
bookmarks = self.GetBookmarkModel()
bar_id = bookmarks.BookmarkBar()['id']
self.AddBookmarkURL(bar_id, 0, title, url)
bookmarks = self.GetBookmarkModel()
nodes = bookmarks.FindByTitle(title)
self.AppendTab(pyauto.GURL(url))
self._CheckBookmarkResultForVariousInputs(url, title)
self.OpenNewBrowserWindow(True)
self.assertEqual(2, self.GetBrowserWindowCount())
self.NavigateToURL(url, 1, 0)
self._CheckBookmarkResultForVariousInputs(url, title, windex=1)
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.assertEqual(3, self.GetBrowserWindowCount())
self.NavigateToURL(url, 2, 0)
self._CheckBookmarkResultForVariousInputs(url, title, windex=2)
def testAutoCompleteForNonAsciiSearch(self):
"""Verify can search/autocomplete with non-ASCII incomplete keywords."""
search_string = u'\u767e'
verify_string = u'\u767e\u5ea6\u4e00\u4e0b'
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
self.assertTrue(verify_string in matches[-1]['contents'])
class OmniboxLiveTest(pyauto.PyUITest):
"""Test cases for the omnibox that hit live servers (such as Google)."""
def ExtraChromeFlags(self):
"""Override default list of extra flags used in pyauto tests."""
# Force the suggest field trial group. This doesn't guarantee that there
# will be no experimental behaviour, but there's no other way to disable
# all suggest field trials at the moment. TODO(mpearson): Consider allowing
# the suggest_url to be overridden using a flag (so that we can omit the
# "sugexp=chrome,mod=<n>" CGI param), or provide some other way to turn off
# all suggest field trials.
return ['--force-fieldtrials=OmniboxSearchSuggest/10/']
def testGoogleSearch(self):
"""Verify Google search item in omnibox results."""
search_text = 'hello world'
verify_str = 'Google Search'
url_re = 'http://www.google.com/search\?.*q=hello\+world.*'
matches_description = test_utils.GetOmniboxMatchesFor(
self, search_text, attr_dict={'description': verify_str})
self.assertTrue(matches_description)
# There should be a least one entry with the description Google. Suggest
# results may end up having 'Google Search' in them, so use >=.
self.assertTrue(len(matches_description) >= 1)
item = matches_description[0]
self.assertTrue(re.search(url_re, item['destination_url']))
self.assertEqual('search-what-you-typed', item['type'])
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause |
aaiyer/bugseverywhere | libbe/command/list.py | 5 | 10809 | # Copyright (C) 2005-2012 Aaron Bentley <[email protected]>
# Chris Ball <[email protected]>
# Gianluca Montecchi <[email protected]>
# Oleg Romanyshyn <[email protected]>
# Robert Lehmann <[email protected]>
# W. Trevor King <[email protected]>
#
# This file is part of Bugs Everywhere.
#
# Bugs Everywhere is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# Bugs Everywhere is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Bugs Everywhere. If not, see <http://www.gnu.org/licenses/>.
import itertools
import os
import re
import libbe
import libbe.bug
import libbe.command
import libbe.command.depend
from libbe.command.depend import Filter, parse_status, parse_severity
import libbe.command.tag
import libbe.command.target
import libbe.command.util
# get a list of * for cmp_*() comparing two bugs.
AVAILABLE_CMPS = [fn[4:] for fn in dir(libbe.bug) if fn[:4] == 'cmp_']
AVAILABLE_CMPS.remove('attr') # a cmp_* template.
class List (libbe.command.Command):
"""List bugs
>>> import sys
>>> import libbe.bugdir
>>> bd = libbe.bugdir.SimpleBugDir(memory=False)
>>> io = libbe.command.StringInputOutput()
>>> io.stdout = sys.stdout
>>> ui = libbe.command.UserInterface(io=io)
>>> ui.storage_callbacks.set_storage(bd.storage)
>>> cmd = List(ui=ui)
>>> ret = ui.run(cmd)
abc/a:om: Bug A
>>> ret = ui.run(cmd, {'status':'closed'})
abc/b:cm: Bug B
>>> ret = ui.run(cmd, {'status':'all', 'sort':'time'})
abc/a:om: Bug A
abc/b:cm: Bug B
>>> bd.storage.writeable
True
>>> ui.cleanup()
>>> bd.cleanup()
"""
name = 'list'
def __init__(self, *args, **kwargs):
libbe.command.Command.__init__(self, *args, **kwargs)
self.options.extend([
libbe.command.Option(name='status',
help='Only show bugs matching the STATUS specifier',
arg=libbe.command.Argument(
name='status', metavar='STATUS', default='active',
completion_callback=libbe.command.util.complete_status)),
libbe.command.Option(name='severity',
help='Only show bugs matching the SEVERITY specifier',
arg=libbe.command.Argument(
name='severity', metavar='SEVERITY', default='all',
completion_callback=libbe.command.util.complete_severity)),
libbe.command.Option(name='important',
help='List bugs with >= "serious" severity'),
libbe.command.Option(name='assigned', short_name='a',
help='Only show bugs matching ASSIGNED',
arg=libbe.command.Argument(
name='assigned', metavar='ASSIGNED', default=None,
completion_callback=libbe.command.util.complete_assigned)),
libbe.command.Option(name='mine', short_name='m',
help='List bugs assigned to you'),
libbe.command.Option(name='extra-strings', short_name='e',
help='Only show bugs matching STRINGS, e.g. --extra-strings'
' TAG:working,TAG:xml',
arg=libbe.command.Argument(
name='extra-strings', metavar='STRINGS', default=None,
completion_callback=libbe.command.util.complete_extra_strings)),
libbe.command.Option(name='sort', short_name='S',
help='Adjust bug-sort criteria with comma-separated list '
'SORT. e.g. "--sort creator,time". '
'Available criteria: %s' % ','.join(AVAILABLE_CMPS),
arg=libbe.command.Argument(
name='sort', metavar='SORT', default=None,
completion_callback=libbe.command.util.Completer(AVAILABLE_CMPS))),
libbe.command.Option(name='tags', short_name='t',
help='Add TAGS: field to standard listing format.'),
libbe.command.Option(name='ids', short_name='i',
help='Only print the bug IDS'),
libbe.command.Option(name='xml', short_name='x',
help='Dump output in XML format'),
])
# parser.add_option("-S", "--sort", metavar="SORT-BY", dest="sort_by",
# help="Adjust bug-sort criteria with comma-separated list SORT-BY. e.g. \"--sort creator,time\". Available criteria: %s" % ','.join(AVAILABLE_CMPS), default=None)
# # boolean options. All but ids and xml are special cases of long forms
# ("w", "wishlist", "List bugs with 'wishlist' severity"),
# ("A", "active", "List all active bugs"),
# ("U", "unconfirmed", "List unconfirmed bugs"),
# ("o", "open", "List open bugs"),
# ("T", "test", "List bugs in testing"),
# for s in bools:
# attr = s[1].replace('-','_')
# short = "-%c" % s[0]
# long = "--%s" % s[1]
# help = s[2]
# parser.add_option(short, long, action="store_true",
# dest=attr, help=help, default=False)
# return parser
#
# ])
def _run(self, **params):
storage = self._get_storage()
bugdirs = self._get_bugdirs()
writeable = storage.writeable
storage.writeable = False
cmp_list, status, severity, assigned, extra_strings_regexps = \
self._parse_params(bugdirs, params)
filter = Filter(status, severity, assigned,
extra_strings_regexps=extra_strings_regexps)
bugs = list(itertools.chain(*list(
[bugdir.bug_from_uuid(uuid) for uuid in bugdir.uuids()]
for bugdir in bugdirs.values())))
bugs = [b for b in bugs if filter(bugdirs, b) == True]
self.result = bugs
if len(bugs) == 0 and params['xml'] == False:
print >> self.stdout, 'No matching bugs found'
# sort bugs
bugs = self._sort_bugs(bugs, cmp_list)
# print list of bugs
if params['ids'] == True:
for bug in bugs:
print >> self.stdout, bug.id.user()
else:
self._list_bugs(bugs, show_tags=params['tags'], xml=params['xml'])
storage.writeable = writeable
return 0
def _parse_params(self, bugdirs, params):
cmp_list = []
if params['sort'] != None:
for cmp in params['sort'].split(','):
if cmp not in AVAILABLE_CMPS:
raise libbe.command.UserError(
'Invalid sort on "%s".\nValid sorts:\n %s'
% (cmp, '\n '.join(AVAILABLE_CMPS)))
cmp_list.append(getattr(libbe.bug, 'cmp_%s' % cmp))
status = parse_status(params['status'])
severity = parse_severity(params['severity'],
important=params['important'])
# select assigned
if params['assigned'] == None:
if params['mine'] == True:
assigned = [self._get_user_id()]
else:
assigned = 'all'
else:
assigned = libbe.command.util.select_values(
params['assigned'], libbe.command.util.assignees(bugdirs))
for i in range(len(assigned)):
if assigned[i] == '-':
assigned[i] = params['user-id']
if params['extra-strings'] == None:
extra_strings_regexps = []
else:
extra_strings_regexps = [re.compile(x)
for x in params['extra-strings'].split(',')]
return (cmp_list, status, severity, assigned, extra_strings_regexps)
def _sort_bugs(self, bugs, cmp_list=None):
if cmp_list is None:
cmp_list = []
cmp_list.extend(libbe.bug.DEFAULT_CMP_FULL_CMP_LIST)
cmp_fn = libbe.bug.BugCompoundComparator(cmp_list=cmp_list)
bugs.sort(cmp_fn)
return bugs
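    # Sketch of how a --sort value is applied (comparator names assumed from
    # AVAILABLE_CMPS): "--sort creator,time" yields
    # cmp_list = [libbe.bug.cmp_creator, libbe.bug.cmp_time]; _sort_bugs then
    # appends DEFAULT_CMP_FULL_CMP_LIST as a tie-breaker, so bugs equal on
    # creator and time still sort deterministically.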
def _list_bugs(self, bugs, show_tags=False, xml=False):
if xml == True:
print >> self.stdout, \
'<?xml version="1.0" encoding="%s" ?>' % self.stdout.encoding
print >> self.stdout, '<be-xml>'
if len(bugs) > 0:
for bug in bugs:
if xml == True:
print >> self.stdout, bug.xml(show_comments=True)
else:
bug_string = bug.string(shortlist=True)
if show_tags == True:
attrs,summary = bug_string.split(' ', 1)
bug_string = (
'%s%s: %s'
% (attrs,
','.join(libbe.command.tag.get_tags(bug)),
summary))
print >> self.stdout, bug_string
if xml == True:
print >> self.stdout, '</be-xml>'
def _long_help(self):
return """
This command lists bugs. Normally it prints a short string like
bea/576:om:[TAGS:] Allow attachments
Where
bea/576 the bug id
o the bug status is 'open' (first letter)
m the bug severity is 'minor' (first letter)
TAGS comma-separated list of bug tags (if --tags is set)
Allo... the bug summary string
You can optionally (-i) print only the bug ids.
There are several criteria that you can filter by:
* status
* severity
* assigned (who the bug is assigned to)
Allowed values for each criterion may be given in a comma separated
list. The special string "all" may be used with any of these options
to match all values of the criterion. As with the --status and
--severity options for `be depend`, starting the list with a minus
sign makes your selections a blacklist instead of the default
whitelist.
status
%s
severity
%s
assigned
free form, with the string '-' being a shortcut for yourself.
In addition, there are some shortcut options that set boolean flags.
The boolean options are ignored if the matching string option is used.
""" % (','.join(libbe.bug.status_values),
','.join(libbe.bug.severity_values))
| gpl-2.0 |
alexforencich/python-ivi | ivi/tektronix/tektronixMSO5204B.py | 1 | 1557 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMSO5000 import *
class tektronixMSO5204B(tektronixMSO5000):
"Tektronix MSO5204B IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO5204B')
super(tektronixMSO5204B, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._bandwidth = 2e9
self._init_channels()
| mit |
simbha/mAngE-Gin | lib/Django 1.7/django/contrib/staticfiles/finders.py | 106 | 9852 | from collections import OrderedDict
import os
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.functional import empty, LazyObject
from django.utils.module_loading import import_string
from django.utils._os import safe_join
from django.utils import six, lru_cache
from django.contrib.staticfiles import utils
# To keep track on which directories the finder has searched the static files.
searched_locations = []
class BaseFinder(object):
"""
A base file finder to be used for custom staticfiles finder classes.
"""
def find(self, path, all=False):
"""
Given a relative file path this ought to find an
absolute file path.
If the ``all`` parameter is ``False`` (default) only
the first found file path will be returned; if set
to ``True`` a list of all found files paths is returned.
"""
raise NotImplementedError('subclasses of BaseFinder must provide a find() method')
def list(self, ignore_patterns):
"""
Given an optional list of paths to ignore, this should return
a two item iterable consisting of the relative path and storage
instance.
"""
raise NotImplementedError('subclasses of BaseFinder must provide a list() method')
class FileSystemFinder(BaseFinder):
"""
A static files finder that uses the ``STATICFILES_DIRS`` setting
to locate files.
"""
def __init__(self, app_names=None, *args, **kwargs):
# List of locations with static files
self.locations = []
# Maps dir paths to an appropriate storage instance
self.storages = OrderedDict()
if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
raise ImproperlyConfigured(
"Your STATICFILES_DIRS setting is not a tuple or list; "
"perhaps you forgot a trailing comma?")
for root in settings.STATICFILES_DIRS:
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ''
if settings.STATIC_ROOT and os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
raise ImproperlyConfigured(
"The STATICFILES_DIRS setting should "
"not contain the STATIC_ROOT setting")
if (prefix, root) not in self.locations:
self.locations.append((prefix, root))
for prefix, root in self.locations:
filesystem_storage = FileSystemStorage(location=root)
filesystem_storage.prefix = prefix
self.storages[root] = filesystem_storage
super(FileSystemFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
if root not in searched_locations:
searched_locations.append(root)
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches
def find_location(self, root, path, prefix=None):
"""
Finds a requested static file in a location, returning the found
absolute path (or ``None`` if no match).
"""
if prefix:
prefix = '%s%s' % (prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
path = safe_join(root, path)
if os.path.exists(path):
return path
def list(self, ignore_patterns):
"""
List all files in all locations.
"""
for prefix, root in self.locations:
storage = self.storages[root]
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
class AppDirectoriesFinder(BaseFinder):
"""
A static files finder that looks in the directory of each app as
specified in the source_dir attribute.
"""
storage_class = FileSystemStorage
source_dir = 'static'
def __init__(self, app_names=None, *args, **kwargs):
# The list of apps that are handled
self.apps = []
# Mapping of app names to storage instances
self.storages = OrderedDict()
app_configs = apps.get_app_configs()
if app_names:
app_names = set(app_names)
app_configs = [ac for ac in app_configs if ac.name in app_names]
for app_config in app_configs:
app_storage = self.storage_class(
os.path.join(app_config.path, self.source_dir))
if os.path.isdir(app_storage.location):
self.storages[app_config.name] = app_storage
if app_config.name not in self.apps:
self.apps.append(app_config.name)
super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
def list(self, ignore_patterns):
"""
List all files in all app storages.
"""
for storage in six.itervalues(self.storages):
if storage.exists(''): # check if storage location exists
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
def find(self, path, all=False):
"""
Looks for files in the app directories.
"""
matches = []
for app in self.apps:
app_location = self.storages[app].location
if app_location not in searched_locations:
searched_locations.append(app_location)
match = self.find_in_app(app, path)
if match:
if not all:
return match
matches.append(match)
return matches
def find_in_app(self, app, path):
"""
Find a requested static file in an app's static locations.
"""
storage = self.storages.get(app, None)
if storage:
# only try to find a file if the source dir actually exists
if storage.exists(path):
matched_path = storage.path(path)
if matched_path:
return matched_path
class BaseStorageFinder(BaseFinder):
"""
A base static files finder to be used to extended
with an own storage class.
"""
storage = None
def __init__(self, storage=None, *args, **kwargs):
if storage is not None:
self.storage = storage
if self.storage is None:
raise ImproperlyConfigured("The staticfiles storage finder %r "
"doesn't have a storage class "
"assigned." % self.__class__)
# Make sure we have an storage instance here.
if not isinstance(self.storage, (Storage, LazyObject)):
self.storage = self.storage()
super(BaseStorageFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the default file storage, if it's local.
"""
try:
self.storage.path('')
except NotImplementedError:
pass
else:
if self.storage.location not in searched_locations:
searched_locations.append(self.storage.location)
if self.storage.exists(path):
match = self.storage.path(path)
if all:
match = [match]
return match
return []
def list(self, ignore_patterns):
"""
List all files of the storage.
"""
for path in utils.get_files(self.storage, ignore_patterns):
yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
"""
A static files finder that uses the default storage backend.
"""
storage = default_storage
def __init__(self, *args, **kwargs):
super(DefaultStorageFinder, self).__init__(*args, **kwargs)
base_location = getattr(self.storage, 'base_location', empty)
if not base_location:
raise ImproperlyConfigured("The storage backend of the "
"staticfiles finder %r doesn't have "
"a valid location." % self.__class__)
def find(path, all=False):
"""
Find a static file with the given path using all enabled finders.
If ``all`` is ``False`` (default), return the first matching
absolute path (or ``None`` if no match). Otherwise return a list.
"""
searched_locations[:] = []
matches = []
for finder in get_finders():
result = finder.find(path, all=all)
if not all and result:
return result
if not isinstance(result, (list, tuple)):
result = [result]
matches.extend(result)
if matches:
return matches
# No match.
return [] if all else None
def get_finders():
for finder_path in settings.STATICFILES_FINDERS:
yield get_finder(finder_path)
@lru_cache.lru_cache(maxsize=None)
def get_finder(import_path):
"""
Imports the staticfiles finder class described by import_path, where
import_path is the full Python path to the class.
"""
Finder = import_string(import_path)
if not issubclass(Finder, BaseFinder):
raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
(Finder, BaseFinder))
return Finder()
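# --- Illustrative usage sketch (added for exposition; not part of the original
# module). It shows how the finder API defined above is typically driven; the
# static path 'css/base.css' is a made-up example.
def _example_finder_usage():
    # First matching absolute path, or None if nothing resolves it.
    first_match = find('css/base.css')
    # Every matching absolute path across all configured finders.
    all_matches = find('css/base.css', all=True)
    # Walk every file known to every finder (roughly what collectstatic does).
    listing = []
    for finder in get_finders():
        for path, storage in finder.list(ignore_patterns=[]):
            listing.append((path, storage))
    return first_match, all_matches, listing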
| mit |
Kmayankkr/robocomp | tools/rcmonitor/someTest.py | 5 | 1932 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
# Custom Template
import Ice
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qt import *
class C(QWidget):
def __init__(self, endpoint, modules):
QWidget.__init__(self)
self.ic = Ice.initialize(sys.argv)
self.mods = modules
self.prx = self.ic.stringToProxy(endpoint)
self.proxy = self.mods['RoboCompCamara'].CamaraPrx.checkedCast(self.prx)
self.measures = range(33)
self.job()
def job(self):
# Remote procedure call
output = self.proxy.getRGBPackedImage(5) # vector, head, bState
# Store image
self.image = output[0]
# Store pos measure
self.measures.pop(0)
self.measures.append(output[1].tilt.pos)
def paintEvent(self, event=None):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
# Draw image
qimage = QImage(self.image, 320, 240, QImage.Format_RGB888)
painter.drawImage(QPointF(0, 0), qimage)
# Draw signal
for idx in range(len(self.measures)-1):
painter.drawLine(idx*10, (self.height()/2)-(self.measures[idx]*100), (idx+1)*10, (self.height()/2)-(self.measures[idx+1]*100))
painter.end()
| gpl-3.0 |
binarytemple/ansible | plugins/inventory/rax.py | 24 | 9460 | #!/usr/bin/env python
# (c) 2013, Jesse Keating <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: rax
short_description: Rackspace Public Cloud external inventory script
description:
- Generates inventory that Ansible can understand by making API request to
Rackspace Public Cloud API
- |
When run against a specific host, this script returns the following
variables:
rax_os-ext-sts_task_state
rax_addresses
rax_links
rax_image
rax_os-ext-sts_vm_state
rax_flavor
rax_id
rax_rax-bandwidth_bandwidth
rax_user_id
rax_os-dcf_diskconfig
rax_accessipv4
rax_accessipv6
rax_progress
rax_os-ext-sts_power_state
rax_metadata
rax_status
rax_updated
rax_hostid
rax_name
rax_created
rax_tenant_id
rax_loaded
where some item can have nested structure.
- credentials are set in a credentials file
version_added: None
options:
creds_file:
description:
- File to find the Rackspace Public Cloud credentials in
required: true
default: null
region:
description:
- An optional value to narrow inventory scope, i.e. DFW, ORD, IAD, LON
required: false
default: null
authors:
- Jesse Keating <[email protected]>
- Paul Durivage <[email protected]>
- Matt Martz <[email protected]>
notes:
- RAX_CREDS_FILE is an optional environment variable that points to a
pyrax-compatible credentials file.
- If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file
at ~/.rackspace_cloud_credentials.
- See https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating
- RAX_REGION is an optional environment variable to narrow inventory search
scope
- RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace
datacenter) and optionally accepts a comma-separated list
- RAX_ENV is an environment variable that will use an environment as
configured in ~/.pyrax.cfg, see
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration
- RAX_META_PREFIX is an environment variable that changes the prefix used
for meta key/value groups. For compatibility with ec2.py set to
RAX_META_PREFIX=tag
requirements: [ "pyrax" ]
examples:
- description: List server instances
code: RAX_CREDS_FILE=~/.raxpub rax.py --list
- description: List servers in ORD datacenter only
code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
- description: List servers in ORD and DFW datacenters
code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
- description: Get server details for server named "server.example.com"
code: RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
'''
import os
import re
import sys
import argparse
import collections
from types import NoneType
try:
import json
except:
import simplejson as json
try:
import pyrax
except ImportError:
print('pyrax is required for this module')
sys.exit(1)
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
def rax_slugify(value):
return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
def to_dict(obj):
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
key = rax_slugify(key)
instance[key] = value
return instance
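# --- Illustrative sketch (added for exposition; not part of the original
# script). A made-up stand-in object run through to_dict() to show how
# attribute names get the "rax_" prefix via rax_slugify() and how callables
# and underscore-prefixed names are skipped.
def _example_to_dict():
    class FakeServer(object):
        accessIPv4 = '203.0.113.10'
        task_state = None
        _private = 'skipped because of the leading underscore'

        def reboot(self):
            return None  # skipped because it is callable

    return to_dict(FakeServer())
    # -> {'rax_accessipv4': '203.0.113.10', 'rax_task_state': None}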
def host(regions, hostname):
hostvars = {}
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
for server in cs.servers.list():
if server.name == hostname:
for key, value in to_dict(server).items():
hostvars[key] = value
# And finally, add an IP address
hostvars['ansible_ssh_host'] = server.accessIPv4
print(json.dumps(hostvars, sort_keys=True, indent=4))
def _list(regions):
groups = collections.defaultdict(list)
hostvars = collections.defaultdict(dict)
images = {}
# Go through all the regions looking for servers
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
for server in cs.servers.list():
# Create a group on region
groups[region].append(server.name)
# Check if group metadata key in servers' metadata
group = server.metadata.get('group')
if group:
groups[group].append(server.name)
for extra_group in server.metadata.get('groups', '').split(','):
if extra_group:
groups[extra_group].append(server.name)
# Add host metadata
for key, value in to_dict(server).items():
hostvars[server.name][key] = value
hostvars[server.name]['rax_region'] = region
for key, value in server.metadata.iteritems():
prefix = os.getenv('RAX_META_PREFIX', 'meta')
groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
groups['instance-%s' % server.id].append(server.name)
groups['flavor-%s' % server.flavor['id']].append(server.name)
try:
imagegroup = 'image-%s' % images[server.image['id']]
groups[imagegroup].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
except KeyError:
try:
image = cs.images.get(server.image['id'])
except cs.exceptions.NotFound:
groups['image-%s' % server.image['id']].append(server.name)
else:
images[image.id] = image.human_id
groups['image-%s' % image.human_id].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
# And finally, add an IP address
hostvars[server.name]['ansible_ssh_host'] = server.accessIPv4
if hostvars:
groups['_meta'] = {'hostvars': hostvars}
print(json.dumps(groups, sort_keys=True, indent=4))
def parse_args():
parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
'inventory module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def setup():
default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')
env = os.getenv('RAX_ENV', None)
if env:
pyrax.set_environment(env)
keyring_username = pyrax.get_setting('keyring_username')
# Attempt to grab credentials from environment first
try:
creds_file = os.path.expanduser(os.environ['RAX_CREDS_FILE'])
except KeyError, e:
# But if that fails, use the default location of
# ~/.rackspace_cloud_credentials
if os.path.isfile(default_creds_file):
creds_file = default_creds_file
elif not keyring_username:
sys.stderr.write('No value in environment variable %s and/or no '
'credentials file at %s\n'
% (e.message, default_creds_file))
sys.exit(1)
identity_type = pyrax.get_setting('identity_type')
pyrax.set_setting('identity_type', identity_type or 'rackspace')
region = pyrax.get_setting('region')
try:
if keyring_username:
pyrax.keyring_auth(keyring_username, region=region)
else:
pyrax.set_credential_file(creds_file, region=region)
except Exception, e:
sys.stderr.write("%s: %s\n" % (e, e.message))
sys.exit(1)
regions = []
if region:
regions.append(region)
else:
for region in os.getenv('RAX_REGION', 'all').split(','):
region = region.strip().upper()
if region == 'ALL':
regions = pyrax.regions
break
elif region not in pyrax.regions:
sys.stderr.write('Unsupported region %s' % region)
sys.exit(1)
elif region not in regions:
regions.append(region)
return regions
def main():
args = parse_args()
regions = setup()
if args.list:
_list(regions)
elif args.host:
host(regions, args.host)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 |
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/Paste-2.0.1/paste/gzipper.py | 50 | 3611 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
WSGI middleware
Gzip-encodes the response.
"""
import gzip
from paste.response import header_value, remove_header
from paste.httpheaders import CONTENT_LENGTH
import six
class GzipOutput(object):
pass
class middleware(object):
def __init__(self, application, compress_level=6):
self.application = application
self.compress_level = int(compress_level)
def __call__(self, environ, start_response):
if 'gzip' not in environ.get('HTTP_ACCEPT_ENCODING', ''):
# nothing for us to do, so this middleware will
# be a no-op:
return self.application(environ, start_response)
response = GzipResponse(start_response, self.compress_level)
app_iter = self.application(environ,
response.gzip_start_response)
if app_iter is not None:
response.finish_response(app_iter)
return response.write()
class GzipResponse(object):
def __init__(self, start_response, compress_level):
self.start_response = start_response
self.compress_level = compress_level
self.buffer = six.BytesIO()
self.compressible = False
self.content_length = None
def gzip_start_response(self, status, headers, exc_info=None):
self.headers = headers
ct = header_value(headers,'content-type')
ce = header_value(headers,'content-encoding')
self.compressible = False
if ct and (ct.startswith('text/') or ct.startswith('application/')) \
and 'zip' not in ct:
self.compressible = True
if ce:
self.compressible = False
if self.compressible:
headers.append(('content-encoding', 'gzip'))
remove_header(headers, 'content-length')
self.headers = headers
self.status = status
return self.buffer.write
def write(self):
out = self.buffer
out.seek(0)
s = out.getvalue()
out.close()
return [s]
def finish_response(self, app_iter):
if self.compressible:
output = gzip.GzipFile(mode='wb', compresslevel=self.compress_level,
fileobj=self.buffer)
else:
output = self.buffer
try:
for s in app_iter:
output.write(s)
if self.compressible:
output.close()
finally:
if hasattr(app_iter, 'close'):
app_iter.close()
content_length = self.buffer.tell()
CONTENT_LENGTH.update(self.headers, content_length)
self.start_response(self.status, self.headers)
def filter_factory(application, **conf):
import warnings
warnings.warn(
'This function is deprecated; use make_gzip_middleware instead',
DeprecationWarning, 2)
def filter(application):
return middleware(application)
return filter
def make_gzip_middleware(app, global_conf, compress_level=6):
"""
Wrap the middleware, so that it applies gzipping to a response
when it is supported by the browser and the content is of
type ``text/*`` or ``application/*``
"""
compress_level = int(compress_level)
return middleware(app, compress_level=compress_level)
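# --- Illustrative sketch (added for exposition; not part of the original
# module). A minimal WSGI app wrapped by the middleware above; the response is
# only gzipped when the client sends "Accept-Encoding: gzip" and the
# content-type is text/* or application/* without "zip" in it.
def _example_app(environ, start_response):
    body = b'hello world'
    start_response('200 OK', [('content-type', 'text/plain'),
                              ('content-length', str(len(body)))])
    return [body]

_example_wrapped_app = middleware(_example_app, compress_level=6)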
| gpl-2.0 |
geertj/python-tdbus | examples/avahi.py | 1 | 1381 | #!/usr/bin/env python
#
# This file is part of python-tdbus. Python-tdbus is free software
# available under the terms of the MIT license. See the file "LICENSE" that
# was provided together with this source file for the licensing terms.
#
# Copyright (c) 2012 the python-tdbus authors. See the file "AUTHORS" for a
# complete list.
# This example shows how to access Avahi on the D-BUS.
import sys
from tdbus import *
CONN_AVAHI = 'org.freedesktop.Avahi'
PATH_SERVER = '/'
IFACE_SERVER = 'org.freedesktop.Avahi.Server'
conn = Connection(DBUS_BUS_SYSTEM)
dispatcher = BlockingDispatcher(conn)
try:
result = dispatcher.call_method(PATH_SERVER, 'GetVersionString',
interface=IFACE_SERVER, destination=CONN_AVAHI)
except Error:
print 'Avahi NOT available.'
raise
print 'Avahi is available at %s' % CONN_AVAHI
print 'Avahi version: %s' % result[0]
print
print 'Browsing service types on domain: local'
print 'Press CTRL-\\ to exit'
print
result = dispatcher.call_method('/', 'ServiceTypeBrowserNew', interface=IFACE_SERVER,
destination=CONN_AVAHI, format='iisu', args=(-1, 0, 'local', 0))
browser = result[0]
def item_new(message, dispatcher):
args = message.get_args()
print 'service %s exists on domain %s' % (args[2], args[3])
dispatcher.add_signal_handler(browser, 'ItemNew', item_new)
dispatcher.dispatch()
| mit |
yajnab/android_kernel_samsung_msm7x27 | tools/perf/scripts/python/sctop.py | 895 | 1936 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
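# Example invocations (illustrative, added for exposition):
#   perf trace -s sctop.py              system-wide totals, 3 second refresh
#   perf trace -s sctop.py firefox 5    only syscalls made by "firefox", 5 second refresh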
import thread
import time
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf trace -s sctop.py [comm] [interval]\n"
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40d %10d\n" % (id, val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
sajuptpm/manila | manila/tests/cmd/test_manage.py | 2 | 15025 | # Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import code
import readline
import sys
import ddt
import mock
from oslo_config import cfg
import six
from manila.cmd import manage as manila_manage
from manila import context
from manila import db
from manila.db import migration
from manila import test
from manila import version
CONF = cfg.CONF
@ddt.ddt
class ManilaCmdManageTestCase(test.TestCase):
def setUp(self):
super(ManilaCmdManageTestCase, self).setUp()
sys.argv = ['manila-share']
CONF(sys.argv[1:], project='manila', version=version.version_string())
self.shell_commands = manila_manage.ShellCommands()
self.host_commands = manila_manage.HostCommands()
self.db_commands = manila_manage.DbCommands()
self.version_commands = manila_manage.VersionCommands()
self.config_commands = manila_manage.ConfigCommands()
self.get_log_cmds = manila_manage.GetLogCommands()
self.service_cmds = manila_manage.ServiceCommands()
def test_param2id_is_uuid_like(self):
obj_id = '12345678123456781234567812345678'
self.assertEqual(obj_id, manila_manage.param2id(obj_id))
def test_param2id_not_uuid_like_with_dash(self):
obj_id = '112-112-112'
self.assertIsNone(manila_manage.param2id(obj_id))
def test_param2id_not_uuid_like_without_dash(self):
obj_id = '123'
self.assertEqual(123, manila_manage.param2id(obj_id))
def test_param2id_not_uuid_like_value_error(self):
obj_id = 'invalidvalue'
self.assertRaises(ValueError, manila_manage.param2id, obj_id)
@mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock())
def test_shell_commands_bpython(self):
self.shell_commands.bpython()
manila_manage.ShellCommands.run.assert_called_once_with('bpython')
@mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock())
def test_shell_commands_ipython(self):
self.shell_commands.ipython()
manila_manage.ShellCommands.run.assert_called_once_with('ipython')
@mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock())
def test_shell_commands_python(self):
self.shell_commands.python()
manila_manage.ShellCommands.run.assert_called_once_with('python')
@ddt.data({}, {'shell': 'bpython'})
def test_run_bpython(self, kwargs):
try:
import bpython
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(bpython, 'embed')
self.shell_commands.run(**kwargs)
bpython.embed.assert_called_once_with()
def test_run_bpython_import_error(self):
try:
import bpython
import IPython
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(bpython, 'embed',
mock.Mock(side_effect=ImportError()))
self.mock_object(IPython, 'embed')
self.shell_commands.run(shell='bpython')
IPython.embed.assert_called_once_with()
def test_run(self):
try:
import bpython
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(bpython, 'embed')
self.shell_commands.run()
bpython.embed.assert_called_once_with()
def test_run_ipython(self):
try:
import IPython
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(IPython, 'embed')
self.shell_commands.run(shell='ipython')
IPython.embed.assert_called_once_with()
def test_run_ipython_import_error(self):
try:
import IPython
if not hasattr(IPython, 'Shell'):
setattr(IPython, 'Shell', mock.Mock())
setattr(IPython.Shell, 'IPShell',
mock.Mock(side_effect=ImportError()))
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(IPython, 'embed',
mock.Mock(side_effect=ImportError()))
self.mock_object(readline, 'parse_and_bind')
self.mock_object(code, 'interact')
shell = IPython.embed.return_value
self.shell_commands.run(shell='ipython')
IPython.Shell.IPShell.assert_called_once_with(argv=[])
self.assertFalse(shell.mainloop.called)
self.assertTrue(readline.parse_and_bind.called)
code.interact.assert_called_once_with()
def test_run_python(self):
self.mock_object(readline, 'parse_and_bind')
self.mock_object(code, 'interact')
self.shell_commands.run(shell='python')
readline.parse_and_bind.assert_called_once_with("tab:complete")
code.interact.assert_called_once_with()
def test_run_python_import_error(self):
self.mock_object(readline, 'parse_and_bind')
self.mock_object(code, 'interact')
self.shell_commands.run(shell='python')
readline.parse_and_bind.assert_called_once_with("tab:complete")
code.interact.assert_called_once_with()
@mock.patch('six.moves.builtins.print')
def test_list(self, print_mock):
serv_1 = {
'host': 'fake_host1',
'availability_zone': 'avail_zone1',
}
serv_2 = {
'host': 'fake_host2',
'availability_zone': 'avail_zone2',
}
self.mock_object(db, 'service_get_all',
mock.Mock(return_value=[serv_1, serv_2]))
self.mock_object(context, 'get_admin_context',
mock.Mock(return_value='admin_ctxt'))
self.host_commands.list(zone='avail_zone1')
context.get_admin_context.assert_called_once_with()
db.service_get_all.assert_called_once_with('admin_ctxt')
print_mock.assert_has_calls([
mock.call(u'host \tzone '),
mock.call('fake_host1 \tavail_zone1 ')])
@mock.patch('six.moves.builtins.print')
def test_list_zone_is_none(self, print_mock):
serv_1 = {
'host': 'fake_host1',
'availability_zone': 'avail_zone1',
}
serv_2 = {
'host': 'fake_host2',
'availability_zone': 'avail_zone2',
}
self.mock_object(db, 'service_get_all',
mock.Mock(return_value=[serv_1, serv_2]))
self.mock_object(context, 'get_admin_context',
mock.Mock(return_value='admin_ctxt'))
self.host_commands.list()
context.get_admin_context.assert_called_once_with()
db.service_get_all.assert_called_once_with('admin_ctxt')
print_mock.assert_has_calls([
mock.call(u'host \tzone '),
mock.call('fake_host1 \tavail_zone1 '),
mock.call('fake_host2 \tavail_zone2 ')])
def test_sync(self):
self.mock_object(migration, 'upgrade')
self.db_commands.sync(version='123')
migration.upgrade.assert_called_once_with('123')
def test_version(self):
self.mock_object(migration, 'version')
self.db_commands.version()
migration.version.assert_called_once_with()
def test_downgrade(self):
self.mock_object(migration, 'downgrade')
self.db_commands.downgrade(version='123')
migration.downgrade.assert_called_once_with('123')
def test_revision(self):
self.mock_object(migration, 'revision')
self.db_commands.revision('message', True)
migration.revision.assert_called_once_with('message', True)
def test_stamp(self):
self.mock_object(migration, 'stamp')
self.db_commands.stamp(version='123')
migration.stamp.assert_called_once_with('123')
def test_version_commands_list(self):
self.mock_object(version, 'version_string',
mock.Mock(return_value='123'))
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
self.version_commands.list()
version.version_string.assert_called_once_with()
self.assertEqual('123\n', fake_out.getvalue())
def test_version_commands_call(self):
self.mock_object(version, 'version_string',
mock.Mock(return_value='123'))
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
self.version_commands()
version.version_string.assert_called_once_with()
self.assertEqual('123\n', fake_out.getvalue())
def test_get_log_commands_no_errors(self):
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
CONF.set_override('log_dir', None)
expected_out = 'No errors in logfiles!\n'
self.get_log_cmds.errors()
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('six.moves.builtins.open')
@mock.patch('os.listdir')
def test_get_log_commands_errors(self, listdir, open):
CONF.set_override('log_dir', 'fake-dir')
listdir.return_value = ['fake-error.log']
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
open.return_value = six.StringIO(
'[ ERROR ] fake-error-message')
expected_out = ('fake-dir/fake-error.log:-\n'
'Line 1 : [ ERROR ] fake-error-message\n')
self.get_log_cmds.errors()
self.assertEqual(expected_out, fake_out.getvalue())
open.assert_called_once_with('fake-dir/fake-error.log', 'r')
listdir.assert_called_once_with(CONF.log_dir)
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.exists')
def test_get_log_commands_syslog_no_log_file(self, path_exists, open):
path_exists.return_value = False
exit = self.assertRaises(SystemExit, self.get_log_cmds.syslog)
self.assertEqual(exit.code, 1)
path_exists.assert_any_call('/var/log/syslog')
path_exists.assert_any_call('/var/log/messages')
@mock.patch('manila.utils.service_is_up')
@mock.patch('manila.db.service_get_all')
@mock.patch('manila.context.get_admin_context')
def test_service_commands_list(self, get_admin_context, service_get_all,
service_is_up):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
service = {'binary': 'manila-binary',
'host': 'fake-host.fake-domain',
'availability_zone': 'fake-zone',
'updated_at': '2014-06-30 11:22:33',
'disabled': False}
service_get_all.return_value = [service]
service_is_up.return_value = True
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
format = "%-16s %-36s %-16s %-10s %-5s %-10s"
print_format = format % ('Binary',
'Host',
'Zone',
'Status',
'State',
'Updated At')
service_format = format % (service['binary'],
service['host'].partition('.')[0],
service['availability_zone'],
'enabled',
':-)',
service['updated_at'])
expected_out = print_format + '\n' + service_format + '\n'
self.service_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
get_admin_context.assert_called_with()
service_get_all.assert_called_with(ctxt)
service_is_up.assert_called_with(service)
def test_methods_of(self):
obj = type('Fake', (object,),
{name: lambda: 'fake_' for name in ('_a', 'b', 'c')})
expected = [('b', obj.b), ('c', obj.c)]
self.assertEqual(expected, manila_manage.methods_of(obj))
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_argv_lt_2(self, register_cli_opt):
script_name = 'manila-manage'
sys.argv = [script_name]
CONF(sys.argv[1:], project='manila', version=version.version_string())
exit = self.assertRaises(SystemExit, manila_manage.main)
self.assertTrue(register_cli_opt.called)
self.assertEqual(exit.code, 2)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_log.log.register_options')
@mock.patch('oslo_log.log.setup')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_sudo_failed(self, register_cli_opt, log_setup,
register_log_opts, config_opts_call):
script_name = 'manila-manage'
sys.argv = [script_name, 'fake_category', 'fake_action']
config_opts_call.side_effect = cfg.ConfigFilesNotFoundError(
mock.sentinel._namespace)
exit = self.assertRaises(SystemExit, manila_manage.main)
self.assertTrue(register_cli_opt.called)
register_log_opts.assert_called_once_with(CONF)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='manila',
version=version.version_string())
self.assertFalse(log_setup.called)
self.assertEqual(exit.code, 2)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
@mock.patch('oslo_log.log.register_options')
def test_main(self, register_log_opts, register_cli_opt, config_opts_call):
script_name = 'manila-manage'
sys.argv = [script_name, 'config', 'list']
action_fn = mock.MagicMock()
CONF.category = mock.MagicMock(action_fn=action_fn)
manila_manage.main()
self.assertTrue(register_cli_opt.called)
register_log_opts.assert_called_once_with(CONF)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='manila', version=version.version_string())
self.assertTrue(action_fn.called)
@ddt.data('bar', '-bar', '--bar')
def test_get_arg_string(self, arg):
parsed_arg = manila_manage.get_arg_string(arg)
self.assertEqual('bar', parsed_arg)
| apache-2.0 |
Jc11235/Kekulean_Program | GUI_Version/Ubuntu_Version/DriverMethods.py | 1 | 39406 | from PerfectMatchingData import *
from Face import *
from Vertex import *
from Graph import *
from VertexList import *
from Output import *
from KekuleanMethods import *
from Checkers import *
from RequiredEdgeMethods import *
from Tkinter import *
from AppInformation import *
from random import randint
import time
import os
import shutil
import multiprocessing as mp
import threading
Break = False
BreakLoop = False
#These methods are the main drivers of the program. Some of their helper methods are also present here.
settings = {}
#function that reads in the graph file and returns a list of Face objects representing the graph
def getInput(fileName):
faceGraph = []
inputFile = open(fileName, 'r')
row = inputFile.readline()
y = 0
while len(row) > 0:
row = row.replace('\n', '')
row = row.split(" ")
for i in range(len(row)):
x = row[i]
faceGraph.append((Face(int(x), y)))
row = inputFile.readline()
y += 1
inputFile.close()
return faceGraph
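#Illustrative sketch (added for exposition; not part of the original program).
#Given how getInput() parses its file, a graph file written like the made-up
#example below (each row listing the x-coordinates of its faces) is turned
#into one Face(x, y) per number, row by row.
def _exampleWriteGraphFile(fileName="example_graph.txt"):
    sample = "0 1 2\n0 1\n1 2 3\n"
    f = open(fileName, 'w')
    f.write(sample)
    f.close()
    return getInput(fileName)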
def getSettings():
fileName = "settings.txt"
inputFile = open(fileName, 'r')
lineNumber = 0
minW = 0
maxW = 0
minH = 0
maxH = 0
line = inputFile.readline()
while len(line) > 0:
line = line.replace('\n', '')
settings[lineNumber] = float(line)
line = inputFile.readline()
lineNumber += 1
inputFile.close()
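#Illustrative sketch (added for exposition; not part of the original program).
#Judging from how settings[0..4] are used by createRandomGraph() and getRow()
#below, a settings.txt along these lines would be read by getSettings();
#the actual numbers here are made-up examples.
def _exampleWriteSettingsFile(fileName="settings.txt"):
    lines = ["3\n",    # settings[0]: minimum faces per row
             "6\n",    # settings[1]: maximum faces per row
             "3\n",    # settings[2]: minimum number of rows
             "5\n",    # settings[3]: maximum number of rows
             "0.3\n"]  # settings[4]: chance that a face is skipped in getRow()
    f = open(fileName, 'w')
    f.writelines(lines)
    f.close()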
def resetGraph(root,appInfo,submitGraph,graphNumberEntry,view):
submitGraph.destroy()
view.destroy()
graphNumberEntry.destroy()
def analyzeGraph(root,appInfo):
root.geometry("600x400")
selection = StringVar()
choiceEntry = Entry(root, textvariable = selection)
choice = selection.get()
def callback(root,appInfo,choice,selection,choiceEntry,fileName = "graph.txt"):
loading = Label(root, text="Analyzing graph data, this may take a few minutes.")
loading.pack()
fileName = fileName
faceGraph = getInput(fileName)
#check for connectedness
connected = isConnected(faceGraphToInts(faceGraph))
if connected == True:
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
loading.destroy()
choiceEntry.pack()
typeSelection = Label(root, text="Would you like to view the graphs ranked by Fries or Clars?")
typeSelection.pack()
submit = Button(root, text ="Submit", command = lambda: userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry))
submit.pack(side = BOTTOM)
while True:
choice = selection.get()
flag = False
exit = False
if choice != 'fries' and choice != 'clars' and choice != "":
againSelection = Label(root, text="That is not a valid option, please enter 'fries' or 'clars'.")
againSelection.pack()
print "again"
flag = True
while choice != 'fries' and choice != 'clars':
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
againSelection.update_idletasks()
choice = selection.get()
if exit == True:
againSelection.destroy()
break
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
t = threading.Thread(target = lambda: callback(root,appInfo,choice,selection,choiceEntry))
t.setDaemon(True)
appInfo.setThreads(t)
t.start()
def userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry):
structureNumber = IntVar()
submit.destroy()
typeSelection.destroy()
choiceEntry.destroy()
def displayClarFries(structureNumber,structures,choice):
structures.sort()
if choice == 'clars':
Graph.comparison = 'clars'
elif choice == 'fries':
Graph.comparison = 'fries'
structures.reverse()
structures[structureNumber].displayGraph()
view = Label(root, text="There are " + str(len(structures)) + " distinct Kekule structures available. Which would you like to view?")
view.pack()
graphNumberEntry = Entry(root, textvariable = structureNumber)
graphNumberEntry.pack()
number = structureNumber.get()
submitGraph = Button(root, text ="Submit Structure", command = lambda: displayClarFries(number,structures,choice))
submitGraph.pack(side = BOTTOM)
def deleteB(button):
button.destroy()
reset = Button(root, text ="Quit", command = lambda: resetB(root,appInfo,submitGraph,graphNumberEntry,view))
reset.pack(side = BOTTOM)
def resetB(root,appInfo,submitGraph,graphNumberEntry,view):
deleteB(reset)
resetGraph(root,appInfo,submitGraph,graphNumberEntry,view)
#A user-entered number of graphs are generated and tested for Kekulean-ness and written to their proper text files
def randomIntoFiles():
kekuleanFile = open("Kekuleans.txt", "w")
notKekuleanFile = open("NotKekulean.txt", "w")
numK = 0
numNotK = 0
trials = int(raw_input("How many graphs would you like to create? "))
print "\n" #just to provide some visual space
t1 = time.time()
for i in range(trials):
faceGraph = createRandomConnectedGraph()
vGraph = makeVertexGraph(faceGraph)
randGraph = Graph(faceGraph, vGraph)
if isKekulean(randGraph) == True:
numK += 1
kekuleanFile.write("Graph #" + str(numK) + "\n")
kekuleanFile.write(randGraph.simpleToString() + '\n')
else:
numNotK += 1
notKekuleanFile.write("Graph #" + str(numNotK) + "\n")
notKekuleanFile.write(randGraph.simpleToString() + '\n')
#print randGraph
#print "\n"
t2 = time.time()
print "\n" + str(numK) + " Kekulean graph(s) were found.\n" + str(numNotK) + " non-Kekulean graph(s) were found."
print "Time elapsed (in seconds): " + str(t2 - t1) + "\n"
kekuleanFile.close()
notKekuleanFile.close()
#creates a random Kekulean graph, computes its Kekule structures, and saves them to PNG files
def createRandomKekulean():
#creates a face graph
randomFaces = createRandomGraph()
randomGraph = _createRandomKekulean()
print "There are", len(randomGraph.getVertexGraph()), "vertices"
graphs = assignMatching(randomGraph)
graphs.sort()
if len(graphs) > 0:
#save graphs as PNG file
savePNG(graphs, "graphs - Fries.png")
Graph.comparison = 'clars'
graphs.sort()
savePNG(graphs, "graphs - Clars.png")
while True:
choice = raw_input("Would you like to view the graphs ranked by Fries or Clars? (or quit?) ")
while choice.lower() != 'fries' and choice.lower() != 'clars' and choice.lower() != 'quit':
choice = raw_input("Would you like to view the graphs ranked by Fries or Clars? (or quit?) ")
if choice.lower() == 'clars':
Graph.comparison = 'clars'
elif choice.lower() == 'fries':
Graph.comparison = 'fries'
else:
break
graphs.sort()
graphs.reverse()
print "There are", len(graphs), "Kekulean structures"
displayGraphs(graphs)
else:
print "error - Graph is Kekulean but has no perfect matching - see error.txt for graph"
errorFile = open("error.txt", "w")
errorFile.write(randomGraph.simpleToString() + '\n')
#Creates a random planar graph, which may not be connected
def createRandomGraph():
height = randint(settings[2], settings[3])
randGraph = []
for i in range(height):
rowLength = randint(settings[0], settings[1])
row = getRow(rowLength, i)
while len(row) == 0:
row = getRow(rowLength, i)
randGraph.extend(row)
if checkAlignment(randGraph) == False:
randGraph = createRandomGraph()
return randGraph
def checkAlignment(graph):
for face in graph:
if face.getX() == 0:
break
else:
#there is no face on the y-axis
return False
for face in graph:
if face.getY() == 0:
break
else:
#there is no face on the x-axis
return False
#there is a face on the x-axis
return True
def createRandomConnectedGraph():
g = createRandomGraph()
while isConnected(faceGraphToInts(g)) == False:
g = createRandomGraph()
return g
#generates a row for the createRandomGraph method
def getRow(rl, rowNum):
r = []
for j in range(rl):
chance = randint(0, 100)
if chance > settings[4] * 100:
r.append(Face(j, rowNum))
return r
def _createRandomKekulean():
#creates a face graph
randomFaces = createRandomGraph()
while isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
while isKekulean(randomGraph) == False:
#print "making K"
randomFaces = createRandomGraph()
while isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
if isKekulean(randomGraph):
return randomGraph
else:
return _createRandomKekulean()
def createManyKekuleans():
graphs = [] #list of kekulean graphs
graphList = [] #list of the Kekulean graphs with their matchings, and Fries/Clars Faces
trials = int(raw_input("How many graphs would you like to create? "))
pool = mp.Pool(mp.cpu_count())
results = [pool.apply_async(_createRandomKekulean) for x in range(trials)]
graphs = [r.get() for r in results]
for g in graphs:
graphList.extend(assignMatching(g))
graphList.sort()
if len(graphList) > 0:
print "There are", len(graphList), "Kekulean structures"
displayGraphs(graphList)
def testKekuleanThms():
conflictFile = open("conflict.txt", "w")
interval = float(raw_input("How many hours would you like to run the program?"))
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
counter = 0
while t2 - t1 < timeLimit:
print "graph #" + str(counter)
#creates a face graph
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
nelsonThm = isOldKekulean(randomGraph)
perfectMatchingThm = isKekulean(randomGraph)
if nelsonThm != perfectMatchingThm:
conflictFile.write("Perfect matching: " + str(perfectMatchingThm) + " Nelson Thm: " + str(nelsonThm) + "\n")
conflictFile.write(randomGraph.simpleToString())
conflictFile.write("\n")
t2 = time.time()
counter += 1
conflictFile.close()
#takes a row and returns the number of vertical edges in that row
def getRowEdgeCount(row):
edgeCount = 0
f = 0
for i in range(len(row)):
edgeCount += 1
try:
f = row[i+1]
except:
f = None
if row[i] + 1 != f or f == None:
edgeCount += 1
return edgeCount
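#Illustrative worked example (added for exposition; not part of the original
#program). Each maximal run of consecutive faces contributes (run length + 1)
#vertical edges, so the made-up row below with runs [0, 1] and [3] gives
#3 + 2 = 5.
def _exampleRowEdgeCount():
    return getRowEdgeCount([0, 1, 3])   # returns 5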
def getMinRows(g):
minRows = {}
index = 0
minEdges = sys.maxint
for r in g:
edgeCount = getRowEdgeCount(r)
if edgeCount < minEdges:
minEdges = edgeCount
minRows.clear()
minRows[index] = r
elif edgeCount == minEdges:
minRows[index] = r
index += 1
return minRows
#counts up the number of peaks above each row and stores those values in a list at indexes that correspond to the row of the graph
def getPeaksAboveRows(g):
peaksAboveRow = [0]*(len(g))
for r in range(len(g)):
#print "r: " + str(r)
row = g[r]
if r > 0:
peaksAboveRow[r] += peaksAboveRow[r-1]
for col in range(len(row)):
face = row[col]
if searchRow(face, True, g, r) == True:
peaksAboveRow[r] += 1
#print "Peak at: " + str(r) + ", " + str(col)
if searchRow(face, False, g, r) == True and r < len(g)-1:
peaksAboveRow[r+1] -= 1
#print "Valley at: " + str(r) + ", " + str(col)
peaksAboveRow[r] = abs(peaksAboveRow[r])
return peaksAboveRow
#Theorem I developed
def NelsonThm(peaks, g):
kekulean = True
minRows = getMinRows(g)
for i, row in minRows.items():
if peaks[i] > getRowEdgeCount(row):
kekulean = False
break
return kekulean
#checks if a graph is Kekulean and returns a boolean
def isOldKekulean(graph):
fg = faceGraphToInts(graph.getFaceGraph())
peaksAbove = getPeaksAboveRows(fg)
#print peaksAbove
kekulean = NelsonThm(peaksAbove, fg)
return kekulean
def getUpperBounds(graph):
#faceGraph = getInput(filename)
#vertexGraph = makeVertexGraph(faceGraph)
#graph = Graph(faceGraph, vertexGraph)
kekulean = isKekulean(graph)
if kekulean == True:
rowCount = [0] * graph.getNumberOfRows()
whiteCount = [0] * graph.getNumberOfRows()
blackCount = [0] * graph.getNumberOfRows()
print "len:", len(whiteCount)
for v in graph.getVertexGraph():
#even y numbers mean the vertex is marked white on the graph
if v.getY() % 2 == 0:
index = v.getY() / 2
if index < len(whiteCount):
whiteCount[index] += 1
#The else implies that the vertex's y is odd, and thus the verex is marked black
else:
index = (v.getY() - 1) / 2
if index < len(blackCount):
blackCount[index] += 1
print "Upper Bounds of the graph per row:"
for index in range(len(rowCount)):
count = abs(sum(whiteCount[0:index+1]) - sum(blackCount[0:index+1]))
print count
rowCount[index] = count
totalUpperBounds = sum(rowCount)
print "Upper bound of the graph:", totalUpperBounds
else:
print "The graph is not Kekulean"
def testConjectureSameFaces(root,interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
graphList = []
graphNumber = 0
counter = 0
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graph
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#must be 'fries' or 'clars'
Graph.comparison = 'clars'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
#h.setString(structures[0].simpleToString())
#is the data right?
#print "Verts:", h.getNumVertices()
#print "Structures:", h.getNumStructures()
#print "Clar:", h.getFriesNumber()
for g in graphList:
if(h.getFaces() == g.getFaces()):
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
#first part
if h.getClarsNumber() > g.getClarsNumber():
print 'Conjecture is false:'
drawConflictsCC(g, h)
#only adds graphs to the list if it is under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
graphNumber += 1
text.update_idletasks()
quit.update_idletasks()
scrollbar.update_idletasks()
text.destroy()
scrollbar.destroy()
quit.destroy()
#second part
def testConjectureSameFacesKKFF(root, interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
graphList = []
graphNumber = 0
counter = 0
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graph
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#must be 'fries' or 'clars'
Graph.comparison = 'fries'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
clarNumberStructure = []
friesNumberStructure = []
for g in graphList:
if(h.getFaces() == g.getFaces()):
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
if h.getFriesNumber() > g.getFriesNumber():
drawConflictsKKFF(g, h)
#only adds graphs to list if it under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
graphNumber += 1
text.update_idletasks()
quit.update_idletasks()
scrollbar.update_idletasks()
text.destroy()
scrollbar.destroy()
quit.destroy()
def testConjectureSameFacesFFCC(root, interval):
clarNumberStructures = []
friesNumberStructures = []
graphs = []
graphList = []
temp = 0
graphNumber = 0
counter = 0
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graph
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
randomGraph.setMaxClarManual(setMaxClar(randomGraph))
randomGraph.setMaxFriesManual(setMaxFries(randomGraph))
h = structures[-1]
graphs.append(randomGraph)
h.setMaxClarManual(setMaxClar(randomGraph))
h.setMaxFriesManual(setMaxFries(randomGraph))
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
graphCount = 0
graphNumber += 1
for g in graphList:
if(g.getFaces() == h.getFaces()):
if g.getNumVertices() == h.getNumVertices():
if g.getNumStructures() < h.getNumStructures():
if g.getMaxClar() > h.getMaxClar():
if g.getMaxFries() < h.getMaxFries():
print 'Conjecture is false:\n'
saveClarFaceFFCC(graphs[graphCount],randomGraph,temp)
saveFriesFaceFFCC(graphs[graphCount],randomGraph,temp)
folderName = "FFCCConjectureConflicts"
fileName = folderName + "/" + str(randomGraph.getNumVertices()) + "_" + str(temp)+ "/info" + ".txt"
f = open(fileName,'w')
f.write("C1: " + str(g.getMaxClar()) + " C2: " + str(h.getMaxClar()) + " F1: " + str(g.getMaxFries()) + " F2: " + str(h.getMaxFries()) + "\n")
f.write(str(faceGraphToInts(g.getFaceGraph())) + "\n")
f.write(str(faceGraphToInts(h.getFaceGraph())) + "\n")
f.close()
temp += 1
graphCount += 1
#only adds graphs to list if it under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
def setMaxFries(graph):
g = graph.getFaceGraph()
v = makeVertexGraph(g)
G = Graph(g,v)
structures = assignMatching(G)
Graph.comparison = 'fries'
structures.sort()
return structures[-1].getFriesNumber()
def setMaxClar(graph):
g = graph.getFaceGraph()
v = makeVertexGraph(g)
G = Graph(g,v)
structures = assignMatching(G)
Graph.comparison = 'clars'
structures.sort()
return structures[-1].getClarsNumber()
def saveClarFaceFFCC(graph1,graph2,count):
g1 = graph1.getFaceGraph()
g2 = graph2.getFaceGraph()
v1 = makeVertexGraph(g1)
v2 = makeVertexGraph(g2)
G1 = Graph(g1,v1)
G2 = Graph(g2,v2)
structures1 = assignMatching(G1)
structures2 = assignMatching(G2)
Graph.comparison = 'clars'
structures1.sort()
structures2.sort()
h1 = structures1[-1]
h2 = structures2[-1]
if not os.path.exists("FFCCConjectureConflicts"):
os.mkdir("FFCCConjectureConflicts")
folderName = "FFCCConjectureConflicts/" + str(G1.getNumVertices()) + "_" + str(count)
#setup folder
if not os.path.exists(folderName):
os.mkdir(folderName)
#print "adding"
fileName1 = folderName + "/clar1" + ".png"
fileName2 = folderName + "/clar2" + ".png"
#print fileName1
saveSinglePNG(h1,fileName1)
saveSinglePNG(h2,fileName2)
def saveFriesFaceFFCC(graph1,graph2,count):
g1 = graph1.getFaceGraph()
g2 = graph2.getFaceGraph()
v1 = makeVertexGraph(g1)
v2 = makeVertexGraph(g2)
G1 = Graph(g1,v1)
G2 = Graph(g2,v2)
structures1 = assignMatching(G1)
structures2 = assignMatching(G2)
Graph.comparison = 'fries'
structures1.sort()
structures2.sort()
h1 = structures1[-1]
h2 = structures2[-1]
if not os.path.exists("FFCCConjectureConflicts"):
os.mkdir("FFCCConjectureConflicts")
folderName = "FFCCConjectureConflicts/" + str(G1.getNumVertices()) + "_" + str(count)
#setup folder
if not os.path.exists(folderName):
os.mkdir(folderName)
#print "adding"
fileName1 = folderName + "/fries1" + ".png"
fileName2 = folderName + "/fries2" + ".png"
#print fileName1
saveSinglePNG(h1,fileName1)
saveSinglePNG(h2,fileName2)
def testConjectureDifferentFaces(hours=0):
graphList = []
results = open("results.txt", "w")
results.write("The program actually run!")
if hours == 0:
interval = float(raw_input("How many hours would you like to run the program? "))
else:
interval = hours
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
counter = 0
while t2 - t1 < timeLimit:
print "graph #" + str(counter)
#creates a face graph
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
for f in randomGraph.getFaceGraph():
pairs = randomGraph.getBondedVertices(f)
print str(pairs)
#must be 'fries' or 'clars'
Graph.comparison = 'clars'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
#h.setString(structures[0].simpleToString())
#is the data right?
#print "Verts:", h.getNumVertices()
#print "Structures:", h.getNumStructures()
#print "Clar:", h.getFriesNumber()
for g in graphList:
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
#first part
if h.getClarsNumber() > g.getClarsNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Clars: ' + str(h.getClarsNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Clars: ' + str(g.getClarsNumber()) + " Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsCC(g, h)
#second part
if h.getFriesNumber() > g.getFriesNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Fries: ' + str(h.getFriesNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Fries: ' + str(g.getFriesNumber()) + " Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsKKFF(g, h)
#third part
if h.getClarsNumber() > g.getClarsNumber():
if h.getFriesNumber() < g.getFriesNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Clars: ' + str(h.getClarsNumber()) + "graph H: Fries: " + str(h.getFriesNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Clars: ' + str(g.getClarsNumber()) + "graph G: Fries: " + str(g.getFriesNumber()) +" Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsFFCC(g, h)
#only adds graphs to list if it under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
def findHighestClars(graphs):
clars = 0
for g in graphs:
if g.getClarsNumber() > clars:
clars = g.getClarsNumber()
return clars
def _findRequiredEdges(graphs):
masterSet = getRequiredSet(graphs)
if len(masterSet) > 0:
for edge in masterSet:
v1, v2 = edge
v1.required = True
v2.required = True
return True
else:
return False
def findRequiredEdges(hours=0):
if not os.path.exists("requiredEdges"):
os.mkdir("requiredEdges")
edgeFile = open("requiredEdges/RequiredEdges.txt", "w")
graphNumber = 0
rqNum = 0
flag = False
if hours == 0:
interval = float(raw_input("How many hours would you like to run the program? "))
else:
interval = hours
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
print "graph", graphNumber
flag = False
graph = _createRandomKekulean()
graphs = assignMatching(graph)
for f in graph.getFaceGraph():
pairs = graph.getBondedVertices(f)
print str(pairs)
flag = _findRequiredEdges(graphs)
if flag == True:
print "Found graph with required edges"
edgeFile.write("Graph: " + str(rqNum) + "\n")
edgeFile.write(graph.simpleToString())
edgeFile.write("\n\n")
#save PNG's
fileName = "requiredEdges/Graph" + str(rqNum) + ".png"
saveSinglePNG(graphs[0], fileName)
rqNum += 1
graphNumber += 1
t2 = time.time()
def BreakModule():
global Break
Break = True
def BreakLoop():
global BreakLoop
BreakLoop = True
def combineGraphs(root,interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
graphNumber = 0
superGraphNumber = 0
deletedCount = 0
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT,fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command=text.yview)
storedGraphs = {}
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
text.insert(CURRENT,"graph: " + str(graphNumber) + "\n")
if Break == True:
Break = False
quit.destroy()
break
flag = False
#new stuff
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#end new stuff
Graph.comparison = 'clars'
structures.sort()
randomGraph.maxClars = structures[-1].getClarsNumber()
req_edges = getRequiredSet(structures)
externalEdges = getExternalEdges(req_edges)
if len(externalEdges) > 0:
#add graph and edges to list
storedGraphs[randomGraph] = externalEdges
for g, edges in storedGraphs.items():
complements = getComplements(externalEdges, edges)
for edge, compEdge in complements:
faceA = (edge[0].getFaces() & edge[1].getFaces()).pop()
faceB = (compEdge[0].getFaces() & compEdge[1].getFaces()).pop()
x = faceA.getX() - faceB.getX()
y = faceA.getY() - faceB.getY()
if edge[2] == "TOP_RIGHT" and compEdge[2] == "BOTTOM_LEFT":
newGraph = offsetFaces(g, x, y + 1);
elif edge[2] == "RIGHT" and compEdge[2] == "LEFT":
newGraph = offsetFaces(g, x + 1, y);
elif edge[2] == "TOP_LEFT" and compEdge[2] == "BOTTOM_RIGHT":
newGraph = offsetFaces(g, x + 1, y + 1);
elif edge[2] == "BOTTOM_LEFT" and compEdge[2] == "TOP_RIGHT":
newGraph = offsetFaces(g, x, y - 1);
elif edge[2] == "LEFT" and compEdge[2] == "RIGHT":
newGraph = offsetFaces(g, x - 1, y);
elif edge[2] == "BOTTOM_RIGHT" and compEdge[2] == "TOP_LEFT":
newGraph = offsetFaces(g, x - 1, y - 1);
overlap = checkFaceOverlap(randomGraph, newGraph)
#print overlap
if overlap is False:
faceGraph = combineFaces(randomGraph, newGraph)
faceGraph = adjustForNegatives(faceGraph)
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
#start new stuff
if len(structures) > 0:
#setup folder
folderName = "CombinedTemps"
if not os.path.exists(folderName):
os.mkdir(folderName)
fileName = folderName + "/superGraph.txt"
f = open(folderName + "/superGraph" + str(superGraphNumber) + ".txt" ,'w')
f.write(str(superGraph) + '\n')
f.close()
Graph.comparison = 'clars'
structures.sort()
if not os.path.exists("CombinedGraphs"):
os.mkdir("CombinedGraphs")
folderNameCG = "CombinedGraphs/superGraph" + str(superGraphNumber)
#setup folder
if not os.path.exists(folderNameCG):
os.mkdir(folderNameCG)
superName = folderNameCG + "/superGraph" + str(superGraphNumber) + ".png"
saveSinglePNG(structures[0], superName)
addCombinationsPNG(randomGraph, newGraph,superGraph, superGraphNumber, deletedCount)
superGraphNumber += 1
graphNumber += 1
t2 = time.time()
quit.update_idletasks()
quit.destroy()
def resetCombinedGraphs(root,appInfo,submitGraph,graphNumberEntry,view):
submitGraph.destroy()
view.destroy()
graphNumberEntry.destroy()
def analyzeCombinedGraphsSetup(root,appInfo,path = "CombinedTemps",extension = ".txt"):
runningApps = []
root.geometry("600x400")
graphNumber = IntVar()
entry = Entry(root, textvariable = graphNumber)
entry.pack()
runningApps.append(entry)
if not os.path.exists(path):
os.mkdir(path)
num_files = len([f for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))])
num_files -= 1
#for i in range(0,num_files):
#oldFilename = path + "/superGraph" + str(k+1) + extension
#os.rename(oldFilename, path + "/superGraph" + str(i) + extension)
label = Label(root, text="There are " + str(num_files) + " files in the directory. Which wuold you like to look at?")
label.pack()
runningApps.append(label)
i = 0
submit = Button(root, text ="Submit", command = lambda: checkAnalyze(root,appInfo,num_files,quit,entry,label,i,graphNumber,submit,runningApps))
submit.pack(side = BOTTOM)
while i == 0:
i = graphNumber.get()
submit.update_idletasks()
entry.update_idletasks()
label.update_idletasks()
def checkAnalyze(root,appInfo,num_files,quit,entry,label,i,graphNumber,submit,runningApps):
submit.destroy()
again = Label(root, text="That file does not exist, please try again.")
submit = Button(root, text ="Submit", command = lambda: analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry))
submit.pack(side = BOTTOM)
if i < -1 or i > num_files:
again.pack()
else:
analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry)
while (i < -1 or i > num_files):
submit.update_idletasks()
entry.update_idletasks()
label.update_idletasks()
again.update_idletasks()
i = graphNumber.get()
def analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry):
submit.destroy()
again.destroy()
label.destroy()
entry.destroy()
selection = StringVar()
choiceEntry = Entry(root, textvariable = selection)
choice = selection.get()
def callback(root,appInfo,i,choice,selection,choiceEntry,extension = ".txt",path = "CombinedTemps"):
loading = Label(root, text="Analyzing graph data, this may take a few minutes.")
loading.pack()
fileName = "/superGraph" + str(i) + extension
faceGraph = getInput(path + "/superGraph" + str(i) + extension)
#check for connectedness
connected = isConnected(faceGraphToInts(faceGraph))
if connected == True:
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
loading.destroy()
choiceEntry.pack()
typeSelection = Label(root, text="Would you like to view the graphs ranked by Fries or Clars?")
typeSelection.pack()
submit = Button(root, text ="Submit", command = lambda: userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry))
submit.pack(side = BOTTOM)
while True:
choice = selection.get()
flag = False
exit = False
if choice != 'fries' and choice != 'clars' and choice != "":
againSelection = Label(root, text="That file does not exist, please try again.")
againSelection.pack()
print "again"
flag = True
while choice != 'fries' and choice != 'clars':
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
againSelection.update_idletasks()
choice = selection.get()
if exit == True:
againSelection.destroy()
break
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
t = threading.Thread(target = lambda: callback(root,appInfo,i,choice,selection,choiceEntry))
t.setDaemon(True)
appInfo.setThreads(t)
t.start()
def userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry):
structureNumber = IntVar()
submit.destroy()
typeSelection.destroy()
choiceEntry.destroy()
def displayCombinedClarFries(structureNumber,structures,choice):
structures.sort()
if choice == 'clars':
Graph.comparison = 'clars'
elif choice == 'fries':
Graph.comparison = 'fries'
structures.reverse()
structures[structureNumber].displayGraph()
view = Label(root, text="There are " + str(len(structures)) + " distince Kekule structures avaiable. Which would you like to view?")
view.pack()
graphNumberEntry = Entry(root, textvariable = structureNumber)
graphNumberEntry.pack()
number = structureNumber.get()
submitGraph = Button(root, text ="Submit Structure", command = lambda: displayCombinedClarFries(number,structures,choice))
submitGraph.pack(side = BOTTOM)
def deleteB(button):
button.destroy()
reset = Button(root, text ="Quit", command = lambda: resetB(root,appInfo,submitGraph,graphNumberEntry,view))
reset.pack(side = BOTTOM)
def resetB(root,appInfo,submitGraph,graphNumberEntry,view):
deleteB(reset)
resetCombinedGraphs(root,appInfo,submitGraph,graphNumberEntry,view)
def addCombinationsPNG(graph,newGraph,superGraph,superGraphNumber,deletedCount):
new1 = graph.getFaceGraph()
new2 = newGraph.getFaceGraph()
vertexG1 = makeVertexGraph(new1)
vertexG2 = makeVertexGraph(new2)
g1 = Graph(new1,vertexG1)
g2 = Graph(new2,vertexG2)
firstStructures = assignMatching(g1)
secondStructures = assignMatching(g2)
_findRequiredEdges(firstStructures)
_findRequiredEdges(secondStructures)
Graph.comparison = 'clars'
firstStructures.sort()
secondStructures.sort()
if(isKekulean(g2) == True and isKekulean(g1) == True):
folderNameCG = "CombinedGraphs/superGraph" + str(superGraphNumber)
firstName = folderNameCG + "/Graph" + str(1) + ".png"
secondName = folderNameCG + "/Graph" + str(2) + ".png"
saveSinglePNG(firstStructures[0], firstName)
saveSinglePNG(secondStructures[0], secondName)
else:
directoryName = "CombinedDeleted"
if not os.path.exists(directoryName):
os.mkdir(directoryName)
folderName = "CombinedDeleted/superGraph" + str(superGraphNumber) + "_" + str(deletedCount)
if not os.path.exists(folderName):
os.mkdir(folderName)
f = superGraph.getFaceGraph()
v3 = makeVertexGraph(f)
g3 = Graph(f,v3)
superGraphStructure = assignMatching(g3)
fileName = folderName + "/superDeleted" + str(superGraphNumber) + ".png"
firstName = folderName + "/Graph" + str(1) + ".png"
secondName = folderName + "/Graph" + str(2) + ".png"
saveSinglePNG(superGraphStructure[0], fileName)
saveSinglePNG(firstStructures[0], firstName)
saveSinglePNG(secondStructures[0], secondName)
shutil.rmtree("CombinedGraphs/superGraph" + str(superGraphNumber))
superGraphNumber -= 1
deletedCount += 1
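# Note (added comment): superGraphNumber and deletedCount are plain integers, so
# the `superGraphNumber -= 1` / `deletedCount += 1` updates above do not
# propagate back to the caller in combineGraphs(); the caller would need a
# return value (or a mutable container) for those adjustments to take effect.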
def removeCombinedDuplicates(path = "CombinedTemps",extension = ".txt"):
num_files = len([f for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))])
print num_files
num_files -= 7
print num_files
masterFaceGraph = []
for i in range(0,num_files):
filename = "/superGraph" + str(i) + extension
faceGraph = getInput(path + "/superGraph" + str(i) + extension)
masterFaceGraph.append(faceGraphToInts(faceGraph))
for f in range(0, len(masterFaceGraph)):
for k in range(f+1, len(masterFaceGraph)):
flag = True
for h in range(0,len(masterFaceGraph[f])):
a = masterFaceGraph[f][h]
b = masterFaceGraph[k][h]
if len(a) != len(b):
flag = False
break
for t in range(0,len(masterFaceGraph[f][h])):
c = a[t]
d = b[t]
if c != d:
flag = False
break
if flag == False:
break
if (flag == True):
masterFaceGraph.remove(masterFaceGraph[k])
shutil.rmtree("CombinedGraphs/superGraph" + str(k))
os.remove("CombinedTemps/superGraph" + str(k) + extension)
for i in range(k+1,num_files):
path1 = "CombinedGraphs"
path2 = "CombinedTemps"
oldFilename1 = path1 + "/superGraph" + str(i)
oldFilename2 = path2 + "/superGraph" + str(i) + extension
os.rename(oldFilename1 + "/superGraph" + str(i) + ".png", oldFilename1 + "/superGraph" + str(i-1) + ".png")
os.rename(oldFilename1, path1 + "/superGraph" + str(i-1))
os.rename(oldFilename2, path2 + "/superGraph" + str(i-1) + extension)
num_files -= 1 | gpl-2.0 |
metacloud/python-novaclient | novaclient/v1_1/aggregates.py | 15 | 3503 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Aggregate interface."""
from novaclient import base
class Aggregate(base.Resource):
"""An aggregates is a collection of compute hosts."""
def __repr__(self):
return "<Aggregate: %s>" % self.id
def update(self, values):
"""Update the name and/or availability zone."""
return self.manager.update(self, values)
def add_host(self, host):
return self.manager.add_host(self, host)
def remove_host(self, host):
return self.manager.remove_host(self, host)
def set_metadata(self, metadata):
return self.manager.set_metadata(self, metadata)
def delete(self):
self.manager.delete(self)
class AggregateManager(base.ManagerWithFind):
resource_class = Aggregate
def list(self):
"""Get a list of os-aggregates."""
return self._list('/os-aggregates', 'aggregates')
def create(self, name, availability_zone):
"""Create a new aggregate."""
body = {'aggregate': {'name': name,
'availability_zone': availability_zone}}
return self._create('/os-aggregates', body, 'aggregate')
def get(self, aggregate):
"""Get details of the specified aggregate."""
return self._get('/os-aggregates/%s' % (base.getid(aggregate)),
"aggregate")
# NOTE:(dtroyer): utils.find_resource() uses manager.get() but we need to
# keep the API backward compatible
def get_details(self, aggregate):
"""Get details of the specified aggregate."""
return self.get(aggregate)
def update(self, aggregate, values):
"""Update the name and/or availability zone."""
body = {'aggregate': values}
return self._update("/os-aggregates/%s" % base.getid(aggregate),
body,
"aggregate")
def add_host(self, aggregate, host):
"""Add a host into the Host Aggregate."""
body = {'add_host': {'host': host}}
return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
body, "aggregate")
def remove_host(self, aggregate, host):
"""Remove a host from the Host Aggregate."""
body = {'remove_host': {'host': host}}
return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
body, "aggregate")
def set_metadata(self, aggregate, metadata):
"""Set a aggregate metadata, replacing the existing metadata."""
body = {'set_metadata': {'metadata': metadata}}
return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
body, "aggregate")
def delete(self, aggregate):
"""Delete the specified aggregates."""
self._delete('/os-aggregates/%s' % (base.getid(aggregate)))
| apache-2.0 |
StratusLab/client | api/code/src/main/python/stratuslab/HttpClient.py | 1 | 9858 | #
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import stat
import base64
import mimetools
import mimetypes
import httplib2
import ssl
from httplib2 import httplib
from time import gmtime, strftime
from stratuslab import Util
from stratuslab.ConfigHolder import ConfigHolder
from stratuslab.Exceptions import ServerException
from stratuslab.Exceptions import ClientException
from stratuslab.Exceptions import NetworkException
class HttpClient(object):
ENV_HTTP_PROXY = 'http_proxy'
ENV_NO_PROXY = 'no_proxy'
@staticmethod
def getHttpProxyForUrl(url):
proxy = None
url_host = Util.parseUri(url)[1]
envProxy = HttpClient._getEnvVarProxy()
if envProxy and not (url_host in HttpClient._getEnvVarNoProxy()):
proxy_server, proxy_port = Util.parseUri(envProxy)[1:3]
proxy = httplib2.ProxyInfo(3, proxy_server, int(proxy_port),
proxy_rdns=True)
return proxy
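# Illustrative usage sketch (added; host names and port are placeholders, not
# from the original module): with `http_proxy=http://proxy.example.org:3128`
# and `no_proxy=internal.example.org` set in the environment,
#
#   HttpClient.getHttpProxyForUrl('https://cloud.example.org/api')
#
# would return a httplib2.ProxyInfo for proxy.example.org:3128, while a URL on
# internal.example.org would return None.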
@staticmethod
def _getEnvVarProxy():
return os.environ.get(HttpClient.ENV_HTTP_PROXY)
@staticmethod
def _getEnvVarNoProxy():
return os.environ.get(HttpClient.ENV_NO_PROXY)
def __init__(self, configHolder=ConfigHolder()):
self.verboseLevel = None
self.configHolder = configHolder
self.crendentials = {}
self.certificates = {}
self.handleResponse = True
self.useHttpCache = False
configHolder.assign(self)
def get(self, url, accept='application/xml'):
return self._httpCall(url, 'GET', accept=accept)
def post(self, url, body=None, contentType='application/xml', accept='application/xml'):
return self._httpCall(url, 'POST', body, contentType, accept, retry=False)
def post_multipart(self, url, files=[], params=[], accept='application/xml'):
boundary, body = self._multipart_encode(files, params)
contentType = 'multipart/form-data; boundary=%s' % boundary
return self.post(url, body, contentType=contentType, accept=accept)
def _multipart_encode(self, files, params):
"files - list of (<attribute name>, <file descriptor>) tuples"
"params - list of (<attribute name>, <value>) tuples"
boundary = mimetools.choose_boundary()
body = ''
for(key, value) in params:
body += '--%s\r\n' % boundary
body += 'Content-Disposition: form-data; name="%s"' % key
body += '\r\n\r\n' + value + '\r\n'
for(key, fh) in files:
file_size = os.fstat(fh.fileno())[stat.ST_SIZE]
filename = fh.name.split('/')[-1]
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
body += '--%s\r\n' % boundary
body += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
body += 'Content-Type: %s\r\n' % contenttype
body += 'Content-Length: %s\r\n' % file_size
fh.seek(0)
body += '\r\n' + fh.read() + '\r\n'
fh.close()
body += '--%s--\r\n\r\n' % boundary
return boundary, body
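# Illustrative sketch (added): the encoder above hand-builds a classic
# multipart/form-data body, roughly of the form (boundary value is arbitrary):
#
#   --<boundary>
#   Content-Disposition: form-data; name="version"
#
#   1.0
#   --<boundary>
#   Content-Disposition: form-data; name="file"; filename="image.img"
#   Content-Type: application/octet-stream
#   Content-Length: 1234
#
#   <raw file bytes>
#   --<boundary>--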
def put(self, url, body=None, contentType='application/xml', accept='application/xml'):
return self._httpCall(url, 'PUT', body, contentType, accept)
def delete(self, url, body=None, contentType='application/x-www-form-urlencoded', accept='application/xml'):
return self._httpCall(url, 'DELETE', body, contentType, accept)
def head(self, url):
return self._httpCall(url, 'HEAD')
def addCredentials(self, username, password):
self.crendentials[username] = password
def addCertificate(self, key, cert):
self.certificates[key] = cert
def setHandleResponse(self, handle):
self.handleResponse = handle
def _addCredentials(self, http):
for u, p in self.crendentials.items():
http.add_credentials(u, p)
def _addCredentialsToHeader(self, headers):
if self.crendentials:
u, p = self.crendentials.items()[0]
headers['authorization'] = 'Basic ' + base64.b64encode('%s:%s' % (u, p))
return headers
def _addCertificate(self, http):
for u, p in self.certificates.items():
http.add_certificate(u, p, '')
def _printDetail(self, message):
Util.printDetail(message, self.verboseLevel, Util.VERBOSE_LEVEL_DETAILED)
# This is a bandaid for problems associated with dropped SSL handshakes. The
# root cause of these problems needs to be found and fixed.
def _retryHttpRequestOnSSLError(self, httpObject, url, method, body, headers):
maxRetries = 3
retries = 0
lastException = None
while retries < maxRetries:
try:
if len(headers):
return httpObject.request(url, method, body, headers=headers)
else:
return httpObject.request(url, method, body)
except ssl.SSLError as e:
t = strftime("%Y-%m-%d %H:%M:%S", gmtime())
self._printDetail('SSL ERROR ENCOUNTERED (%s): %s' % (t, str(e)))
lastException = e
retries += 1
except httplib2.ssl_SSLError as e:
t = strftime("%Y-%m-%d %H:%M:%S", gmtime())
self._printDetail('SSL ERROR ENCOUNTERED (%s): %s' % (t, str(e)))
lastException = e
retries += 1
raise lastException
def _httpCall(self, url, method, body=None, contentType='application/xml', accept='application/xml', retry=True):
def _convertContent(content):
size = len(content)
if size > 2048:
return '<content too large; %d bytes>' % size
try:
return unicode(content, 'utf-8')
except:
return '<non-text content>'
def _getErrorMessageFromJsonContent(content):
try:
return json.loads(content)['message']
except:
return ''
def _handle3xx(resp):
if resp.status == 302:
# Redirected
resp, content = self._httpCall(resp['location'], method, body, accept)
else:
raise Exception('Should have been handled by httplib2!! ' + str(resp.status) + ": " + resp.reason)
return resp, content
def _handle4xx(resp):
error_message = _getErrorMessageFromJsonContent(content)
raise ClientException('Failed calling method %s on url %s, with reason: %s. Error: %s' %
(method, url, str(resp.status) + ": " + resp.reason, error_message),
content=content,
status=str(resp.status))
def _handle5xx(resp):
if retry:
return self._httpCall(url, method, body, contentType, accept, False)
raise ServerException('Failed calling method %s on url %s, with reason: %s' %
(method, url, str(resp.status) + ": " + resp.reason),
status=str(resp.status))
def _handleResponse(resp, content):
self._printDetail('Received response: %s' % resp + \
'\nwith content:\n %s' % \
_convertContent(content))
if str(resp.status).startswith('2'):
return resp, content
if str(resp.status).startswith('3'):
resp, content = _handle3xx(resp)
if str(resp.status).startswith('4'):
resp, content = _handle4xx(resp)
if str(resp.status).startswith('5'):
resp, content = _handle5xx(resp)
proxy = self.getHttpProxyForUrl(url)
if Util.isTrueConfVal(self.useHttpCache):
h = httplib2.Http(".cache", proxy_info=proxy)
else:
h = httplib2.Http(proxy_info=proxy)
h.force_exception_to_status_code = False
h.disable_ssl_certificate_validation=True
self._printDetail('Contacting the server with %s, at: %s' % (method, url))
headers = {}
if contentType:
headers['Content-Type'] = contentType
if accept:
headers['Accept'] = accept
# See https://github.com/StratusLab/client/issues/8
if method == 'POST':
self._addCredentialsToHeader(headers)
self._addCredentials(h)
self._addCertificate(h)
try:
resp, content = self._retryHttpRequestOnSSLError(h, url, method, body, headers)
except httplib.BadStatusLine:
raise NetworkException('BadStatusLine when contacting ' + url)
except AttributeError:
raise NetworkException('Cannot contact ' + url)
if self.handleResponse:
try:
_handleResponse(resp, content)
except ClientException, ex:
ex.mediaType = headers['Accept']
raise
return resp, content
| apache-2.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/lib2to3/fixes/fix_dict.py | 24 | 3811 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for dict methods.
d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())
d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())
d.viewkeys() -> d.keys()
d.viewitems() -> d.items()
d.viewvalues() -> d.values()
Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(), tuple()
set(), any(), all(), sum().
Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util
iter_exempt = fixer_util.consuming_calls | {"iter"}
class FixDict(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< head=any+
trailer< '.' method=('keys'|'items'|'values'|
'iterkeys'|'iteritems'|'itervalues'|
'viewkeys'|'viewitems'|'viewvalues') >
parens=trailer< '(' ')' >
tail=any*
>
"""
def transform(self, node, results):
head = results["head"]
method = results["method"][0] # Extract node for method name
tail = results["tail"]
syms = self.syms
method_name = method.value
isiter = method_name.startswith("iter")
isview = method_name.startswith("view")
if isiter or isview:
method_name = method_name[4:]
assert method_name in ("keys", "items", "values"), repr(method)
head = [n.clone() for n in head]
tail = [n.clone() for n in tail]
special = not tail and self.in_special_context(node, isiter)
args = head + [pytree.Node(syms.trailer,
[Dot(),
Name(method_name,
prefix=method.prefix)]),
results["parens"].clone()]
new = pytree.Node(syms.power, args)
if not (special or isview):
new.prefix = ""
new = Call(Name("iter" if isiter else "list"), [new])
if tail:
new = pytree.Node(syms.power, [new] + tail)
new.prefix = node.prefix
return new
P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)
P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(self, node, isiter):
if node.parent is None:
return False
results = {}
if (node.parent.parent is not None and
self.p1.match(node.parent.parent, results) and
results["node"] is node):
if isiter:
# iter(d.iterkeys()) -> iter(d.keys()), etc.
return results["func"].value in iter_exempt
else:
# list(d.keys()) -> list(d.keys()), etc.
return results["func"].value in fixer_util.consuming_calls
if not isiter:
return False
# for ... in d.iterkeys() -> for ... in d.keys(), etc.
return self.p2.match(node.parent, results) and results["node"] is node
| gpl-2.0 |
frinksdev/mini4chan | konnichiwa/settings.py | 1 | 2675 | """
Django settings for konnichiwa project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tmm&bc#^@y6c^^_s&v+v7*(n^7)h8qr+(^19f#ntjx$q$mp#y_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'konnichiwa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'konnichiwa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'klezmer'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'es-mx'
TIME_ZONE = 'America/Mexico_City'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| mit |
dianshen/python_day | day7/a/getfile.py | 1 | 1625 | #!/usr/bin/env python3
__author__ = 'DSOWASP'
import socket
import os
import hashlib
ip_port = ('127.0.0.1',9999)
sk = socket.socket()
sk.connect(ip_port)
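# Protocol note (added comment, inferred from the client code below): the client
# sends "get <filename> <offset>" and expects a reply of the form
# "<status> <size>"; status "210" means the server will stream the file, and
# when resuming (offset > 0) the server is expected to follow the data with an
# md5 line whose third field is the file digest.  These details are assumptions
# based on this client, not on the server's source.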
while True:
cmd_input = input("cmd> ")
# print("发送:%s"%cmd)
cmd,fname = cmd_input.split()
if os.path.exists(fname):
ftell = os.path.getsize(fname)
else:
ftell = 0
if cmd == "q":
break
# elif
try:
sk.send(bytes("get %s %s"%(fname,ftell),"utf-8"))
r_data = sk.recv(1024).decode()
# print(r_data)
status,fsize = r_data.split()
fsize = int(fsize)
print(r_data)
if status == "210":
if ftell == 0:
f = open(fname,"wb")
content = sk.recv(1024)
f.write(content)
else:
recv_size = ftell
f = open(fname,'rb')
fmd5 = hashlib.md5()
text = f.read()
fmd5.update(text)
f.close()
f = open(fname,"ab")
while recv_size < fsize:
content = sk.recv(100)
f.write(content)
recv_size += len(content)
fmd5.update(content)
r_fmd5 = sk.recv(1024).decode()
print(r_fmd5)
f_md5 = r_fmd5.split()[2]
if f_md5 == fmd5.hexdigest():
print("文件传输正确!")
f.close()
except ConnectionResetError:
print("server break the connect")
break
# print("recv:%s"%raw_data.decode()) | apache-2.0 |
Insoleet/aiohttp | aiohttp/web_exceptions.py | 12 | 6892 | from .web_reqrep import Response
__all__ = (
'HTTPException',
'HTTPError',
'HTTPRedirection',
'HTTPSuccessful',
'HTTPOk',
'HTTPCreated',
'HTTPAccepted',
'HTTPNonAuthoritativeInformation',
'HTTPNoContent',
'HTTPResetContent',
'HTTPPartialContent',
'HTTPMultipleChoices',
'HTTPMovedPermanently',
'HTTPFound',
'HTTPSeeOther',
'HTTPNotModified',
'HTTPUseProxy',
'HTTPTemporaryRedirect',
'HTTPClientError',
'HTTPBadRequest',
'HTTPUnauthorized',
'HTTPPaymentRequired',
'HTTPForbidden',
'HTTPNotFound',
'HTTPMethodNotAllowed',
'HTTPNotAcceptable',
'HTTPProxyAuthenticationRequired',
'HTTPRequestTimeout',
'HTTPConflict',
'HTTPGone',
'HTTPLengthRequired',
'HTTPPreconditionFailed',
'HTTPRequestEntityTooLarge',
'HTTPRequestURITooLong',
'HTTPUnsupportedMediaType',
'HTTPRequestRangeNotSatisfiable',
'HTTPExpectationFailed',
'HTTPServerError',
'HTTPInternalServerError',
'HTTPNotImplemented',
'HTTPBadGateway',
'HTTPServiceUnavailable',
'HTTPGatewayTimeout',
'HTTPVersionNotSupported',
)
############################################################
# HTTP Exceptions
############################################################
class HTTPException(Response, Exception):
# You should set in subclasses:
# status = 200
status_code = None
empty_body = False
def __init__(self, *, headers=None, reason=None,
body=None, text=None, content_type=None):
Response.__init__(self, status=self.status_code,
headers=headers, reason=reason,
body=body, text=text, content_type=content_type)
Exception.__init__(self, self.reason)
if self.body is None and not self.empty_body:
self.text = "{}: {}".format(self.status, self.reason)
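# Illustrative usage sketch (added; the handler and application wiring are
# assumed, not part of this module): exceptions double as responses, so a
# handler can either raise or return them:
#
#   async def handler(request):
#       if request.match_info.get('name') is None:
#           raise HTTPNotFound(text='no such resource')
#       return HTTPOk(text='hello')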
class HTTPError(HTTPException):
"""Base class for exceptions with status codes in the 400s and 500s."""
class HTTPRedirection(HTTPException):
"""Base class for exceptions with status codes in the 300s."""
class HTTPSuccessful(HTTPException):
"""Base class for exceptions with status codes in the 200s."""
class HTTPOk(HTTPSuccessful):
status_code = 200
class HTTPCreated(HTTPSuccessful):
status_code = 201
class HTTPAccepted(HTTPSuccessful):
status_code = 202
class HTTPNonAuthoritativeInformation(HTTPSuccessful):
status_code = 203
class HTTPNoContent(HTTPSuccessful):
status_code = 204
empty_body = True
class HTTPResetContent(HTTPSuccessful):
status_code = 205
empty_body = True
class HTTPPartialContent(HTTPSuccessful):
status_code = 206
############################################################
# 3xx redirection
############################################################
class _HTTPMove(HTTPRedirection):
def __init__(self, location, *, headers=None, reason=None,
body=None, text=None, content_type=None):
if not location:
raise ValueError("HTTP redirects need a location to redirect to.")
super().__init__(headers=headers, reason=reason,
body=body, text=text, content_type=content_type)
self.headers['Location'] = location
self.location = location
class HTTPMultipleChoices(_HTTPMove):
status_code = 300
class HTTPMovedPermanently(_HTTPMove):
status_code = 301
class HTTPFound(_HTTPMove):
status_code = 302
# This one is safe after a POST (the redirected location will be
# retrieved with GET):
class HTTPSeeOther(_HTTPMove):
status_code = 303
class HTTPNotModified(HTTPRedirection):
# FIXME: this should include a date or etag header
status_code = 304
empty_body = True
class HTTPUseProxy(_HTTPMove):
# Not a move, but looks a little like one
status_code = 305
class HTTPTemporaryRedirect(_HTTPMove):
status_code = 307
############################################################
# 4xx client error
############################################################
class HTTPClientError(HTTPError):
pass
class HTTPBadRequest(HTTPClientError):
status_code = 400
class HTTPUnauthorized(HTTPClientError):
status_code = 401
class HTTPPaymentRequired(HTTPClientError):
status_code = 402
class HTTPForbidden(HTTPClientError):
status_code = 403
class HTTPNotFound(HTTPClientError):
status_code = 404
class HTTPMethodNotAllowed(HTTPClientError):
status_code = 405
def __init__(self, method, allowed_methods, *, headers=None, reason=None,
body=None, text=None, content_type=None):
allow = ','.join(sorted(allowed_methods))
super().__init__(headers=headers, reason=reason,
body=body, text=text, content_type=content_type)
self.headers['Allow'] = allow
self.allowed_methods = allowed_methods
self.method = method.upper()
class HTTPNotAcceptable(HTTPClientError):
status_code = 406
class HTTPProxyAuthenticationRequired(HTTPClientError):
status_code = 407
class HTTPRequestTimeout(HTTPClientError):
status_code = 408
class HTTPConflict(HTTPClientError):
status_code = 409
class HTTPGone(HTTPClientError):
status_code = 410
class HTTPLengthRequired(HTTPClientError):
status_code = 411
class HTTPPreconditionFailed(HTTPClientError):
status_code = 412
class HTTPRequestEntityTooLarge(HTTPClientError):
status_code = 413
class HTTPRequestURITooLong(HTTPClientError):
status_code = 414
class HTTPUnsupportedMediaType(HTTPClientError):
status_code = 415
class HTTPRequestRangeNotSatisfiable(HTTPClientError):
status_code = 416
class HTTPExpectationFailed(HTTPClientError):
status_code = 417
############################################################
# 5xx Server Error
############################################################
# Response status codes beginning with the digit "5" indicate cases in
# which the server is aware that it has erred or is incapable of
# performing the request. Except when responding to a HEAD request, the
# server SHOULD include an entity containing an explanation of the error
# situation, and whether it is a temporary or permanent condition. User
# agents SHOULD display any included entity to the user. These response
# codes are applicable to any request method.
class HTTPServerError(HTTPError):
pass
class HTTPInternalServerError(HTTPServerError):
status_code = 500
class HTTPNotImplemented(HTTPServerError):
status_code = 501
class HTTPBadGateway(HTTPServerError):
status_code = 502
class HTTPServiceUnavailable(HTTPServerError):
status_code = 503
class HTTPGatewayTimeout(HTTPServerError):
status_code = 504
class HTTPVersionNotSupported(HTTPServerError):
status_code = 505
| apache-2.0 |
jurajmajor/ltl3tela | Experiments/ltlcross_runner.py | 1 | 23078 | # -*- coding: utf-8 -*-
import subprocess
import sys
import os.path
import re
import math
import spot
from IPython.display import SVG
from datetime import datetime
import pandas as pd
from experiments_lib import hoa_to_spot, dot_to_svg, pretty_print
def bogus_to_lcr(form):
"""Converts a formula as it is printed in ``_bogus.ltl`` file
(uses ``--relabel=abc``) to use ``pnn`` AP names.
"""
args = ['-r0','--relabel=pnn','-f',form]
return subprocess.check_output(["ltlfilt"] + args, universal_newlines=True).strip()
def parse_check_log(log_f):
"""Parses a given log file and locates cases where
sanity checks found some error.
Returns:
bugs: a dict: ``form_id``->``list of error lines``
bogus_forms: a dict: ``form_id``->``form``
tools: a dict: ``tool_id``->``command``
"""
log = open(log_f,'r')
bugs = {}
bogus_forms = {}
formula = re.compile('.*ltl:(\d+): (.*)$')
empty_line = re.compile('^\s$')
problem = re.compile('error: .* nonempty')
for line in log:
m_form = formula.match(line)
if m_form:
form = m_form
f_bugs = []
m_empty = empty_line.match(line)
if m_empty:
if len(f_bugs) > 0:
form_id = int(form.group(1))-1
bugs[form_id] = f_bugs
bogus_forms[form_id] = form.group(2)
m_prob = problem.match(line)
if m_prob:
f_bugs.append(m_prob.group(0))
log.close()
tools = parse_log_tools(log_f)
return bugs, bogus_forms, tools
def find_log_for(tool_code, form_id, log_f):
"""Returns an array of lines from log for
given tool code (P1,N3,...) and form_id. The
form_id is taken from runner - thus we search for
formula number ``form_id+1``
"""
log = open(log_f,'r')
current_f = -1
formula = re.compile('.*ltl:(\d+): (.*)$')
tool = re.compile('.*\[([PN]\d+)\]: (.*)$')
gather = re.compile('Performing sanity checks and gathering statistics')
output = []
for line in log:
m_form = formula.match(line)
if m_form:
current_f = int(m_form.group(1))
curr_tool = ''
if current_f < form_id+1:
continue
if current_f > form_id+1:
break
m_tool = tool.match(line)
if m_tool:
curr_tool = m_tool.group(1)
if gather.match(line):
curr_tool = 'end'
if curr_tool == tool_code:
output.append(line.strip())
log.close()
return output
def hunt_error_types(log_f):
log = open(log_f,'r')
errors = {}
err_forms = {}
formula = re.compile('.*ltl:(\d+): (.*)$')
empty_line = re.compile('^\s$')
tool = re.compile('.*\[([PN]\d+)\]: (.*)$')
problem = re.compile('error: .*')
nonempty = re.compile('error: (.*) is nonempty')
for line in log:
m_form = formula.match(line)
if m_form:
form = m_form
f_bugs = {}
m_tool = tool.match(line)
if m_tool:
tid = m_tool.group(1)
m_empty = empty_line.match(line)
if m_empty:
if len(f_bugs) > 0:
form_id = int(form.group(1))-1
errors[form_id] = f_bugs
err_forms[form_id] = form.group(2)
m_prob = problem.match(line)
if m_prob:
prob = m_prob.group(0)
m_bug = nonempty.match(line)
if m_bug:
prob = 'nonempty'
tid = m_bug.group(1)
if prob not in f_bugs:
f_bugs[prob] = []
f_bugs[prob].append(tid)
log.close()
tools = parse_log_tools(log_f)
return errors, err_forms, tools
def parse_log_tools(log_f):
log = open(log_f,'r')
tools = {}
tool = re.compile('.*\[(P\d+)\]: (.*)$')
empty_line = re.compile('^\s$')
for line in log:
m_tool = tool.match(line)
m_empty = empty_line.match(line)
if m_empty:
break
if m_tool:
tid = m_tool.group(1)
tcmd = m_tool.group(2)
tools[tid] = tcmd
log.close()
return tools
class LtlcrossRunner(object):
"""A class for running Spot's `ltlcross` and storing and manipulating
its results. For LTL3HOA it can also draw very weak alternating automata
(VWAA).
Parameters
----------
tools : a dict (String -> String)
The records in the dict of the form ``name : ltlcross_cmd``
>>> tools = {"LTL3HOA" : "ltl3hoa -d -x -i -p 2 -f %f > %O",
>>> "SPOT": : "ltl2tgba"
>>> }
formula_files : a list of strings
paths to files with formulas to be fed to `ltlcross`
res_filename : String
filename to store the ltlcross`s results
cols : list of Strings, default ``['states','edges','transitions']``
names of ltlcross's statistics columns to be recorded
"""
def __init__(self, tools,
formula_files=['formulae/classic.ltl'],
res_filename='na_comp.csv',
cols=['states', 'edges', 'transitions'],
log_file=None,
):
self.tools = tools
self.mins = []
self.f_files = formula_files
self.cols = cols.copy()
self.automata = None
self.values = None
self.form = None
if res_filename == '' or res_filename is None:
self.res_file = '_'.join(tools.keys()) + '.csv'
else:
self.res_file = res_filename
if log_file is None:
self.log_file = self.res_file[:-3] + 'log'
else:
self.log_file = log_file
def create_args(self, automata=True, check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
forms = True, escape_tools=False):
"""Creates args that are passed to run_ltlcross
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
### Prepare ltlcross command ###
tools_strs = ["{"+name+"}" + cmd for (name, cmd) in self.tools.items() if name in tool_subset]
if escape_tools:
tools_strs = ["'{}'".format(t_str) for t_str in tools_strs]
args = tools_strs
if forms:
args += ' '.join(['-F '+F for F in self.f_files]).split()
if timeout:
args.append('--timeout='+timeout)
if automata:
args.append('--automata')
if save_bogus:
args.append('--save-bogus={}_bogus.ltl'.format(res_file[:-4]))
if not check:
args.append('--no-checks')
#else:
# args.append('--reference={ref_Spot}ltl2tgba -H %f')
args.append('--products=0')
args.append('--csv='+res_file)
return args
def ltlcross_cmd(self, args=None, automata=True,
check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
forms=True, lcr='ltlcross'):
"""Returns ltlcross command for the parameters.
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
if args is None:
args = self.create_args(automata, check, timeout,
log_file, res_file,
save_bogus, tool_subset, forms,
escape_tools=True)
return ' '.join([lcr] + args)
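# Illustrative usage sketch (added; tool command and file names are
# placeholders, not taken from the original experiments):
#
#   tools = {'SPOT': 'ltl2tgba -H %f > %O'}
#   r = LtlcrossRunner(tools, formula_files=['formulae/classic.ltl'],
#                      res_filename='spot.csv')
#   print(r.ltlcross_cmd())                  # inspect the command line only
#   # r.run_ltlcross(); r.parse_results()    # then query r.values / r.automata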
def run_ltlcross(self, args=None, automata=True,
check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
lcr='ltlcross'):
"""Removes any older version of ``self.res_file`` and runs `ltlcross`
on all tools.
Parameters
----------
args : a list of ltlcross arguments that can be used for subprocess
tool_subset : a list of names from self.tools
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
if args is None:
args = self.create_args(automata, check, timeout,
log_file, res_file,
save_bogus, tool_subset)
# Delete ltlcross result and lof files
subprocess.call(["rm", "-f", res_file, log_file])
## Run ltlcross ##
log = open(log_file,'w')
cmd = self.ltlcross_cmd(args,lcr=lcr)
print(cmd, file=log)
print(datetime.now().strftime('[%d.%m.%Y %T]'), file=log)
print('=====================', file=log,flush=True)
self.returncode = subprocess.call([lcr] + args, stderr=subprocess.STDOUT, stdout=log)
log.writelines([str(self.returncode)+'\n'])
log.close()
def parse_results(self, res_file=None):
"""Parses the ``self.res_file`` and sets the values, automata, and
form. If there are no results yet, it runs ltlcross before.
"""
if res_file is None:
res_file = self.res_file
if not os.path.isfile(res_file):
raise FileNotFoundError(res_file)
res = pd.read_csv(res_file)
# Add incorrect columns to track flawed automata
if not 'incorrect' in res.columns:
res['incorrect'] = False
# Removes unnecessary parenthesis from formulas
res.formula = res['formula'].map(pretty_print)
form = pd.DataFrame(res.formula.drop_duplicates())
form['form_id'] = range(len(form))
form.index = form.form_id
res = form.merge(res)
# Shape the table
table = res.set_index(['form_id', 'formula', 'tool'])
table = table.unstack(2)
table.axes[1].set_names(['column','tool'],inplace=True)
# Create separate tables for automata
automata = None
if 'automaton' in table.columns.levels[0]:
automata = table[['automaton']]
# Removes formula column from the index
automata.index = automata.index.levels[0]
# Removes `automata` from column names -- flatten the index
automata.columns = automata.columns.levels[1]
form = form.set_index(['form_id', 'formula'])
# Store incorrect and exit_status information separately
self.incorrect = table[['incorrect']]
self.incorrect.columns = self.incorrect.columns.droplevel()
self.exit_status = table[['exit_status']]
self.exit_status.columns = self.exit_status.columns.droplevel()
# stores the followed columns only
values = table[self.cols]
self.form = form
self.values = values.sort_index(axis=1,level=['column','tool'])
# self.compute_best("Minimum")
if automata is not None:
self.automata = automata
def compute_sbacc(self,col='states'):
def get_sbacc(aut):
if isinstance(aut, float) and math.isnan(aut):
return None
a = spot.automata(aut+'\n')
aut = next(a)
aut = spot.sbacc(aut)
if col == 'states':
return aut.num_states()
if col == 'acc':
return aut.num_sets()
df = self.automata.copy()
# Recreate the same index as for other cols
n_i = [(l, self.form_of_id(l,False)) for l in df.index]
df.index = pd.MultiIndex.from_tuples(n_i)
df.index.names=['form_id','formula']
# Recreate the same columns hierarchy
df = df.T
df['column'] = 'sb_{}'.format(col)
self.cols.append('sb_{}'.format(col))
df = df.set_index(['column'],append=True)
df = df.T.swaplevel(axis=1)
# Compute the requested values and add them to others
df = df.applymap(get_sbacc)
self.values = self.values.join(df)
def compute_best(self, tools=None, colname="Minimum"):
"""Computes minimum values over tools in ``tools`` for all
formulas and stores them in column ``colname``.
Parameters
----------
tools : list of Strings
column names that are used to compute the min over
colname : String
name of column used to store the computed values
"""
if tools is None:
tools = list(self.tools.keys())
else:
tools = [t for t in tools if t in self.tools.keys()
or t in self.mins]
self.mins.append(colname)
for col in self.cols:
self.values[col, colname] = self.values[col][tools].min(axis=1)
self.values.sort_index(axis=1, level=0, inplace=True)
def aut_for_id(self, form_id, tool):
"""For given formula id and tool it returns the corresponding
non-deterministic automaton as a Spot's object.
Parameters
----------
form_id : int
id of formula to use
tool : String
name of the tool to use to produce the automaton
"""
if self.automata is None:
raise AssertionError("No results parsed yet")
if tool not in self.tools.keys():
raise ValueError(tool)
return hoa_to_spot(self.automata.loc[form_id, tool])
def cummulative(self, col="states"):
"""Returns table with cummulative numbers of given ``col``.
Parameters
---------
col : String
One of the followed columns (``states`` default)
"""
return self.values[col].dropna().sum()
def smaller_than(self, t1, t2, reverse=False,
restrict=True,
col='states', restrict_cols=True):
"""Returns a dataframe with results where ``col`` for ``tool1``
has strictly smaller value than ``col`` for ``tool2``.
Parameters
----------
t1 : String
name of tool for comparison (the better one)
must be among tools
t2 : String
name of tool for comparison (the worse one)
must be among tools
reverse : Boolean, default ``False``
if ``True``, it switches ``tool1`` and ``tool2``
restrict : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
tools
col : String, default ``'states'``
name of column use for comparison.
restrict_cols : Boolean, default ``True``
if ``True``, show only the compared column
"""
return self.better_than(t1,t2,reverse=reverse,
props=[col],include_fails=False,
restrict_cols=restrict_cols,
restrict_tools=restrict)
def better_than(self, t1, t2, props=['states','acc'],
reverse=False, include_fails=True,
restrict_cols=True,restrict_tools=True
):
"""Compares ``t1`` against ``t2`` lexicographicaly
on cols from ``props`` and returns DataFrame with
results where ``t1`` is better than ``t2``.
Parameters
----------
t1 : String
name of tool for comparison (the better one)
must be among tools
t2 : String
name of tool for comparison (the worse one)
must be among tools
props : list of Strings, default (['states','acc'])
list of columns on which we want the comparison (in order)
reverse : Boolean, default ``False``
if ``True``, it switches ``t1`` and ``t2``
include_fails : Boolean, default ``True``
if ``True``, include formulae where t2 fails and t1 does not
fail
restrict_cols : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
property columns
restrict_tools : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
tools
"""
if t1 not in list(self.tools.keys())+self.mins:
raise ValueError(t1)
if t2 not in list(self.tools.keys())+self.mins:
raise ValueError(t2)
if reverse:
t1, t2 = t2, t1
v = self.values
t1_ok = self.exit_status[t1] == 'ok'
if include_fails:
t2_ok = self.exit_status[t2] == 'ok'
# non-fail beats fail
c = v[t1_ok & ~t2_ok]
# We work on non-failures only from now on
eq = t1_ok & t2_ok
else:
c = pd.DataFrame()
eq = t1_ok
for prop in props:
# For each prop we add t1 < t2
better = v[prop][t1] < v[prop][t2]
# but only from those which were equivalent so far
equiv_and_better = v.loc[better & eq]
c = c.append(equiv_and_better)
# And now choose those equivalent also on prop to eq
eq = eq & (v[prop][t1] == v[prop][t2])
# format the output
idx = pd.IndexSlice
tools = [t1,t2] if restrict_tools else slice(None)
props = props if restrict_cols else slice(None)
return c.loc[:,idx[props,tools]]
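# Illustrative usage sketch (added; tool names 'A' and 'B' are placeholders):
# after parse_results(), the calls below list the formulae where tool A beats
# tool B lexicographically on (states, acc), and count pairwise wins:
#
#   wins = r.better_than('A', 'B')             # DataFrame of cases where A wins
#   table = r.cross_compare(props=['states'])  # pairwise win counts per tool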
def form_of_id(self, form_id, spot_obj=True):
"""For given form_id returns the formula
Parameters
----------
form_id : int
id of formula to return
spot_obj : Bool
If ``True``, returns Spot formula object (uses Latex to
print the formula in Jupyter notebooks)
"""
f = self.values.index[form_id][1]
if spot_obj:
return spot.formula(f)
return f
def id_of_form(self, f, convert=False):
"""Returns id of a given formula. If ``convert`` is ``True``
it also calls ``bogus_to_lcr`` first.
"""
if convert:
f = bogus_to_lcr(f)
ni = self.values.index.droplevel(0)
return ni.get_loc(f)
def mark_incorrect(self, form_id, tool,output_file=None,input_file=None):
"""Marks automaton given by the formula id and tool as flawed
and writes it into the .csv file
"""
if tool not in self.tools.keys():
raise ValueError(tool)
# Put changes into the .csv file
if output_file is None:
output_file = self.res_file
if input_file is None:
input_file = self.res_file
csv = pd.read_csv(input_file)
if not 'incorrect' in csv.columns:
csv['incorrect'] = False
cond = (csv['formula'].map(pretty_print) ==
pretty_print(self.form_of_id(form_id,False))) &\
(csv.tool == tool)
csv.loc[cond,'incorrect'] = True
csv.to_csv(output_file,index=False)
# Mark the information into self.incorrect
self.incorrect.loc[self.index_for(form_id)][tool] = True
def na_incorrect(self):
"""Marks values for flawed automata as N/A. This causes
that the touched formulae will be removed from cummulative
etc. if computed again. To reverse this information you
have to parse the results again.
It also sets ``exit_status`` to ``incorrect``
"""
self.values = self.values[~self.incorrect]
self.exit_status[self.incorrect] = 'incorrect'
def index_for(self, form_id):
return (form_id,self.form_of_id(form_id,False))
def get_error_count(self,err_type='timeout',drop_zeros=True):
"""Returns a Series with total number of er_type errors for
each tool.
Parameters
----------
err_type : String, one of `timeout`, `parse error`,
`incorrect`, `crash`, or
`no output`
Type of error we seek
drop_zeros: Boolean (default True)
If true, rows with zeros are removed
"""
if err_type not in ['timeout', 'parse error',
'incorrect', 'crash',
'no output']:
raise ValueError(err_type)
if err_type == 'crash':
c1 = self.exit_status == 'exit code'
c2 = self.exit_status == 'signal'
res = (c1 | c2).sum()
else:
res = (self.exit_status == err_type).sum()
if drop_zeros:
return res.iloc[res.to_numpy().nonzero()]
return res
def cross_compare(self,tools=None,props=['states','acc'],
include_fails=True, total=True,
include_other=True):
def count_better(tool1,tool2):
if tool1 == tool2:
return float('nan')
try:
return len(self.better_than(tool1,tool2,props,
include_fails=include_fails))
except ValueError as e:
if include_other:
return float('nan')
else:
raise e
if tools is None:
tools = self.tools.keys()
c = pd.DataFrame(index=tools, columns=tools).fillna(0)
for tool in tools:
c[tool] = pd.DataFrame(c[tool]).apply(lambda x:
count_better(x.name,tool), 1)
if total:
c['V'] = c.sum(axis=1)
return c
def min_counts(self, tools=None, restrict_tools=False, unique_only=False, col='states',min_name='min(count)'):
if tools is None:
tools = list(self.tools.keys())
else:
tools = [t for t in tools if
t in self.tools.keys() or
t in self.mins]
min_tools = tools if restrict_tools else list(self.tools.keys())
self.compute_best(tools=min_tools, colname=min_name)
s = self.values.loc(axis=1)[col]
df = s.loc(axis=1)[tools+[min_name]]
is_min = lambda x: x[x == x[min_name]]
best_t_count = df.apply(is_min, axis=1).count(axis=1)
choose = (df[best_t_count == 2]) if unique_only else df
choose = choose.index
min_counts = df.loc[choose].apply(is_min,axis=1).count()
return pd.DataFrame(min_counts[min_counts.index != min_name])
def param_runner(name, tools, data_dir='data_param'):
cols=["states","transitions","acc","time","nondet_states"]
r = LtlcrossRunner(tools,\
res_filename='{}/{}.csv'.format(data_dir,name),\
formula_files=['formulae/{}.ltl'.format(name)],\
cols=cols)
return r
| gpl-3.0 |
gbowerman/azurerm | azurerm/resourcegroups.py | 1 | 4056 | '''resourcegroups.py - azurerm functions for Resource Groups.'''
import json
from .restfns import do_delete, do_get, do_post, do_put
from .settings import get_rm_endpoint, RESOURCE_API
def create_resource_group(access_token, subscription_id, rgname, location):
'''Create a resource group in the specified location.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'?api-version=', RESOURCE_API])
rg_body = {'location': location}
body = json.dumps(rg_body)
return do_put(endpoint, body, access_token)
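# Illustrative usage sketch (added; the token, subscription id and group name
# are placeholders, and the exact return type depends on restfns.do_put/do_get):
#
#   create_resource_group(token, sub_id, 'demo-rg', 'westus')
#   groups = list_resource_groups(token, sub_id)
#   delete_resource_group(token, sub_id, 'demo-rg')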
def delete_resource_group(access_token, subscription_id, rgname):
'''Delete the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'?api-version=', RESOURCE_API])
return do_delete(endpoint, access_token)
def export_template(access_token, subscription_id, rgname):
'''Capture the specified resource group as a template
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/exportTemplate',
'?api-version=', RESOURCE_API])
rg_body = {'options':'IncludeParameterDefaultValue', 'resources':['*']}
body = json.dumps(rg_body)
return do_post(endpoint, body, access_token)
def get_resource_group(access_token, subscription_id, rgname):
'''Get details about the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', rgname,
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
def get_resource_group_resources(access_token, subscription_id, rgname):
'''Get the resources in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', rgname,
'/resources?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
def list_resource_groups(access_token, subscription_id):
'''List the resource groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/',
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
| mit |
samesense/tools | py/uniprotTxt.py | 1 | 5230 | """Get UniProt txt files for proteins, parse them, and load the parsed data."""
import os, time, sys
import files
from collections import defaultdict
def parseProteinName(protein):
with open(mkTxtFile(protein)) as f:
for line in f:
# DE RecName: Full=Fatty acid synthase;
if line[0:2] == 'DE' and 'RecName' in line:
name = line.split(':')[1].split(';')[0].split('=')[1]
return name
elif line[0:2] == 'DE' and 'SubName' in line:
name = line.split(':')[1].split(';')[0].split('=')[1]
return name
elif line[0:2] == 'GE' and 'Name' in line:
name = line.split('Name=')[1].split(';')[0]
return name
def dumpProteinNames(proteins, nameFile):
""" '../working/neal/proteins.names' """
with open(nameFile, 'w') as fout:
for protein in proteins:
print >> fout, '\t'.join((protein, parseProteinName(protein)))
def loadProteinNames(nameFile):
protein2name = {}
with open(nameFile) as f:
for line in f:
protein, name = line.strip('\n').split('\t')
protein2name[protein] = name
return protein2name
def mkTxtFile(proteinID):
txtFile = files.dataDir + '/uniprot/txt/' + proteinID + '.txt'
return txtFile
def mkProteinToProsite(proteins):
protein2prosite = defaultdict(dict)
for protein in proteins:
prosites = parseProsite(protein)
for prosite in prosites:
protein2prosite[protein][prosite] = prosites[prosite]
return protein2prosite
def parseProsite(protein):
prosites = {}
with open(mkTxtFile(protein)) as f:
for line in f:
# DR PROSITE; PS51257; PROKAR_LIPOPROTEIN; 1.
# DR GO; GO:0046872; F:metal ion binding; IEA:UniProtKB-KW.
if ('GO;' in line or 'PROSITE' in line or 'Pfam' in line or 'SMART' in line) and line[0:2] == 'DR':
sp = [x.strip() for x in line.strip('\n').split(';')]
prositeID, prositeName = sp[1:3]
prosites[prositeID] = prositeName
elif line[0:2] == 'KW':
# KW NAD; NADP; Oxidoreductase; Phosphopantetheine; Phosphoprotein;
sp = [x.strip().strip('.') for x in line.strip('\n').split(';')]
for prosite in sp[1:]:
if prosite:
prosites['KW:'+prosite] = 'KW:' + prosite
return prosites
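# Illustrative sketch (added): parseProsite() returns a dict keyed by database
# identifier, e.g. for the example record lines shown in the comments above:
#
#   {'PS51257': 'PROKAR_LIPOPROTEIN',
#    'GO:0046872': 'F:metal ion binding',
#    'KW:Oxidoreductase': 'KW:Oxidoreductase'}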
def download(proteinID):
txtFile = mkTxtFile(proteinID)
if not os.path.exists(txtFile):
os.system('wget "http://www.uniprot.org/uniprot/%s.txt" -O %s' % (proteinID, txtFile))
time.sleep(2)
def getProteins():
proteins = {}
with open('../working/neal/proteins.xls') as f:
for line in f:
mod, proteinLs = line.strip('\n').split('\t')
for p in proteinLs.split(';'):
proteins[p] = True
return proteins
def updateTxtFiles():
proteins = getProteins()
for p in proteins:
download(p)
def dumpProsite(proteins, prositeFile):
""" '../working/neal/proteins.prosite' """
protein2prosite = mkProteinToProsite(proteins)
with open(prositeFile, 'w') as fout:
for protein in protein2prosite:
for prosite in protein2prosite[protein]:
print >> fout, '\t'.join( (protein, prosite, protein2prosite[protein][prosite]) )
def loadProteinToProsite(prositeFile):
""" '../working/neal/proteins.prosite' """
protein2prosite = defaultdict(dict)
with open(prositeFile) as f:
for line in f:
protein, prositeID, prositeName = line.strip('\n').split('\t')
protein2prosite[protein][prositeID] = prositeName
return protein2prosite
def loadProteinToGOCC(prositeFile):
""" '../working/neal/proteins.prosite' """
protein2prosite = defaultdict(dict)
with open(prositeFile) as f:
for line in f:
protein, prositeID, prositeName = line.strip('\n').split('\t')
if 'C:' in prositeName:
protein2prosite[protein][prositeID] = prositeName
return protein2prosite
def loadGoCounts(proteinFile, goFile):
"""C,P,F counts for experimental proteins per mod
'../working/neal/proteins.xls'
'../working/neal/proteins.prosite'
"""
proteins = {'SNO':defaultdict(dict),
'RSG':defaultdict(dict),
'SPAL':defaultdict(dict),
'SOH':defaultdict(dict)}
mod2proteins = defaultdict(dict)
with open(proteinFile) as f:
for line in f:
mod, proteinLs = line.strip('\n').split('\t')
for p in proteinLs.split(';'):
mod2proteins[mod][p] = proteinLs
with open(goFile) as f:
for line in f:
protein, prositeID, prositeName = line.strip('\n').split('\t')
goType = prositeName.split(':')[0]
if goType in ('C', 'F', 'P'):
for mod in mod2proteins:
if protein in mod2proteins[mod]:
proteins[mod][goType][mod2proteins[mod][protein]] = True
return proteins
if __name__ == "__main__":
    proteins = getProteins()
    dumpProteinNames(proteins, '../working/neal/proteins.names')
    # dumpProsite(proteins, '../working/neal/proteins.prosite')
| mit |
dragosbdi/p2pool | SOAPpy/SOAPBuilder.py | 289 | 22852 | """
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: SOAPBuilder.py 1498 2010-03-12 02:13:19Z pooryorick $'
from version import __version__
import cgi
from wstools.XMLname import toXMLname, fromXMLname
import fpconst
# SOAPpy modules
from Config import Config
from NS import NS
from Types import *
# Test whether this Python version has Types.BooleanType
# If it doesn't have it, then False and True are serialized as integers
try:
BooleanType
pythonHasBooleanType = 1
except NameError:
pythonHasBooleanType = 0
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
_xml_top = '<?xml version="1.0"?>\n'
_xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
_env_top = ( '%(ENV_T)s:Envelope\n' + \
' %(ENV_T)s:encodingStyle="%(ENC)s"\n' ) % \
NS.__dict__
_env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__
# Namespaces potentially defined in the Envelope tag.
_env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
def __init__(self, args = (), kw = {}, method = None, namespace = None,
header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
use_refs = 0, config = Config, noroot = 0):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
self.args = args
self.kw = kw
self.envelope = envelope
self.encoding = encoding
self.method = method
self.namespace = namespace
self.header = header
self.methodattrs= methodattrs
self.use_refs = use_refs
self.config = config
self.out = []
self.tcounter = 0
self.ncounter = 1
self.icounter = 1
self.envns = {}
self.ids = {}
self.depth = 0
self.multirefs = []
self.multis = 0
self.body = not isinstance(args, bodyType)
self.noroot = noroot
def build(self):
if Config.debug: print "In build."
ns_map = {}
# Cache whether typing is on or not
typed = self.config.typed
if self.header:
# Create a header.
self.dump(self.header, "Header", typed = typed)
#self.header = None # Wipe it out so no one is using it.
if self.body:
# Call genns to record that we've used SOAP-ENV.
self.depth += 1
body_ns = self.genns(ns_map, NS.ENV)[0]
self.out.append("<%sBody>\n" % body_ns)
if self.method:
# Save the NS map so that it can be restored when we
# fall out of the scope of the method definition
save_ns_map = ns_map.copy()
self.depth += 1
a = ''
if self.methodattrs:
for (k, v) in self.methodattrs.items():
a += ' %s="%s"' % (k, v)
if self.namespace: # Use the namespace info handed to us
methodns, n = self.genns(ns_map, self.namespace)
else:
methodns, n = '', ''
self.out.append('<%s%s%s%s%s>\n' % (
methodns, self.method, n, a, self.genroot(ns_map)))
try:
if type(self.args) != TupleType:
args = (self.args,)
else:
args = self.args
for i in args:
self.dump(i, typed = typed, ns_map = ns_map)
if hasattr(self.config, "argsOrdering") and self.config.argsOrdering.has_key(self.method):
for k in self.config.argsOrdering.get(self.method):
self.dump(self.kw.get(k), k, typed = typed, ns_map = ns_map)
else:
for (k, v) in self.kw.items():
self.dump(v, k, typed = typed, ns_map = ns_map)
except RecursionError:
if self.use_refs == 0:
# restart
b = SOAPBuilder(args = self.args, kw = self.kw,
method = self.method, namespace = self.namespace,
header = self.header, methodattrs = self.methodattrs,
envelope = self.envelope, encoding = self.encoding,
use_refs = 1, config = self.config)
return b.build()
raise
if self.method:
self.out.append("</%s%s>\n" % (methodns, self.method))
# End of the method definition; drop any local namespaces
ns_map = save_ns_map
self.depth -= 1
if self.body:
# dump may add to self.multirefs, but the for loop will keep
# going until it has used all of self.multirefs, even those
# entries added while in the loop.
self.multis = 1
for obj, tag in self.multirefs:
self.dump(obj, tag, typed = typed, ns_map = ns_map)
self.out.append("</%sBody>\n" % body_ns)
self.depth -= 1
if self.envelope:
e = map (lambda ns: ' xmlns:%s="%s"\n' % (ns[1], ns[0]),
self.envns.items())
self.out = ['<', self._env_top] + e + ['>\n'] + \
self.out + \
[self._env_bot]
if self.encoding != None:
self.out.insert(0, self._xml_enc_top % self.encoding)
return ''.join(self.out).encode(self.encoding)
self.out.insert(0, self._xml_top)
return ''.join(self.out)
def gentag(self):
if Config.debug: print "In gentag."
self.tcounter += 1
return "v%d" % self.tcounter
def genns(self, ns_map, nsURI):
if nsURI == None:
return ('', '')
if type(nsURI) == TupleType: # already a tuple
if len(nsURI) == 2:
ns, nsURI = nsURI
else:
ns, nsURI = None, nsURI[0]
else:
ns = None
if ns_map.has_key(nsURI):
return (ns_map[nsURI] + ':', '')
if self._env_ns.has_key(nsURI):
ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
return (ns + ':', '')
if not ns:
ns = "ns%d" % self.ncounter
self.ncounter += 1
ns_map[nsURI] = ns
if self.config.buildWithNamespacePrefix:
return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
else:
return ('', ' xmlns="%s"' % (nsURI))
def genroot(self, ns_map):
if self.noroot:
return ''
if self.depth != 2:
return ''
ns, n = self.genns(ns_map, NS.ENC)
return ' %sroot="%d"%s' % (ns, not self.multis, n)
# checkref checks an element to see if it needs to be encoded as a
# multi-reference element or not. If it returns None, the element has
# been handled and the caller can continue with subsequent elements.
# If it returns a string, the string should be included in the opening
# tag of the marshaled element.
def checkref(self, obj, tag, ns_map):
if self.depth < 2:
return ''
if not self.ids.has_key(id(obj)):
n = self.ids[id(obj)] = self.icounter
self.icounter = n + 1
if self.use_refs == 0:
return ''
if self.depth == 2:
return ' id="i%d"' % n
self.multirefs.append((obj, tag))
else:
if self.use_refs == 0:
raise RecursionError, "Cannot serialize recursive object"
n = self.ids[id(obj)]
if self.multis and self.depth == 2:
return ' id="i%d"' % n
self.out.append('<%s href="#i%d"%s/>\n' %
(tag, n, self.genroot(ns_map)))
return None
# dumpers
def dump(self, obj, tag = None, typed = 1, ns_map = {}):
if Config.debug: print "In dump.", "obj=", obj
ns_map = ns_map.copy()
self.depth += 1
if type(tag) not in (NoneType, StringType, UnicodeType):
raise KeyError, "tag must be a string or None"
self.dump_dispatch(obj, tag, typed, ns_map)
self.depth -= 1
# generic dumper
def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
rootattr = '', id = '',
xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
if Config.debug: print "In dumper."
if nsURI == None:
nsURI = self.config.typesNamespaceURI
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
a = n = t = ''
if typed and obj_type:
ns, n = self.genns(ns_map, nsURI)
ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)
try: a = obj._marshalAttrs(ns_map, self)
except: pass
try: data = obj._marshalData()
except:
if (obj_type != "string"): # strings are already encoded
data = cgi.escape(str(obj))
else:
data = obj
return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
"id": id, "attrs": a}
def dump_float(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_float."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if Config.strict_range:
doubleType(obj)
if fpconst.isPosInf(obj):
obj = "INF"
elif fpconst.isNegInf(obj):
obj = "-INF"
elif fpconst.isNaN(obj):
obj = "NaN"
else:
obj = repr(obj)
# Note: python 'float' is actually a SOAP 'double'.
self.out.append(self.dumper(
None, "double", obj, tag, typed, ns_map, self.genroot(ns_map)))
def dump_int(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_int."
self.out.append(self.dumper(None, 'integer', obj, tag, typed,
ns_map, self.genroot(ns_map)))
def dump_bool(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_bool."
self.out.append(self.dumper(None, 'boolean', obj, tag, typed,
ns_map, self.genroot(ns_map)))
def dump_string(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_string."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: data = obj._marshalData()
except: data = obj
self.out.append(self.dumper(None, "string", cgi.escape(data), tag,
typed, ns_map, self.genroot(ns_map), id))
dump_str = dump_string # For Python 2.2+
dump_unicode = dump_string
def dump_None(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_None."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
self.out.append('<%s %snull="1"%s/>\n' %
(tag, ns, self.genroot(ns_map)))
dump_NoneType = dump_None # For Python 2.2+
def dump_list(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_list.", "obj=", obj
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if type(obj) == InstanceType:
data = obj.data
else:
data = obj
if typed:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try:
sample = data[0]
empty = 0
except:
# preserve type if present
if getattr(obj,"_typed",None) and getattr(obj,"_type",None):
if getattr(obj, "_complexType", None):
sample = typedArrayType(typed=obj._type,
complexType = obj._complexType)
sample._typename = obj._type
if not getattr(obj,"_ns",None): obj._ns = NS.URN
else:
sample = typedArrayType(typed=obj._type)
else:
sample = structType()
empty = 1
# First scan list to see if all are the same type
same_type = 1
if not empty:
for i in data[1:]:
if type(sample) != type(i) or \
(type(sample) == InstanceType and \
sample.__class__ != i.__class__):
same_type = 0
break
ndecl = ''
if same_type:
if (isinstance(sample, structType)) or \
type(sample) == DictType or \
(isinstance(sample, anyType) and \
(getattr(sample, "_complexType", None) and \
sample._complexType)): # force to urn struct
try:
tns = obj._ns or NS.URN
except:
tns = NS.URN
ns, ndecl = self.genns(ns_map, tns)
try:
typename = sample._typename
except:
typename = "SOAPStruct"
t = ns + typename
elif isinstance(sample, anyType):
ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
t = ns + str(sample._type)
else:
t = 'ur-type'
else:
typename = type(sample).__name__
# For Python 2.2+
if type(sample) == StringType: typename = 'string'
# HACK: unicode is a SOAP string
if type(sample) == UnicodeType: typename = 'string'
# HACK: python 'float' is actually a SOAP 'double'.
if typename=="float": typename="double"
t = self.genns(
ns_map, self.config.typesNamespaceURI)[0] + typename
else:
t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
"ur-type"
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
ens, edecl = self.genns(ns_map, NS.ENC)
ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
if typed:
self.out.append(
'<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %
(tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
self.genroot(ns_map), id, a))
if typed:
try: elemsname = obj._elemsname
except: elemsname = "item"
else:
elemsname = tag
if isinstance(data, (list, tuple, arrayType)):
should_drill = True
else:
should_drill = not same_type
for i in data:
self.dump(i, elemsname, should_drill, ns_map)
if typed: self.out.append('</%s>\n' % tag)
dump_tuple = dump_list
def dump_exception(self, obj, tag, typed = 0, ns_map = {}):
if isinstance(obj, faultType): # Fault
cns, cdecl = self.genns(ns_map, NS.ENC)
vns, vdecl = self.genns(ns_map, NS.ENV)
self.out.append('<%sFault %sroot="1"%s%s>' % (vns, cns, vdecl, cdecl))
self.dump(obj.faultcode, "faultcode", typed, ns_map)
self.dump(obj.faultstring, "faultstring", typed, ns_map)
if hasattr(obj, "detail"):
self.dump(obj.detail, "detail", typed, ns_map)
self.out.append("</%sFault>\n" % vns)
def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_dictionary."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
self.out.append('<%s%s%s%s>\n' %
(tag, id, a, self.genroot(ns_map)))
for (k, v) in obj.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
dump_dict = dump_dictionary # For Python 2.2+
def dump_dispatch(self, obj, tag, typed = 1, ns_map = {}):
if not tag:
# If it has a name use it.
if isinstance(obj, anyType) and obj._name:
tag = obj._name
else:
tag = self.gentag()
# watch out for order!
dumpmap = (
(Exception, self.dump_exception),
(arrayType, self.dump_list),
(basestring, self.dump_string),
(NoneType, self.dump_None),
(bool, self.dump_bool),
(int, self.dump_int),
(long, self.dump_int),
(list, self.dump_list),
(tuple, self.dump_list),
(dict, self.dump_dictionary),
(float, self.dump_float),
)
for dtype, func in dumpmap:
if isinstance(obj, dtype):
func(obj, tag, typed, ns_map)
return
r = self.genroot(ns_map)
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
if isinstance(obj, voidType): # void
self.out.append("<%s%s%s></%s>\n" % (tag, a, r, tag))
else:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
if isinstance(obj, structType):
# Check for namespace
ndecl = ''
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
tag = ns + tag
self.out.append("<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r))
keylist = obj.__dict__.keys()
# first write out items with order information
if hasattr(obj, '_keyord'):
for i in range(len(obj._keyord)):
self.dump(obj._aslist(i), obj._keyord[i], 1, ns_map)
keylist.remove(obj._keyord[i])
# now write out the rest
for k in keylist:
if (k[0] != "_"):
self.dump(getattr(obj,k), k, 1, ns_map)
if isinstance(obj, bodyType):
self.multis = 1
for v, k in self.multirefs:
self.dump(v, k, typed = typed, ns_map = ns_map)
self.out.append('</%s>\n' % tag)
elif isinstance(obj, anyType):
t = ''
if typed:
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ons, ondecl = self.genns(ns_map, ns)
ins, indecl = self.genns(ns_map,
self.config.schemaNamespaceURI)
t = ' %stype="%s%s"%s%s' % \
(ins, ons, obj._type, ondecl, indecl)
self.out.append('<%s%s%s%s%s>%s</%s>\n' %
(tag, t, id, a, r, obj._marshalData(), tag))
else: # Some Class
self.out.append('<%s%s%s>\n' % (tag, id, r))
d1 = getattr(obj, '__dict__', None)
if d1 is not None:
                    for (k, v) in d1.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None,
header=None, methodattrs=None, envelope=1, encoding='UTF-8',
config=Config, noroot = 0):
t = SOAPBuilder(args=args, kw=kw, method=method, namespace=namespace,
header=header, methodattrs=methodattrs,envelope=envelope,
encoding=encoding, config=config,noroot=noroot)
return t.build()
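def _example_build_envelope():
    """Hedged usage sketch (not part of the original SOAPpy source): builds a
    SOAP envelope for a single RPC-style call. The method name, namespace and
    keyword argument below are illustrative assumptions only."""
    return buildSOAP(kw={'symbol': 'IBM'},
                     method='getQuote',
                     namespace='urn:xmethods-delayed-quotes')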
| gpl-3.0 |
JonnyWong16/plexpy | lib/maxminddb/file.py | 3 | 1988 | """For internal use only. It provides a slice-like file reader."""
import os
try:
# pylint: disable=no-name-in-module
from multiprocessing import Lock
except ImportError:
from threading import Lock
class FileBuffer(object):
"""A slice-able file reader"""
def __init__(self, database):
self._handle = open(database, 'rb')
self._size = os.fstat(self._handle.fileno()).st_size
if not hasattr(os, 'pread'):
self._lock = Lock()
def __getitem__(self, key):
if isinstance(key, slice):
return self._read(key.stop - key.start, key.start)
elif isinstance(key, int):
return self._read(1, key)
else:
raise TypeError("Invalid argument type.")
def rfind(self, needle, start):
"""Reverse find needle from start"""
pos = self._read(self._size - start - 1, start).rfind(needle)
if pos == -1:
return pos
return start + pos
def size(self):
"""Size of file"""
return self._size
def close(self):
"""Close file"""
self._handle.close()
if hasattr(os, 'pread'):
def _read(self, buffersize, offset):
"""read that uses pread"""
# pylint: disable=no-member
return os.pread(self._handle.fileno(), buffersize, offset)
else:
def _read(self, buffersize, offset):
"""read with a lock
This lock is necessary as after a fork, the different processes
will share the same file table entry, even if we dup the fd, and
as such the same offsets. There does not appear to be a way to
duplicate the file table entry and we cannot re-open based on the
            original path as that file may have been replaced with another or
unlinked.
"""
with self._lock:
self._handle.seek(offset)
return self._handle.read(buffersize)
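def _example_read_header(database_path):
    """Hedged usage sketch (not part of the original module): FileBuffer
    supports slice access like a bytes object; the path argument is assumed to
    point at an existing MaxMind DB file."""
    buf = FileBuffer(database_path)
    first_bytes = buf[0:4]      # __getitem__ with a slice -> _read(4, 0)
    total_size = buf.size()     # size of the underlying file in bytes
    buf.close()
    return first_bytes, total_size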
| gpl-3.0 |
dagwieers/ansible | lib/ansible/modules/packaging/os/svr4pkg.py | 95 | 7684 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svr4pkg
short_description: Manage Solaris SVR4 packages
description:
- Manages SVR4 packages on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available
as a legacy feature in Solaris 11.
- Note that this is a very basic packaging system. It will not enforce
dependencies on install or remove.
version_added: "0.9"
author: "Boyd Adamson (@brontitall)"
options:
name:
description:
- Package name, e.g. C(SUNWcsr)
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
- If the package is to be installed, then I(src) is required.
- The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
required: true
choices: ["present", "absent"]
src:
description:
- Specifies the location to install the package from. Required when C(state=present).
- "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
- If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there.
proxy:
description:
- HTTP[s] proxy to be used if C(src) is a URL.
response_file:
description:
- Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
required: false
zone:
description:
- Whether to install the package only in the current zone, or install it into all zones.
- The installation into all zones works only if you are working with the global zone.
required: false
default: "all"
choices: ["current", "all"]
version_added: "1.6"
category:
description:
- Install/Remove category instead of a single package.
required: false
type: bool
version_added: "1.6"
'''
EXAMPLES = '''
# Install a package from an already copied file
- svr4pkg:
name: CSWcommon
src: /tmp/cswpkgs.pkg
state: present
# Install a package directly from an http site
- svr4pkg:
name: CSWpkgutil
src: 'http://get.opencsw.org/now'
state: present
zone: current
# Install a package with a response file
- svr4pkg:
name: CSWggrep
src: /tmp/third-party.pkg
response_file: /tmp/ggrep.response
state: present
# Ensure that a package is not installed.
- svr4pkg:
name: SUNWgnome-sound-recorder
state: absent
# Ensure that a category is not installed.
- svr4pkg:
name: FIREFOX
state: absent
category: true
'''
import os
import tempfile
from ansible.module_utils.basic import AnsibleModule
def package_installed(module, name, category):
cmd = [module.get_bin_path('pkginfo', True)]
cmd.append('-q')
if category:
cmd.append('-c')
cmd.append(name)
rc, out, err = module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False
def create_admin_file():
(desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
fullauto = '''
mail=
instance=unique
partial=nocheck
runlevel=quit
idepend=nocheck
rdepend=nocheck
space=quit
setuid=nocheck
conflict=nocheck
action=nocheck
networktimeout=60
networkretries=3
authentication=quit
keystore=/var/sadm/security
proxy=
basedir=default
'''
os.write(desc, fullauto)
os.close(desc)
return filename
def run_command(module, cmd):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True)
return module.run_command(cmd)
def package_install(module, name, src, proxy, response_file, zone, category):
adminfile = create_admin_file()
cmd = ['pkgadd', '-n']
if zone == 'current':
cmd += ['-G']
cmd += ['-a', adminfile, '-d', src]
if proxy is not None:
cmd += ['-x', proxy]
if response_file is not None:
cmd += ['-r', response_file]
if category:
cmd += ['-Y']
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
def package_uninstall(module, name, src, category):
adminfile = create_admin_file()
if category:
cmd = ['pkgrm', '-na', adminfile, '-Y', name]
else:
cmd = ['pkgrm', '-na', adminfile, name]
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(required=True, choices=['present', 'absent']),
src=dict(default=None),
proxy=dict(default=None),
response_file=dict(default=None),
zone=dict(required=False, default='all', choices=['current', 'all']),
category=dict(default=False, type='bool')
),
supports_check_mode=True
)
state = module.params['state']
name = module.params['name']
src = module.params['src']
proxy = module.params['proxy']
response_file = module.params['response_file']
zone = module.params['zone']
category = module.params['category']
rc = None
out = ''
err = ''
result = {}
result['name'] = name
result['state'] = state
if state == 'present':
if src is None:
module.fail_json(name=name,
msg="src is required when state=present")
if not package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
# Stdout is normally empty but for some packages can be
# very long and is not often useful
if len(out) > 75:
out = out[:75] + '...'
elif state == 'absent':
if package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_uninstall(module, name, src, category)
out = out[:75]
# Returncodes as per pkgadd(1m)
# 0 Successful completion
# 1 Fatal error.
# 2 Warning.
# 3 Interruption.
# 4 Administration.
# 5 Administration. Interaction is required. Do not use pkgadd -n.
# 10 Reboot after installation of all packages.
# 20 Reboot after installation of this package.
# 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
if rc in (0, 2, 3, 10, 20):
result['changed'] = True
# no install nor uninstall, or failed
else:
result['changed'] = False
# rc will be none when the package already was installed and no action took place
# Only return failed=False when the returncode is known to be good as there may be more
# undocumented failure return codes
if rc not in (None, 0, 2, 10, 20):
result['failed'] = True
else:
result['failed'] = False
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
alexandrandronescu/autorobot | curs.py | 1 | 2038 | import sys
import datetime
import requests
def check_arguments(argv):
date1 = date2 = None
today = datetime.datetime.now().date()
if len(argv)==1:
date1 = datetime.date(year=datetime.datetime.now().date().year, month=datetime.datetime.now().date().month, day=1)
date2 = today
try:
if len(argv)>1:
if argv[1]=="today":
print "Today's currency: %s" % get_currency(today)
return
date1 = datetime.datetime.strptime(argv[1], "%Y-%m-%d").date()
if len(argv)>2:
date2 = datetime.datetime.strptime(argv[2], "%Y-%m-%d").date()
else:
date2 = today
if not date1 or not date2:
print "Incorrect dates!"
print "Usage: python curs.py 2013-12-01 2013-12-20"
return None, None
except Exception:
print "Exception while processing the parameters %s" % argv
return None, None
return date1, date2
def get_currency(date):
return float(requests.get('http://www.infovalutar.ro/bnr/%d/%d/%d/EUR' % (date.year, date.month, date.day)).text)
def compute_currency():
    # compute the mean value of each day's EUR exchange rate between the specified dates
date1, date2 = check_arguments(sys.argv)
if not date1 or not date2:
return
# order the dates
difference = date2-date1
    if difference.days < 0:
        # swap so date1 is the earlier date and the day count stays positive
        date1, date2 = date2, date1
        difference = date2 - date1
#print "Computing the currency between %s and %s (%s days)" % (date1, date2, difference.days)
currency = []
for day in range(difference.days+1):
# www.infovalutar.ro/bnr/2013/11/27/EUR
# print 'www.infovalutar.ro/bnr/%d/%d/%d/EUR' % (date.year, date.month, date.day)
date = date1+datetime.timedelta(days=day)
# add only weekdays
if date.isoweekday() in range(1, 6):
currency.append(get_currency(date))
print "Computing the currency between %s and %s (%s working days/%s total days)" % (date1, date2, len(currency), difference.days)
    mean_currency = sum(currency) / len(currency)
    if len(currency) < 50:
        print currency
    print "Mean currency: %s" % mean_currency
if __name__ == "__main__":
compute_currency()
| gpl-2.0 |
gaddman/ansible | lib/ansible/module_utils/vmware.py | 3 | 42109 | # -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import atexit
import os
import ssl
import time
from random import randint
try:
# requests is required for exception handling of the ConnectionError
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
from pyVim import connect
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils._text import to_text
from ansible.module_utils.six import integer_types, iteritems, string_types, raise_from
from ansible.module_utils.basic import env_fallback
class TaskError(Exception):
def __init__(self, *args, **kwargs):
super(TaskError, self).__init__(*args, **kwargs)
def wait_for_task(task, max_backoff=64, timeout=3600):
"""Wait for given task using exponential back-off algorithm.
Args:
task: VMware task object
max_backoff: Maximum amount of sleep time in seconds
timeout: Timeout for the given task in seconds
Returns: Tuple with True and result for successful task
Raises: TaskError on failure
"""
failure_counter = 0
start_time = time.time()
while True:
if time.time() - start_time >= timeout:
raise TaskError("Timeout")
if task.info.state == vim.TaskInfo.State.success:
return True, task.info.result
if task.info.state == vim.TaskInfo.State.error:
error_msg = task.info.error
host_thumbprint = None
try:
error_msg = error_msg.msg
if hasattr(task.info.error, 'thumbprint'):
host_thumbprint = task.info.error.thumbprint
except AttributeError:
pass
finally:
raise_from(TaskError(error_msg, host_thumbprint), task.info.error)
if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff)
time.sleep(sleep_time)
failure_counter += 1
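def _example_power_off_and_wait(vm, module):
    """Hedged usage sketch (not part of the original module_utils): shows how a
    pyVmomi task returned by an API call is typically polled with
    wait_for_task(); the vm and module arguments are illustrative assumptions."""
    task = vm.PowerOff()
    try:
        changed, result = wait_for_task(task, timeout=600)
        return changed, result
    except TaskError as task_err:
        module.fail_json(msg="PowerOff failed: %s" % to_text(task_err))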
def wait_for_vm_ip(content, vm, timeout=300):
facts = dict()
interval = 15
while timeout > 0:
_facts = gather_vm_facts(content, vm)
if _facts['ipv4'] or _facts['ipv6']:
facts = _facts
break
time.sleep(interval)
timeout -= interval
return facts
def find_obj(content, vimtype, name, first=True, folder=None):
container = content.viewManager.CreateContainerView(folder or content.rootFolder, recursive=True, type=vimtype)
# Get all objects matching type (and name if given)
obj_list = [obj for obj in container.view if not name or to_text(obj.name) == to_text(name)]
container.Destroy()
# Return first match or None
if first:
if obj_list:
return obj_list[0]
return None
# Return all matching objects or empty list
return obj_list
def find_dvspg_by_name(dv_switch, portgroup_name):
portgroups = dv_switch.portgroup
for pg in portgroups:
if pg.name == portgroup_name:
return pg
return None
def find_object_by_name(content, name, obj_type, folder=None, recurse=True):
if not isinstance(obj_type, list):
obj_type = [obj_type]
objects = get_all_objs(content, obj_type, folder=folder, recurse=recurse)
for obj in objects:
if obj.name == name:
return obj
return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
if datacenter:
folder = datacenter.hostFolder
else:
folder = content.rootFolder
return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=folder)
def find_datacenter_by_name(content, datacenter_name):
return find_object_by_name(content, datacenter_name, [vim.Datacenter])
def get_parent_datacenter(obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
return datacenter
def find_datastore_by_name(content, datastore_name):
return find_object_by_name(content, datastore_name, [vim.Datastore])
def find_dvs_by_name(content, switch_name):
return find_object_by_name(content, switch_name, [vim.DistributedVirtualSwitch])
def find_hostsystem_by_name(content, hostname):
return find_object_by_name(content, hostname, [vim.HostSystem])
def find_resource_pool_by_name(content, resource_pool_name):
return find_object_by_name(content, resource_pool_name, [vim.ResourcePool])
def find_network_by_name(content, network_name):
return find_object_by_name(content, network_name, [vim.Network])
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None, folder=None, match_first=False):
""" UUID is unique to a VM, every other id returns the first match. """
si = content.searchIndex
vm = None
if vm_id_type == 'dns_name':
vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
elif vm_id_type == 'uuid':
# Search By BIOS UUID rather than instance UUID
vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
elif vm_id_type == 'ip':
vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
elif vm_id_type == 'vm_name':
folder = None
if cluster:
folder = cluster
elif datacenter:
folder = datacenter.hostFolder
vm = find_vm_by_name(content, vm_id, folder)
elif vm_id_type == 'inventory_path':
searchpath = folder
# get all objects for this path
f_obj = si.FindByInventoryPath(searchpath)
if f_obj:
if isinstance(f_obj, vim.Datacenter):
f_obj = f_obj.vmFolder
for c_obj in f_obj.childEntity:
if not isinstance(c_obj, vim.VirtualMachine):
continue
if c_obj.name == vm_id:
vm = c_obj
if match_first:
break
return vm
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
return find_object_by_name(content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse)
def find_host_portgroup_by_name(host, portgroup_name):
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return None
def compile_folder_path_for_object(vobj):
""" make a /vm/foo/bar/baz like folder path for an object """
paths = []
if isinstance(vobj, vim.Folder):
paths.append(vobj.name)
thisobj = vobj
while hasattr(thisobj, 'parent'):
thisobj = thisobj.parent
try:
moid = thisobj._moId
except AttributeError:
moid = None
if moid in ['group-d1', 'ha-folder-root']:
break
if isinstance(thisobj, vim.Folder):
paths.append(thisobj.name)
paths.reverse()
return '/' + '/'.join(paths)
def _get_vm_prop(vm, attributes):
"""Safely get a property or return None"""
result = vm
for attribute in attributes:
try:
result = getattr(result, attribute)
except (AttributeError, IndexError):
return None
return result
def gather_vm_facts(content, vm):
""" Gather facts from vim.VirtualMachine object. """
facts = {
'module_hw': True,
'hw_name': vm.config.name,
'hw_power_status': vm.summary.runtime.powerState,
'hw_guest_full_name': vm.summary.guest.guestFullName,
'hw_guest_id': vm.summary.guest.guestId,
'hw_product_uuid': vm.config.uuid,
'hw_processor_count': vm.config.hardware.numCPU,
'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,
'hw_memtotal_mb': vm.config.hardware.memoryMB,
'hw_interfaces': [],
'hw_datastores': [],
'hw_files': [],
'hw_esxi_host': None,
'hw_guest_ha_state': None,
'hw_is_template': vm.config.template,
'hw_folder': None,
'hw_version': vm.config.version,
'instance_uuid': vm.config.instanceUuid,
'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
'guest_question': vm.summary.runtime.question,
'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,
'ipv4': None,
'ipv6': None,
'annotation': vm.config.annotation,
'customvalues': {},
'snapshots': [],
'current_snapshot': None,
'vnc': {},
}
# facts that may or may not exist
if vm.summary.runtime.host:
try:
host = vm.summary.runtime.host
facts['hw_esxi_host'] = host.summary.config.name
except vim.fault.NoPermission:
# User does not have read permission for the host system,
# proceed without this value. This value does not contribute or hamper
# provisioning or power management operations.
pass
if vm.summary.runtime.dasVmProtection:
facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected
datastores = vm.datastore
for ds in datastores:
facts['hw_datastores'].append(ds.info.name)
try:
files = vm.config.files
layout = vm.layout
if files:
facts['hw_files'] = [files.vmPathName]
for item in layout.snapshot:
for snap in item.snapshotFile:
facts['hw_files'].append(files.snapshotDirectory + snap)
for item in layout.configFile:
facts['hw_files'].append(os.path.dirname(files.vmPathName) + '/' + item)
for item in vm.layout.logFile:
facts['hw_files'].append(files.logDirectory + item)
for item in vm.layout.disk:
for disk in item.diskFile:
facts['hw_files'].append(disk)
except BaseException:
pass
facts['hw_folder'] = PyVmomi.get_vm_path(content, vm)
cfm = content.customFieldsManager
# Resolve custom values
for value_obj in vm.summary.customValue:
kn = value_obj.key
if cfm is not None and cfm.field:
for f in cfm.field:
if f.key == value_obj.key:
kn = f.name
# Exit the loop immediately, we found it
break
facts['customvalues'][kn] = value_obj.value
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = list(device.ipAddress)
if vm.guest.ipAddress:
if ':' in vm.guest.ipAddress:
facts['ipv6'] = vm.guest.ipAddress
else:
facts['ipv4'] = vm.guest.ipAddress
ethernet_idx = 0
for entry in vm.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
if entry.macAddress:
mac_addr = entry.macAddress
mac_addr_dash = mac_addr.replace(':', '-')
else:
mac_addr = mac_addr_dash = None
if (hasattr(entry, 'backing') and hasattr(entry.backing, 'port') and
hasattr(entry.backing.port, 'portKey') and hasattr(entry.backing.port, 'portgroupKey')):
port_group_key = entry.backing.port.portgroupKey
port_key = entry.backing.port.portKey
else:
port_group_key = None
port_key = None
factname = 'hw_eth' + str(ethernet_idx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': mac_addr,
'ipaddresses': net_dict.get(entry.macAddress, None),
'macaddress_dash': mac_addr_dash,
'summary': entry.deviceInfo.summary,
'portgroup_portkey': port_key,
'portgroup_key': port_group_key,
}
facts['hw_interfaces'].append('eth' + str(ethernet_idx))
ethernet_idx += 1
snapshot_facts = list_snapshots(vm)
if 'snapshots' in snapshot_facts:
facts['snapshots'] = snapshot_facts['snapshots']
facts['current_snapshot'] = snapshot_facts['current_snapshot']
facts['vnc'] = get_vnc_extraconfig(vm)
return facts
def deserialize_snapshot_obj(obj):
return {'id': obj.id,
'name': obj.name,
'description': obj.description,
'creation_time': obj.createTime,
'state': obj.state}
def list_snapshots_recursively(snapshots):
snapshot_data = []
for snapshot in snapshots:
snapshot_data.append(deserialize_snapshot_obj(snapshot))
snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
return snapshot_data
def get_current_snap_obj(snapshots, snapob):
snap_obj = []
for snapshot in snapshots:
if snapshot.snapshot == snapob:
snap_obj.append(snapshot)
snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
return snap_obj
def list_snapshots(vm):
result = {}
snapshot = _get_vm_prop(vm, ('snapshot',))
if not snapshot:
return result
if vm.snapshot is None:
return result
result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
current_snapref = vm.snapshot.currentSnapshot
current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
if current_snap_obj:
result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
else:
result['current_snapshot'] = dict()
return result
def get_vnc_extraconfig(vm):
result = {}
for opts in vm.config.extraConfig:
for optkeyname in ['enabled', 'ip', 'port', 'password']:
if opts.key.lower() == "remotedisplay.vnc." + optkeyname:
result[optkeyname] = opts.value
return result
def vmware_argument_spec():
return dict(
hostname=dict(type='str',
required=False,
fallback=(env_fallback, ['VMWARE_HOST']),
),
username=dict(type='str',
aliases=['user', 'admin'],
required=False,
fallback=(env_fallback, ['VMWARE_USER'])),
password=dict(type='str',
aliases=['pass', 'pwd'],
required=False,
no_log=True,
fallback=(env_fallback, ['VMWARE_PASSWORD'])),
port=dict(type='int',
default=443,
fallback=(env_fallback, ['VMWARE_PORT'])),
validate_certs=dict(type='bool',
required=False,
default=True,
fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS'])),
)
def connect_to_api(module, disconnect_atexit=True):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
port = module.params.get('port', 443)
validate_certs = module.params['validate_certs']
if not hostname:
module.fail_json(msg="Hostname parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'")
if not username:
module.fail_json(msg="Username parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_USER=ESXI_USERNAME'")
if not password:
module.fail_json(msg="Password parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'")
if validate_certs and not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
'python or use validate_certs=false.')
ssl_context = None
if not validate_certs and hasattr(ssl, 'SSLContext'):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.verify_mode = ssl.CERT_NONE
service_instance = None
try:
connect_args = dict(
host=hostname,
user=username,
pwd=password,
port=port,
)
if ssl_context:
connect_args.update(sslContext=ssl_context)
service_instance = connect.SmartConnect(**connect_args)
except vim.fault.InvalidLogin as invalid_login:
module.fail_json(msg="Unable to log on to vCenter or ESXi API at %s:%s as %s: %s" % (hostname, port, username, invalid_login.msg))
except vim.fault.NoPermission as no_permission:
module.fail_json(msg="User %s does not have required permission"
" to log on to vCenter or ESXi API at %s:%s : %s" % (username, hostname, port, no_permission.msg))
except (requests.ConnectionError, ssl.SSLError) as generic_req_exc:
module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (hostname, port, generic_req_exc))
except vmodl.fault.InvalidRequest as invalid_request:
# Request is malformed
module.fail_json(msg="Failed to get a response from server %s:%s as "
"request is malformed: %s" % (hostname, port, invalid_request.msg))
except Exception as generic_exc:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s:%s : %s" % (hostname, port, generic_exc))
if service_instance is None:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port))
# Disabling atexit should be used in special cases only.
# Such as IP change of the ESXi host which removes the connection anyway.
# Also removal significantly speeds up the return of the module
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
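def _example_module_setup():
    """Hedged usage sketch (not part of the original module_utils): the common
    pattern used by VMware modules -- build the shared argument spec, create an
    AnsibleModule and connect to the vCenter/ESXi API. The AnsibleModule import
    normally lives in the calling module."""
    from ansible.module_utils.basic import AnsibleModule
    module = AnsibleModule(argument_spec=vmware_argument_spec(),
                           supports_check_mode=True)
    content = connect_to_api(module)
    return module, content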
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if (tools_status == 'toolsNotInstalled' or
tools_status == 'toolsNotRunning'):
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
try:
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
pm = content.guestOperationsManager.processManager
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
ps = vim.vm.guest.ProcessManager.ProgramSpec(
# programPath=program,
# arguments=args
programPath=program_path,
arguments=program_args,
workingDirectory=program_cwd,
)
res = pm.StartProgramInGuest(vm, creds, ps)
result['pid'] = res
pdata = pm.ListProcessesInGuest(vm, creds, [res])
# wait for pid to finish
while not pdata[0].endTime:
time.sleep(1)
pdata = pm.ListProcessesInGuest(vm, creds, [res])
result['owner'] = pdata[0].owner
result['startTime'] = pdata[0].startTime.isoformat()
result['endTime'] = pdata[0].endTime.isoformat()
result['exitCode'] = pdata[0].exitCode
if result['exitCode'] != 0:
result['failed'] = True
result['msg'] = "program exited non-zero"
else:
result['msg'] = "program completed successfully"
except Exception as e:
result['msg'] = str(e)
result['failed'] = True
return result
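def _example_run_in_guest(content, vm, module):
    """Hedged usage sketch (not part of the original module_utils): runs a
    program inside the guest via VMware Tools. The credentials and program
    path below are illustrative assumptions only."""
    result = run_command_in_guest(content, vm,
                                  username='guestuser', password='secret',
                                  program_path='/bin/ls',
                                  program_args='-la /tmp',
                                  program_cwd='/tmp',
                                  program_env=None)
    if result['failed']:
        module.fail_json(msg=result['msg'])
    return result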
def serialize_spec(clonespec):
"""Serialize a clonespec or a relocation spec"""
data = {}
attrs = dir(clonespec)
attrs = [x for x in attrs if not x.startswith('_')]
for x in attrs:
xo = getattr(clonespec, x)
if callable(xo):
continue
xt = type(xo)
if xo is None:
data[x] = None
elif isinstance(xo, vim.vm.ConfigSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.RelocateSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDisk):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDeviceSpec.FileOperation):
data[x] = to_text(xo)
elif isinstance(xo, vim.Description):
data[x] = {
'dynamicProperty': serialize_spec(xo.dynamicProperty),
'dynamicType': serialize_spec(xo.dynamicType),
'label': serialize_spec(xo.label),
'summary': serialize_spec(xo.summary),
}
elif hasattr(xo, 'name'):
data[x] = to_text(xo) + ':' + to_text(xo.name)
elif isinstance(xo, vim.vm.ProfileSpec):
pass
elif issubclass(xt, list):
data[x] = []
for xe in xo:
data[x].append(serialize_spec(xe))
elif issubclass(xt, string_types + integer_types + (float, bool)):
if issubclass(xt, integer_types):
data[x] = int(xo)
else:
data[x] = to_text(xo)
elif issubclass(xt, bool):
data[x] = xo
elif issubclass(xt, dict):
data[to_text(x)] = {}
for k, v in xo.items():
k = to_text(k)
data[x][k] = serialize_spec(v)
else:
data[x] = str(xt)
return data
def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
dc = find_datacenter_by_name(content, datacenter_name)
if dc is None:
module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
cluster = find_cluster_by_name(content, cluster_name, datacenter=dc)
if cluster is None:
module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)
for host in cluster.host:
if host.name == host_name:
return host, cluster
return None, cluster
def set_vm_power_state(content, vm, state, force, timeout=0):
"""
    Set the power status for a VM determined by the current and
    requested states. force is required when the current power state is
    anything other than poweredOn or poweredOff (for example, suspended).
"""
facts = gather_vm_facts(content, vm)
expected_state = state.replace('_', '').replace('-', '').lower()
current_state = facts['hw_power_status'].lower()
result = dict(
changed=False,
failed=False,
)
# Need Force
if not force and current_state not in ['poweredon', 'poweredoff']:
result['failed'] = True
result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
return result
# State is not already true
if current_state != expected_state:
task = None
try:
if expected_state == 'poweredoff':
task = vm.PowerOff()
elif expected_state == 'poweredon':
task = vm.PowerOn()
elif expected_state == 'restarted':
if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
task = vm.Reset()
else:
result['failed'] = True
result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state
elif expected_state == 'suspended':
if current_state in ('poweredon', 'poweringon'):
task = vm.Suspend()
else:
result['failed'] = True
result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state
elif expected_state in ['shutdownguest', 'rebootguest']:
if current_state == 'poweredon':
if vm.guest.toolsRunningStatus == 'guestToolsRunning':
if expected_state == 'shutdownguest':
task = vm.ShutdownGuest()
if timeout > 0:
result.update(wait_for_poweroff(vm, timeout))
else:
task = vm.RebootGuest()
# Set result['changed'] immediately because
# shutdown and reboot return None.
result['changed'] = True
else:
result['failed'] = True
result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
else:
result['failed'] = True
result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name
else:
result['failed'] = True
result['msg'] = "Unsupported expected state provided: %s" % expected_state
except Exception as e:
result['failed'] = True
result['msg'] = to_text(e)
if task:
wait_for_task(task)
if task.info.state == 'error':
result['failed'] = True
result['msg'] = task.info.error.msg
else:
result['changed'] = True
# need to get new metadata if changed
result['instance'] = gather_vm_facts(content, vm)
return result
def wait_for_poweroff(vm, timeout=300):
result = dict()
interval = 15
while timeout > 0:
if vm.runtime.powerState.lower() == 'poweredoff':
break
time.sleep(interval)
timeout -= interval
else:
result['failed'] = True
result['msg'] = 'Timeout while waiting for VM power off.'
return result
class PyVmomi(object):
def __init__(self, module):
"""
Constructor
"""
if not HAS_REQUESTS:
module.fail_json(msg="Unable to find 'requests' Python library which is required."
" Please install using 'pip install requests'")
if not HAS_PYVMOMI:
module.fail_json(msg='PyVmomi Python module required. Install using "pip install PyVmomi"')
self.module = module
self.params = module.params
self.si = None
self.current_vm_obj = None
self.content = connect_to_api(self.module)
def is_vcenter(self):
"""
Check if given hostname is vCenter or ESXi host
Returns: True if given connection is with vCenter server
False if given connection is with ESXi server
"""
api_type = None
try:
api_type = self.content.about.apiType
except (vmodl.RuntimeFault, vim.fault.VimFault) as exc:
self.module.fail_json(msg="Failed to get status of vCenter server : %s" % exc.msg)
if api_type == 'VirtualCenter':
return True
elif api_type == 'HostAgent':
return False
def get_managed_objects_properties(self, vim_type, properties=None):
"""
Function to look up a Managed Object Reference in vCenter / ESXi Environment
:param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
:param properties: List of properties related to vim object e.g. Name
:return: local content object
"""
# Get Root Folder
root_folder = self.content.rootFolder
if properties is None:
properties = ['name']
# Create Container View with default root folder
mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True)
# Create Traversal spec
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
name="traversal_spec",
path='view',
skip=False,
type=vim.view.ContainerView
)
# Create Property Spec
property_spec = vmodl.query.PropertyCollector.PropertySpec(
            type=vim_type,  # Type of object to be retrieved
all=False,
pathSet=properties
)
# Create Object Spec
object_spec = vmodl.query.PropertyCollector.ObjectSpec(
obj=mor,
skip=True,
selectSet=[traversal_spec]
)
# Create Filter Spec
filter_spec = vmodl.query.PropertyCollector.FilterSpec(
objectSet=[object_spec],
propSet=[property_spec],
reportMissingObjectsInResults=False
)
return self.content.propertyCollector.RetrieveContents([filter_spec])
# Virtual Machine related functions
def get_vm(self):
"""
Function to find unique virtual machine either by UUID or Name.
Returns: virtual machine object if found, else None.
"""
vm_obj = None
user_desired_path = None
if self.params['uuid']:
vm_obj = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid")
elif self.params['name']:
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
vms = []
for temp_vm_object in objects:
if len(temp_vm_object.propSet) != 1:
continue
for temp_vm_object_property in temp_vm_object.propSet:
if temp_vm_object_property.val == self.params['name']:
vms.append(temp_vm_object.obj)
break
            # get_managed_objects_properties may return multiple virtual machines;
            # the following code tries to find the user-desired one depending upon the folder specified.
if len(vms) > 1:
# We have found multiple virtual machines, decide depending upon folder value
if self.params['folder'] is None:
self.module.fail_json(msg="Multiple virtual machines with same name [%s] found, "
"Folder value is a required parameter to find uniqueness "
"of the virtual machine" % self.params['name'],
details="Please see documentation of the vmware_guest module "
"for folder parameter.")
# Get folder path where virtual machine is located
# User provided folder where user thinks virtual machine is present
user_folder = self.params['folder']
# User defined datacenter
user_defined_dc = self.params['datacenter']
# User defined datacenter's object
datacenter_obj = find_datacenter_by_name(self.content, self.params['datacenter'])
# Get Path for Datacenter
dcpath = compile_folder_path_for_object(vobj=datacenter_obj)
# Nested folder does not return trailing /
if not dcpath.endswith('/'):
dcpath += '/'
if user_folder in [None, '', '/']:
# User provided blank value or
# User provided only root value, we fail
self.module.fail_json(msg="vmware_guest found multiple virtual machines with same "
"name [%s], please specify folder path other than blank "
"or '/'" % self.params['name'])
elif user_folder.startswith('/vm/'):
# User provided nested folder under VMware default vm folder i.e. folder = /vm/india/finance
user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder)
else:
# User defined datacenter is not nested i.e. dcpath = '/' , or
# User defined datacenter is nested i.e. dcpath = '/F0/DC0' or
# User provided folder starts with / and datacenter i.e. folder = /ha-datacenter/ or
# User defined folder starts with datacenter without '/' i.e.
# folder = DC0/vm/india/finance or
# folder = DC0/vm
user_desired_path = user_folder
for vm in vms:
# Check if user has provided same path as virtual machine
actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vm)
if not actual_vm_folder_path.startswith("%s%s" % (dcpath, user_defined_dc)):
continue
if user_desired_path in actual_vm_folder_path:
vm_obj = vm
break
elif vms:
# Unique virtual machine found.
vm_obj = vms[0]
if vm_obj:
self.current_vm_obj = vm_obj
return vm_obj
def gather_facts(self, vm):
"""
Function to gather facts of virtual machine.
Args:
vm: Name of virtual machine.
Returns: Facts dictionary of the given virtual machine.
"""
return gather_vm_facts(self.content, vm)
@staticmethod
def get_vm_path(content, vm_name):
"""
Function to find the path of virtual machine.
Args:
content: VMware content object
vm_name: virtual machine managed object
Returns: Folder of virtual machine if exists, else None
"""
folder_name = None
folder = vm_name.parent
if folder:
folder_name = folder.name
fp = folder.parent
# climb back up the tree to find our path, stop before the root folder
while fp is not None and fp.name is not None and fp != content.rootFolder:
folder_name = fp.name + '/' + folder_name
try:
fp = fp.parent
except BaseException:
break
folder_name = '/' + folder_name
return folder_name
def get_vm_or_template(self, template_name=None):
"""
Find the virtual machine or virtual machine template using name
used for cloning purpose.
Args:
template_name: Name of virtual machine or virtual machine template
Returns: virtual machine or virtual machine template object
"""
template_obj = None
if not template_name:
return template_obj
if "/" in template_name:
vm_obj_path = os.path.dirname(template_name)
vm_obj_name = os.path.basename(template_name)
template_obj = find_vm_by_id(self.content, vm_obj_name, vm_id_type="inventory_path", folder=vm_obj_path)
if template_obj:
return template_obj
else:
template_obj = find_vm_by_id(self.content, vm_id=template_name, vm_id_type="uuid")
if template_obj:
return template_obj
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
templates = []
for temp_vm_object in objects:
if len(temp_vm_object.propSet) != 1:
continue
for temp_vm_object_property in temp_vm_object.propSet:
if temp_vm_object_property.val == template_name:
templates.append(temp_vm_object.obj)
break
if len(templates) > 1:
# We have found multiple virtual machine templates
self.module.fail_json(msg="Multiple virtual machines or templates with same name [%s] found." % template_name)
elif templates:
template_obj = templates[0]
return template_obj
# Cluster related functions
def find_cluster_by_name(self, cluster_name, datacenter_name=None):
"""
Find Cluster by name in given datacenter
Args:
            cluster_name: Name of the cluster to find
datacenter_name: (optional) Name of datacenter
        Returns: Cluster managed object if found, else None
"""
return find_cluster_by_name(self.content, cluster_name, datacenter=datacenter_name)
def get_all_hosts_by_cluster(self, cluster_name):
"""
Get all hosts from cluster by cluster name
Args:
cluster_name: Name of cluster
Returns: List of hosts
"""
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
return [host for host in cluster_obj.host]
else:
return []
# Hosts related functions
def find_hostsystem_by_name(self, host_name):
"""
Find Host by name
Args:
host_name: Name of ESXi host
        Returns: Host system managed object if found, else None
"""
return find_hostsystem_by_name(self.content, hostname=host_name)
def get_all_host_objs(self, cluster_name=None, esxi_host_name=None):
"""
        Function to get all host system managed objects
Args:
cluster_name: Name of Cluster
esxi_host_name: Name of ESXi server
Returns: A list of all host system managed objects, else empty list
"""
host_obj_list = []
if not self.is_vcenter():
hosts = get_all_objs(self.content, [vim.HostSystem]).keys()
if hosts:
host_obj_list.append(list(hosts)[0])
else:
if cluster_name:
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
host_obj_list = [host for host in cluster_obj.host]
else:
self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
elif esxi_host_name:
if isinstance(esxi_host_name, str):
esxi_host_name = [esxi_host_name]
for host in esxi_host_name:
esxi_host_obj = self.find_hostsystem_by_name(host_name=host)
if esxi_host_obj:
host_obj_list = [esxi_host_obj]
else:
self.module.fail_json(changed=False, msg="ESXi '%s' not found" % host)
return host_obj_list
# Network related functions
@staticmethod
def find_host_portgroup_by_name(host, portgroup_name):
"""
Find Portgroup on given host
Args:
host: Host config object
portgroup_name: Name of portgroup
        Returns: Portgroup object if found, else False
"""
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return False
def get_all_port_groups_by_host(self, host_system):
"""
        Function to get all Port Groups by host
Args:
host_system: Name of Host System
Returns: List of Port Group Spec
"""
pgs_list = []
for pg in host_system.config.network.portgroup:
pgs_list.append(pg)
return pgs_list
# Datacenter
def find_datacenter_by_name(self, datacenter_name):
"""
Function to get datacenter managed object by name
Args:
datacenter_name: Name of datacenter
Returns: datacenter managed object if found else None
"""
return find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
def find_datastore_by_name(self, datastore_name):
"""
Function to get datastore managed object by name
Args:
datastore_name: Name of datastore
Returns: datastore managed object if found else None
"""
return find_datastore_by_name(self.content, datastore_name=datastore_name)
# Datastore cluster
def find_datastore_cluster_by_name(self, datastore_cluster_name):
"""
Function to get datastore cluster managed object by name
Args:
datastore_cluster_name: Name of datastore cluster
Returns: Datastore cluster managed object if found else None
"""
data_store_clusters = get_all_objs(self.content, [vim.StoragePod])
for dsc in data_store_clusters:
if dsc.name == datastore_cluster_name:
return dsc
return None
| gpl-3.0 |
heke123/chromium-crosswalk | third_party/WebKit/Source/build/scripts/make_cssom_types.py | 6 | 1588 | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import css_properties
import in_generator
from name_utilities import enum_for_css_keyword
import template_expander
class CSSOMTypesWriter(css_properties.CSSProperties):
def __init__(self, in_file_path):
super(CSSOMTypesWriter, self).__init__(in_file_path)
for property in self._properties.values():
types = []
# Expand types
for singleType in property['typedom_types']:
if singleType == 'Length':
types.append('SimpleLength')
types.append('CalcLength')
else:
types.append(singleType)
property['typedom_types'] = types
# Generate Keyword ID values from keywords.
property['keywordIDs'] = map(
enum_for_css_keyword, property['keywords'])
self._outputs = {
'CSSOMTypes.cpp': self.generate_types,
'CSSOMKeywords.cpp': self.generate_keywords,
}
@template_expander.use_jinja('CSSOMTypes.cpp.tmpl')
def generate_types(self):
return {
'properties': self._properties,
}
@template_expander.use_jinja('CSSOMKeywords.cpp.tmpl')
def generate_keywords(self):
return {
'properties': self._properties,
}
if __name__ == '__main__':
in_generator.Maker(CSSOMTypesWriter).main(sys.argv)
| bsd-3-clause |
srsman/odoo | openerp/osv/fields.py | 45 | 75006 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
    Field Attributes:
        * _classic_read: whether this is a classic SQL field
        * _type: the field type
        * _auto_join: for one2many and many2one fields, tells whether select
            queries will join the relational table instead of replacing the
            field condition by an equivalent one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import functools
import logging
import pytz
import re
import xmlrpclib
from operator import itemgetter
from contextlib import contextmanager
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_repr, float_round, frozendict, html_sanitize
import simplejson
from openerp import SUPERUSER_ID, registry
@contextmanager
def _get_cursor():
# yield a valid cursor from any environment or create a new one if none found
from openerp.api import Environment
from openerp.http import request
try:
request.env # force request's env to be computed
except RuntimeError:
pass # ignore if not in a request
for env in Environment.envs:
if not env.cr.closed:
yield env.cr
break
else:
with registry().cursor() as cr:
yield cr
EMPTY_DICT = frozendict()
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
""" Base of all fields, a database column
An instance of this object is a *description* of a database column. It will
not hold any data, but only provide the methods to manipulate data of an
ORM record or even prepare/update the database to hold such a field of data.
"""
_classic_read = True
_classic_write = True
_auto_join = False
_properties = False
_type = 'unknown'
_obj = None
_multi = False
_symbol_c = '%s'
_symbol_f = _symbol_set
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = None
_deprecated = False
__slots__ = [
'copy', # whether value is copied by BaseModel.copy()
'string',
'help',
'required',
'readonly',
'_domain',
'_context',
'states',
'priority',
'change_default',
'size',
'ondelete',
'translate',
'select',
'manual',
'write',
'read',
'selectable',
'group_operator',
'groups', # CSV list of ext IDs of groups
'deprecated', # Optional deprecation warning
'_args',
'_prefetch',
]
def __init__(self, string='unknown', required=False, readonly=False, domain=[], context={}, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
"""
The 'manual' keyword argument specifies if the field is a custom one.
It corresponds to the 'state' column in ir_model_fields.
"""
# add parameters and default values
args['copy'] = args.get('copy', True)
args['string'] = string
args['help'] = args.get('help', '')
args['required'] = required
args['readonly'] = readonly
args['_domain'] = domain
args['_context'] = context
args['states'] = states
args['priority'] = priority
args['change_default'] = change_default
args['size'] = size
args['ondelete'] = ondelete.lower() if ondelete else None
args['translate'] = translate
args['select'] = select
args['manual'] = manual
args['write'] = args.get('write', False)
args['read'] = args.get('read', False)
args['selectable'] = args.get('selectable', True)
args['group_operator'] = args.get('group_operator', None)
args['groups'] = args.get('groups', None)
args['deprecated'] = args.get('deprecated', None)
args['_prefetch'] = args.get('_prefetch', True)
self._args = EMPTY_DICT
for key, val in args.iteritems():
setattr(self, key, val)
# prefetch only if _classic_write, not deprecated and not manual
if not self._classic_write or self.deprecated or self.manual:
self._prefetch = False
def __getattr__(self, name):
""" Access a non-slot attribute. """
if name == '_args':
raise AttributeError(name)
try:
return self._args[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
""" Set a slot or non-slot attribute. """
try:
object.__setattr__(self, name, value)
except AttributeError:
if self._args:
self._args[name] = value
else:
self._args = {name: value} # replace EMPTY_DICT
def __delattr__(self, name):
""" Remove a non-slot attribute. """
try:
del self._args[name]
except KeyError:
raise AttributeError(name)
def new(self, _computed_field=False, **args):
""" Return a column like `self` with the given parameters; the parameter
`_computed_field` tells whether the corresponding field is computed.
"""
# memory optimization: reuse self whenever possible; you can reduce the
# average memory usage per registry by 10 megabytes!
column = type(self)(**args)
return self if self.to_field_args() == column.to_field_args() else column
def to_field(self):
""" convert column `self` to a new-style field """
from openerp.fields import Field
return Field.by_type[self._type](column=self, **self.to_field_args())
def to_field_args(self):
""" return a dictionary with all the arguments to pass to the field """
base_items = [
('copy', self.copy),
('index', self.select),
('manual', self.manual),
('string', self.string),
('help', self.help),
('readonly', self.readonly),
('required', self.required),
('states', self.states),
('groups', self.groups),
('change_default', self.change_default),
('deprecated', self.deprecated),
]
truthy_items = filter(itemgetter(1), [
('group_operator', self.group_operator),
('size', self.size),
('ondelete', self.ondelete),
('translate', self.translate),
('domain', self._domain),
('context', self._context),
])
return dict(base_items + truthy_items + self._args.items())
def restart(self):
pass
def set(self, cr, obj, id, name, value, user=None, context=None):
cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
raise Exception(_('undefined get method !'))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
res = obj.read(cr, uid, ids, [name], context=context)
return [x[name] for x in res]
def as_display_name(self, cr, uid, obj, value, context=None):
"""Converts a field value to a suitable string representation for a record,
e.g. when this field is used as ``rec_name``.
:param obj: the ``BaseModel`` instance this column belongs to
:param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
for this column
"""
# delegated to class method, so a column type A can delegate
# to a column type B.
        return self._as_display_name(self, cr, uid, obj, value, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # This needs to be a class method, in case a column type A has to delegate
# to a column type B.
return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
_type = 'boolean'
_symbol_c = '%s'
_symbol_f = bool
_symbol_set = (_symbol_c, _symbol_f)
__slots__ = []
def __init__(self, string='unknown', required=False, **args):
super(boolean, self).__init__(string=string, required=required, **args)
if required:
_logger.debug(
"required=True is deprecated: making a boolean field"
" `required` has no effect, as NULL values are "
"automatically turned into False. args: %r",args)
class integer(_column):
_type = 'integer'
_symbol_c = '%s'
_symbol_f = lambda x: int(x or 0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0
__slots__ = []
def __init__(self, string='unknown', required=False, **args):
super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
_type = 'reference'
_classic_read = False # post-process to handle missing target
__slots__ = ['selection']
def __init__(self, string, selection, size=None, **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, size=size, selection=selection, **args)
def to_field_args(self):
args = super(reference, self).to_field_args()
args['selection'] = self.selection
return args
def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
result = {}
# copy initial values fetched previously.
for value in values:
result[value['id']] = value[name]
if value[name]:
model, res_id = value[name].split(',')
if not obj.pool[model].exists(cr, uid, [int(res_id)], context=context):
result[value['id']] = False
return result
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
if value:
# reference fields have a 'model,id'-like value, that we need to convert
# to a real name
model_name, res_id = value.split(',')
if model_name in obj.pool and res_id:
model = obj.pool[model_name]
names = model.name_get(cr, uid, [int(res_id)], context=context)
return names[0][1] if names else False
return tools.ustr(value)
# takes a string (encoded in utf8) and returns a string (encoded in utf8)
def _symbol_set_char(self, symb):
#TODO:
# * we need to remove the "symb==False" from the next line BUT
# for now too many things rely on this broken behavior
# * the symb==None test should be common to all data types
if symb is None or symb == False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
u_symb = tools.ustr(symb)
return u_symb[:self.size].encode('utf8')
class char(_column):
_type = 'char'
__slots__ = ['_symbol_f', '_symbol_set', '_symbol_set_char']
def __init__(self, string="unknown", size=None, **args):
_column.__init__(self, string=string, size=size or None, **args)
# self._symbol_set_char defined to keep the backward compatibility
self._symbol_f = self._symbol_set_char = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
class text(_column):
_type = 'text'
__slots__ = []
class html(text):
_type = 'html'
_symbol_c = '%s'
__slots__ = ['_sanitize', '_strip_style', '_symbol_f', '_symbol_set']
def _symbol_set_html(self, value):
if value is None or value is False:
return None
if not self._sanitize:
return value
return html_sanitize(value, strip_style=self._strip_style)
def __init__(self, string='unknown', sanitize=True, strip_style=False, **args):
super(html, self).__init__(string=string, **args)
self._sanitize = sanitize
self._strip_style = strip_style
# symbol_set redefinition because of sanitize specific behavior
self._symbol_f = self._symbol_set_html
self._symbol_set = (self._symbol_c, self._symbol_f)
def to_field_args(self):
args = super(html, self).to_field_args()
args['sanitize'] = self._sanitize
return args
import __builtin__
def _symbol_set_float(self, x):
result = __builtin__.float(x or 0.0)
digits = self.digits
if digits:
precision, scale = digits
result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale)
return result
class float(_column):
_type = 'float'
_symbol_c = '%s'
_symbol_get = lambda self,x: x or 0.0
__slots__ = ['_digits', '_digits_compute', '_symbol_f', '_symbol_set']
@property
def digits(self):
if self._digits_compute:
with _get_cursor() as cr:
return self._digits_compute(cr)
else:
return self._digits
def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
_column.__init__(self, string=string, required=required, **args)
# synopsis: digits_compute(cr) -> (precision, scale)
self._digits = digits
self._digits_compute = digits_compute
self._symbol_f = lambda x: _symbol_set_float(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
def to_field_args(self):
args = super(float, self).to_field_args()
args['digits'] = self._digits_compute or self._digits
return args
def digits_change(self, cr):
pass
class date(_column):
_type = 'date'
__slots__ = []
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def today(*args):
""" Returns the current date in a format fit for being a
default value to a ``date`` field.
        This method should be provided as is to the _defaults dict; it
        should not be called directly.
"""
return DT.date.today().strftime(
tools.DEFAULT_SERVER_DATE_FORMAT)
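    # Minimal usage sketch (hypothetical field name), per the docstring above:
    #   _defaults = {'order_date': fields.date.today}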
@staticmethod
def context_today(model, cr, uid, context=None, timestamp=None):
"""Returns the current date as seen in the client's timezone
in a format fit for date fields.
This method may be passed as value to initialize _defaults.
:param Model model: model (osv) for which the date value is being
computed - automatically passed when used in
_defaults.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a
datetime, regular dates can't be converted
between timezones.)
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: str
"""
today = timestamp or DT.datetime.now()
context_today = None
if context and context.get('tz'):
tz_name = context['tz']
else:
user = model.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
if tz_name:
try:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
utc_today = utc.localize(today, is_dst=False) # UTC = no DST
context_today = utc_today.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific today date, "
"using the UTC value for `today`",
exc_info=True)
return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
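    # Usage sketch as a _defaults initializer (hypothetical field name):
    #   _defaults = {
    #       'order_date': lambda self, cr, uid, ctx: fields.date.context_today(
    #           self, cr, uid, context=ctx),
    #   }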
@staticmethod
def date_to_datetime(model, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 AM - because a time is needed.
:param str userdate: date string in in user time zone
:return: UTC datetime string for server-side use
"""
user_date = DT.datetime.strptime(userdate, tools.DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + DT.timedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
class datetime(_column):
_type = 'datetime'
__slots__ = []
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def now(*args):
""" Returns the current datetime in a format fit for being a
default value to a ``datetime`` field.
        This method should be provided as is to the _defaults dict; it
        should not be called directly.
"""
return DT.datetime.now().strftime(
tools.DEFAULT_SERVER_DATETIME_FORMAT)
@staticmethod
def context_timestamp(cr, uid, timestamp, context=None):
"""Returns the given timestamp converted to the client's timezone.
This method is *not* meant for use as a _defaults initializer,
because datetime fields are automatically converted upon
           display on client side. For _defaults, :meth:`fields.datetime.now`
           should be used instead.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: datetime
:return: timestamp converted to timezone-aware datetime in context
timezone
"""
assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
if context and context.get('tz'):
tz_name = context['tz']
else:
registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
user = registry['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
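    # Usage sketch (hypothetical variable names): converting a stored UTC
    # datetime string to the client's timezone for display.
    #   utc_dt = DT.datetime.strptime(value, tools.DEFAULT_SERVER_DATETIME_FORMAT)
    #   local_dt = fields.datetime.context_timestamp(cr, uid, utc_dt, context=context)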
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
value = datetime.context_timestamp(cr, uid, DT.datetime.strptime(value, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
return tools.ustr(value.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT))
class binary(_column):
_type = 'binary'
_classic_read = False
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast in symbol_f.
# This str coercion will only work for pure ASCII unicode strings,
    # on purpose - non-base64 data must be passed as 8-bit byte strings.
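    # Usage sketch (hypothetical field name): callers are expected to pass
    # base64-encoded data, e.g. values = {'datas': base64.b64encode(raw_bytes)}.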
_symbol_c = '%s'
_symbol_f = lambda symb: symb and Binary(str(symb)) or None
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self, x: x and str(x)
__slots__ = ['filters']
def __init__(self, string='unknown', filters=None, **args):
args['_prefetch'] = args.get('_prefetch', False)
_column.__init__(self, string=string, filters=filters, **args)
def get(self, cr, obj, ids, name, user=None, context=None, values=None):
if not context:
context = {}
if not values:
values = []
res = {}
for i in ids:
val = None
for v in values:
if v['id'] == i:
val = v[name]
break
# If client is requesting only the size of the field, we return it instead
# of the content. Presumably a separate request will be done to read the actual
# content if it's needed at some point.
# TODO: after 6.0 we should consider returning a dict with size and content instead of
# having an implicit convention for the value
if val and context.get('bin_size_%s' % name, context.get('bin_size')):
res[i] = tools.human_size(long(val))
else:
res[i] = val
return res
class selection(_column):
_type = 'selection'
__slots__ = ['selection']
def __init__(self, selection, string='unknown', **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, selection=selection, **args)
def to_field_args(self):
args = super(selection, self).to_field_args()
args['selection'] = self.selection
return args
@classmethod
def reify(cls, cr, uid, model, field, context=None):
""" Munges the field's ``selection`` attribute as necessary to get
        something usable out of it: calls it if it's a function, applies
translations to labels if it's not.
A callable ``selection`` is considered translated on its own.
:param orm.Model model:
:param _column field:
"""
if callable(field.selection):
return field.selection(model, cr, uid, context)
if not (context and 'lang' in context):
return field.selection
        # field_to_dict isn't given a field name, only a field object, so we
        # need to get the name back in order to perform the translation lookup
field_name = next(
name for name, column in model._columns.iteritems()
if column == field)
translation_filter = "%s,%s" % (model._name, field_name)
translate = functools.partial(
model.pool['ir.translation']._get_source,
cr, uid, translation_filter, 'selection', context['lang'])
return [
(value, translate(label))
for value, label in field.selection
]
# ---------------------------------------------------------
# Relational fields
# ---------------------------------------------------------
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update
# (2, ID) remove (delete)
# (3, ID) unlink one (target id or target of relation)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
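# Usage sketch (hypothetical model and fields): these tuples are passed as the
# value of a relational field in write()/create(), e.g.
#   partner_obj.write(cr, uid, [pid], {
#       'child_ids': [(0, 0, {'name': 'New contact'}),   # create and link
#                     (3, child_id_to_detach)],          # unlink one
#   })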
class many2one(_column):
_classic_read = False
_classic_write = True
_type = 'many2one'
_symbol_c = '%s'
_symbol_f = lambda x: x or None
_symbol_set = (_symbol_c, _symbol_f)
__slots__ = ['_obj', '_auto_join']
def __init__(self, obj, string='unknown', auto_join=False, **args):
args['ondelete'] = args.get('ondelete', 'set null')
_column.__init__(self, string=string, **args)
self._obj = obj
self._auto_join = auto_join
def to_field_args(self):
args = super(many2one, self).to_field_args()
args['comodel_name'] = self._obj
args['auto_join'] = self._auto_join
return args
def set(self, cr, obj_src, id, field, values, user=None, context=None):
if not context:
context = {}
obj = obj_src.pool[self._obj]
self._table = obj._table
if type(values) == type([]):
for act in values:
if act[0] == 0:
id_new = obj.create(cr, act[2])
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
elif act[0] == 1:
obj.write(cr, [act[1]], act[2], context=context)
elif act[0] == 2:
cr.execute('delete from '+self._table+' where id=%s', (act[1],))
elif act[0] == 3 or act[0] == 5:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
elif act[0] == 4:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (act[1], id))
else:
if values:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (values, id))
else:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
return value[1] if isinstance(value, tuple) else tools.ustr(value)
class one2many(_column):
_classic_read = False
_classic_write = False
_type = 'one2many'
__slots__ = ['_obj', '_fields_id', '_limit', '_auto_join']
def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
# one2many columns are not copied by default
args['copy'] = args.get('copy', False)
args['_prefetch'] = args.get('_prefetch', False)
_column.__init__(self, string=string, **args)
self._obj = obj
self._fields_id = fields_id
self._limit = limit
self._auto_join = auto_join
        # one2many can't be used as a condition for defaults
assert(self.change_default != True)
def to_field_args(self):
args = super(one2many, self).to_field_args()
args['comodel_name'] = self._obj
args['inverse_name'] = self._fields_id
args['auto_join'] = self._auto_join
args['limit'] = self._limit
return args
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if self._context:
context = dict(context or {})
context.update(self._context)
# retrieve the records in the comodel
comodel = obj.pool[self._obj].browse(cr, user, [], context)
inverse = self._fields_id
domain = self._domain(obj) if callable(self._domain) else self._domain
domain = domain + [(inverse, 'in', ids)]
records = comodel.search(domain, limit=self._limit)
result = {id: [] for id in ids}
# read the inverse of records without prefetching other fields on them
for record in records.with_context(prefetch_fields=False):
# record[inverse] may be a record or an integer
result[int(record[inverse])].append(record.id)
return result
def set(self, cr, obj, id, field, values, user=None, context=None):
result = []
context = dict(context or {})
context.update(self._context)
if not values:
return
obj = obj.pool[self._obj]
rec = obj.browse(cr, user, [], context=context)
with rec.env.norecompute():
_table = obj._table
for act in values:
if act[0] == 0:
act[2][self._fields_id] = id
id_new = obj.create(cr, user, act[2], context=context)
result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
inverse_field = obj._fields.get(self._fields_id)
assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the model has on delete cascade, just delete the row
if inverse_field.ondelete == "cascade":
obj.unlink(cr, user, [act[1]], context=context)
else:
cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
elif act[0] == 4:
# table of the field (parent_model in case of inherit)
field = obj.pool[self._obj]._fields[self._fields_id]
field_model = field.base_field.model_name
field_table = obj.pool[field_model]._table
cr.execute("select 1 from {0} where id=%s and {1}=%s".format(field_table, self._fields_id), (act[1], id))
if not cr.fetchone():
# Must use write() to recompute parent_store structure if needed and check access rules
obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
elif act[0] == 5:
inverse_field = obj._fields.get(self._fields_id)
assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the o2m has a static domain we must respect it when unlinking
domain = self._domain(obj) if callable(self._domain) else self._domain
extra_domain = domain or []
ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
# If the model has cascade deletion, we delete the rows because it is the intended behavior,
# otherwise we only nullify the reverse foreign key column.
if inverse_field.ondelete == "cascade":
obj.unlink(cr, user, ids_to_unlink, context=context)
else:
obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
elif act[0] == 6:
# Must use write() to recompute parent_store structure if needed
obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
ids2 = act[2] or [0]
cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
ids3 = map(lambda x:x[0], cr.fetchall())
obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
return result
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
domain = self._domain(obj) if callable(self._domain) else self._domain
return obj.pool[self._obj].name_search(cr, uid, value, domain, operator, context=context,limit=limit)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update (write fields to ID)
# (2, ID) remove (calls unlink on ID, that will also delete the relationship because of the ondelete)
# (3, ID) unlink (delete the relationship between the two objects but does not delete ID)
# (4, ID) link (add a relationship)
# (5, ID) unlink all
# (6, ?, ids) set a list of links
#
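# Usage sketch (hypothetical model and field): replacing all links at once, e.g.
#   user_obj.write(cr, uid, [user_id], {'groups_id': [(6, 0, [gid1, gid2])]})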
class many2many(_column):
"""Encapsulates the logic of a many-to-many bidirectional relationship, handling the
low-level details of the intermediary relationship table transparently.
A many-to-many relationship is always symmetrical, and can be declared and accessed
from either endpoint model.
If ``rel`` (relationship table name), ``id1`` (source foreign key column name)
    or ``id2`` (destination foreign key column name) are not specified, the system will
provide default values. This will by default only allow one single symmetrical
many-to-many relationship between the source and destination model.
For multiple many-to-many relationship between the same models and for
relationships where source and destination models are the same, ``rel``, ``id1``
and ``id2`` should be specified explicitly.
:param str obj: destination model
:param str rel: optional name of the intermediary relationship table. If not specified,
a canonical name will be derived based on the alphabetically-ordered
model names of the source and destination (in the form: ``amodel_bmodel_rel``).
Automatic naming is not possible when the source and destination are
the same, for obvious ambiguity reasons.
:param str id1: optional name for the column holding the foreign key to the current
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: `src_model_id`).
:param str id2: optional name for the column holding the foreign key to the destination
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: `dest_model_id`)
:param str string: field label
"""
_classic_read = False
_classic_write = False
_type = 'many2many'
__slots__ = ['_obj', '_rel', '_id1', '_id2', '_limit', '_auto_join']
def __init__(self, obj, rel=None, id1=None, id2=None, string='unknown', limit=None, **args):
"""
"""
args['_prefetch'] = args.get('_prefetch', False)
_column.__init__(self, string=string, **args)
self._obj = obj
if rel and '.' in rel:
raise Exception(_('The second argument of the many2many field %s must be a SQL table !'\
'You used %s, which is not a valid SQL table name.')% (string,rel))
self._rel = rel
self._id1 = id1
self._id2 = id2
self._limit = limit
self._auto_join = False
def to_field_args(self):
args = super(many2many, self).to_field_args()
args['comodel_name'] = self._obj
args['relation'] = self._rel
args['column1'] = self._id1
args['column2'] = self._id2
args['limit'] = self._limit
return args
def _sql_names(self, source_model):
"""Return the SQL names defining the structure of the m2m relationship table
:return: (m2m_table, local_col, dest_col) where m2m_table is the table name,
local_col is the name of the column holding the current model's FK, and
                 dest_col is the name of the column holding the destination model's FK.
"""
tbl, col1, col2 = self._rel, self._id1, self._id2
if not all((tbl, col1, col2)):
# the default table name is based on the stable alphabetical order of tables
dest_model = source_model.pool[self._obj]
tables = tuple(sorted([source_model._table, dest_model._table]))
if not tbl:
assert tables[0] != tables[1], 'Implicit/Canonical naming of m2m relationship table '\
'is not possible when source and destination models are '\
'the same'
tbl = '%s_%s_rel' % tables
if not col1:
col1 = '%s_id' % source_model._table
if not col2:
col2 = '%s_id' % dest_model._table
return tbl, col1, col2
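    # For example (derived from the defaults above, hypothetical tables): with a
    # source table 'res_partner' and a destination table 'res_partner_category',
    # the implicit names are ('res_partner_res_partner_category_rel',
    # 'res_partner_id', 'res_partner_category_id').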
def _get_query_and_where_params(self, cr, model, ids, values, where_params):
""" Extracted from ``get`` to facilitate fine-tuning of the generated
query. """
query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
FROM %(rel)s, %(from_c)s \
WHERE %(rel)s.%(id1)s IN %%s \
AND %(rel)s.%(id2)s = %(tbl)s.id \
%(where_c)s \
%(order_by)s \
%(limit)s \
OFFSET %(offset)d' \
% values
return query, where_params
def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
if not context:
context = {}
if not values:
values = {}
res = {}
if not ids:
return res
for id in ids:
res[id] = []
if offset:
_logger.warning(
"Specifying offset at a many2many.get() is deprecated and may"
" produce unpredictable results.")
obj = model.pool[self._obj]
rel, id1, id2 = self._sql_names(model)
# static domains are lists, and are evaluated both here and on client-side, while string
        # domains are supposed to be dynamic and evaluated on client-side only (thus ignored here)
# FIXME: make this distinction explicit in API!
domain = isinstance(self._domain, list) and self._domain or []
wquery = obj._where_calc(cr, user, domain, context=context)
obj._apply_ir_rules(cr, user, wquery, 'read', context=context)
order_by = obj._generate_order_by(None, wquery)
from_c, where_c, where_params = wquery.get_sql()
if where_c:
where_c = ' AND ' + where_c
limit_str = ''
if self._limit is not None:
limit_str = ' LIMIT %d' % self._limit
query, where_params = self._get_query_and_where_params(cr, model, ids, {'rel': rel,
'from_c': from_c,
'tbl': obj._table,
'id1': id1,
'id2': id2,
'where_c': where_c,
'limit': limit_str,
'order_by': order_by,
'offset': offset,
}, where_params)
cr.execute(query, [tuple(ids),] + where_params)
for r in cr.fetchall():
res[r[1]].append(r[0])
return res
def set(self, cr, model, id, name, values, user=None, context=None):
if not context:
context = {}
if not values:
return
rel, id1, id2 = self._sql_names(model)
obj = model.pool[self._obj]
for act in values:
if not (isinstance(act, list) or isinstance(act, tuple)) or not act:
continue
if act[0] == 0:
idnew = obj.create(cr, user, act[2], context=context)
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
elif act[0] == 4:
# following queries are in the same transaction - so should be relatively safe
cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
if not cr.fetchone():
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
elif act[0] == 5:
cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
elif act[0] == 6:
d1, d2,tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
if d1:
d1 = ' and ' + ' and '.join(d1)
else:
d1 = ''
cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
for act_nbr in act[2]:
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))
#
# TODO: use a name_search
#
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
def get_nice_size(value):
size = 0
if isinstance(value, (int,long)):
size = value
elif value: # this is supposed to be a string
size = len(value)
if size < 12: # suppose human size
return value
return tools.human_size(size)
# See http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# and http://bugs.python.org/issue10066
invalid_xml_low_bytes = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]')
def sanitize_binary_value(value):
# binary fields should be 7-bit ASCII base64-encoded data,
# but we do additional sanity checks to make sure the values
# are not something else that won't pass via XML-RPC
if isinstance(value, (xmlrpclib.Binary, tuple, list, dict)):
# these builtin types are meant to pass untouched
return value
# Handle invalid bytes values that will cause problems
# for XML-RPC. See for more info:
# - http://bugs.python.org/issue10066
# - http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# Coercing to unicode would normally allow it to properly pass via
# XML-RPC, transparently encoded as UTF-8 by xmlrpclib.
# (this works for _any_ byte values, thanks to the fallback
# to latin-1 passthrough encoding when decoding to unicode)
value = tools.ustr(value)
# Due to Python bug #10066 this could still yield invalid XML
# bytes, specifically in the low byte range, that will crash
# the decoding side: [\x00-\x08\x0b-\x0c\x0e-\x1f]
# So check for low bytes values, and if any, perform
# base64 encoding - not very smart or useful, but this is
# our last resort to avoid crashing the request.
if invalid_xml_low_bytes.search(value):
# b64-encode after restoring the pure bytes with latin-1
# passthrough encoding
value = base64.b64encode(value.encode('latin-1'))
return value
# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
"""
A field whose value is computed by a function (rather
than being read from the database).
:param fnct: the callable that will compute the field value.
:param arg: arbitrary value to be passed to ``fnct`` when computing the value.
:param fnct_inv: the callable that will allow writing values in that field
(if not provided, the field is read-only).
:param fnct_inv_arg: arbitrary value to be passed to ``fnct_inv`` when
writing a value.
:param str type: type of the field simulated by the function field
:param fnct_search: the callable that allows searching on the field
(if not provided, search will not return any result).
:param store: store computed value in database
(see :ref:`The *store* parameter <field-function-store>`).
:type store: True or dict specifying triggers for field computation
:param multi: name of batch for batch computation of function fields.
All fields with the same batch name will be computed by
a single function call. This changes the signature of the
``fnct`` callable.
.. _field-function-fnct: The ``fnct`` parameter
.. rubric:: The ``fnct`` parameter
The callable implementing the function field must have the following signature:
.. function:: fnct(model, cr, uid, ids, field_name(s), arg, context)
Implements the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param field_name(s): name of the field to compute, or if ``multi`` is provided,
list of field names to compute.
:type field_name(s): str | [str]
:param arg: arbitrary value passed when declaring the function field
:rtype: dict
:return: mapping of ``ids`` to computed values, or if multi is provided,
to a map of field_names to computed values
The values in the returned dictionary must be of the type specified by the type
argument in the field declaration.
Here is an example with a simple function ``char`` function field::
# declarations
def compute(self, cr, uid, ids, field_name, arg, context):
result = {}
# ...
return result
_columns['my_char'] = fields.function(compute, type='char', size=50)
# when called with ``ids=[1,2,3]``, ``compute`` could return:
{
1: 'foo',
2: 'bar',
3: False # null values should be returned explicitly too
}
If ``multi`` is set, then ``field_name`` is replaced by ``field_names``: a list
of the field names that should be computed. Each value in the returned
dictionary must then be a dictionary mapping field names to values.
Here is an example where two function fields (``name`` and ``age``)
are both computed by a single function field::
# declarations
            def compute_person_data(self, cr, uid, ids, field_names, arg, context):
result = {}
# ...
return result
_columns['name'] = fields.function(compute_person_data, type='char',\
size=50, multi='person_data')
            _columns['age'] = fields.function(compute_person_data, type='integer',\
multi='person_data')
# when called with ``ids=[1,2,3]``, ``compute_person_data`` could return:
{
1: {'name': 'Bob', 'age': 23},
2: {'name': 'Sally', 'age': 19},
3: {'name': 'unknown', 'age': False}
}
.. _field-function-fnct-inv:
.. rubric:: The ``fnct_inv`` parameter
This callable implements the write operation for the function field
and must have the following signature:
.. function:: fnct_inv(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context)
Callable that implements the ``write`` operation for the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param int id: the identifier of the object to write on
:param str field_name: name of the field to set
:param fnct_inv_arg: arbitrary value passed when declaring the function field
:return: True
When writing values for a function field, the ``multi`` parameter is ignored.
.. _field-function-fnct-search:
.. rubric:: The ``fnct_search`` parameter
This callable implements the search operation for the function field
and must have the following signature:
.. function:: fnct_search(model, cr, uid, model_again, field_name, criterion, context)
Callable that implements the ``search`` operation for the function field by expanding
a search criterion based on the function field into a new domain based only on
columns that are stored in the database.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param orm model_again: same value as ``model`` (seriously! this is for backwards
compatibility)
:param str field_name: name of the field to search on
:param list criterion: domain component specifying the search criterion on the field.
:rtype: list
:return: domain to use instead of ``criterion`` when performing the search.
This new domain must be based only on columns stored in the database, as it
will be used directly without any translation.
The returned value must be a domain, that is, a list of the form [(field_name, operator, operand)].
The most generic way to implement ``fnct_search`` is to directly search for the records that
match the given ``criterion``, and return their ``ids`` wrapped in a domain, such as
``[('id','in',[1,3,5])]``.
.. _field-function-store:
.. rubric:: The ``store`` parameter
The ``store`` parameter allows caching the result of the field computation in the
database, and defining the triggers that will invalidate that cache and force a
recomputation of the function field.
When not provided, the field is computed every time its value is read.
The value of ``store`` may be either ``True`` (to recompute the field value whenever
any field in the same record is modified), or a dictionary specifying a more
flexible set of recomputation triggers.
A trigger specification is a dictionary that maps the names of the models that
will trigger the computation, to a tuple describing the trigger rule, in the
following form::
store = {
'trigger_model': (mapping_function,
['trigger_field1', 'trigger_field2'],
priority),
}
A trigger rule is defined by a 3-item tuple where:
* The ``mapping_function`` is defined as follows:
.. function:: mapping_function(trigger_model, cr, uid, trigger_ids, context)
Callable that maps record ids of a trigger model to ids of the
corresponding records in the source model (whose field values
need to be recomputed).
:param orm model: trigger_model
:param list trigger_ids: ids of the records of trigger_model that were
modified
:rtype: list
:return: list of ids of the source model whose function field values
need to be recomputed
        * The second item is a list of the fields that should act as triggers for
the computation. If an empty list is given, all fields will act as triggers.
* The last item is the priority, used to order the triggers when processing them
after any write operation on a model that has function field triggers. The
default priority is 10.
In fact, setting store = True is the same as using the following trigger dict::
store = {
'model_itself': (lambda self, cr, uid, ids, context: ids,
[],
10)
}
"""
_properties = True
__slots__ = [
'_type',
'_classic_read',
'_classic_write',
'_symbol_c',
'_symbol_f',
'_symbol_set',
'_symbol_get',
'_fnct',
'_arg',
'_fnct_inv',
'_fnct_inv_arg',
'_fnct_search',
'_multi',
'store',
'_digits',
'_digits_compute',
'selection',
'_obj',
]
@property
def digits(self):
if self._digits_compute:
with _get_cursor() as cr:
return self._digits_compute(cr)
else:
return self._digits
#
# multi: compute several fields in one call
#
def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, store=False, multi=False, **args):
self._classic_read = False
self._classic_write = False
self._prefetch = False
self._symbol_c = '%s'
self._symbol_f = _symbol_set
self._symbol_set = (self._symbol_c, self._symbol_f)
self._symbol_get = None
# pop attributes that should not be assigned to self
self._digits = args.pop('digits', (16,2))
self._digits_compute = args.pop('digits_compute', None)
self._obj = args.pop('relation', obj)
# function fields are not copied by default
args['copy'] = args.get('copy', False)
_column.__init__(self, **args)
self._type = type
self._fnct = fnct
self._arg = arg
self._fnct_inv = fnct_inv
self._fnct_inv_arg = fnct_inv_arg
self._fnct_search = fnct_search
self.store = store
self._multi = multi
if not fnct_inv:
self.readonly = 1
if not fnct_search and not store:
self.selectable = False
if callable(args.get('selection')):
from openerp import api
self.selection = api.expected(api.cr_uid_context, args['selection'])
if store:
if self._type != 'many2one':
# m2o fields need to return tuples with name_get, not just foreign keys
self._classic_read = True
self._classic_write = True
if type=='binary':
self._symbol_get=lambda x:x and str(x)
else:
self._prefetch = True
if type == 'char':
self._symbol_c = char._symbol_c
self._symbol_f = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
elif type == 'float':
self._symbol_c = float._symbol_c
self._symbol_f = lambda x: _symbol_set_float(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
else:
type_class = globals().get(type)
if type_class is not None:
self._symbol_c = type_class._symbol_c
self._symbol_f = type_class._symbol_f
self._symbol_set = type_class._symbol_set
def new(self, _computed_field=False, **args):
if _computed_field:
# field is computed, we need an instance of a non-function column
type_class = globals()[self._type]
return type_class(**args)
else:
# HACK: function fields are tricky to recreate, simply return a copy
import copy
return copy.copy(self)
def to_field_args(self):
args = super(function, self).to_field_args()
args['store'] = bool(self.store)
if self._type in ('float',):
args['digits'] = self._digits_compute or self._digits
elif self._type in ('selection', 'reference'):
args['selection'] = self.selection
elif self._type in ('many2one', 'one2many', 'many2many'):
args['comodel_name'] = self._obj
return args
def digits_change(self, cr):
pass
def search(self, cr, uid, obj, name, args, context=None):
if not self._fnct_search:
#CHECKME: should raise an exception
return []
return self._fnct_search(obj, cr, uid, obj, name, args, context=context)
def postprocess(self, cr, uid, obj, field, value=None, context=None):
return self._postprocess_batch(cr, uid, obj, field, {0: value}, context=context)[0]
def _postprocess_batch(self, cr, uid, obj, field, values, context=None):
if not values:
return values
if context is None:
context = {}
field_type = obj._columns[field]._type
new_values = dict(values)
if field_type == 'binary':
if context.get('bin_size'):
# client requests only the size of binary fields
for rid, value in values.iteritems():
if value:
new_values[rid] = get_nice_size(value)
elif not context.get('bin_raw'):
for rid, value in values.iteritems():
if value:
new_values[rid] = sanitize_binary_value(value)
return new_values
def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
multi = self._multi
# if we already have a value, don't recompute it.
        # This happens in the case of stored many2one fields
if values and not multi and name in values[0]:
result = dict((v['id'], v[name]) for v in values)
elif values and multi and all(n in values[0] for n in name):
result = dict((v['id'], dict((n, v[n]) for n in name)) for v in values)
else:
result = self._fnct(obj, cr, uid, ids, name, self._arg, context)
if multi:
swap = {}
for rid, values in result.iteritems():
for f, v in values.iteritems():
if f not in name:
continue
swap.setdefault(f, {})[rid] = v
for field, values in swap.iteritems():
new_values = self._postprocess_batch(cr, uid, obj, field, values, context)
for rid, value in new_values.iteritems():
result[rid][field] = value
else:
result = self._postprocess_batch(cr, uid, obj, name, result, context)
return result
def set(self, cr, obj, id, name, value, user=None, context=None):
if not context:
context = {}
if self._fnct_inv:
self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# Function fields are supposed to emulate a basic field type,
# so they can delegate to the basic type for record name rendering
return globals()[field._type]._as_display_name(field, cr, uid, obj, value, context=context)
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
class related(function):
"""Field that points to some data inside another field of the current record.
Example::
_columns = {
'foo_id': fields.many2one('my.foo', 'Foo'),
'bar': fields.related('foo_id', 'frol', type='char', string='Frol of Foo'),
}
"""
__slots__ = ['arg', '_relations']
def _related_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
# assume self._arg = ('foo', 'bar', 'baz')
# domain = [(name, op, val)] => search [('foo.bar.baz', op, val)]
field = '.'.join(self._arg)
return map(lambda x: (field, x[1], x[2]), domain)
def _related_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for instance in obj.browse(cr, uid, ids, context=context):
# traverse all fields except the last one
for field in self.arg[:-1]:
instance = instance[field][:1]
if instance:
# write on the last field of the target record
instance.write({self.arg[-1]: values})
def _related_read(self, obj, cr, uid, ids, field_name, args, context=None):
res = {}
for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
value = record
# traverse all fields except the last one
for field in self.arg[:-1]:
value = value[field][:1]
# read the last field on the target record
res[record.id] = value[self.arg[-1]]
if self._type == 'many2one':
# res[id] is a recordset; convert it to (id, name) or False.
            # Perform name_get as root, as seeing the name of a related object depends on
            # the access rights of the source document, not the target, so the user may not have access.
value_ids = list(set(value.id for value in res.itervalues() if value))
value_name = dict(obj.pool[self._obj].name_get(cr, SUPERUSER_ID, value_ids, context=context))
res = dict((id, bool(value) and (value.id, value_name[value.id])) for id, value in res.iteritems())
elif self._type in ('one2many', 'many2many'):
# res[id] is a recordset; convert it to a list of ids
res = dict((id, value.ids) for id, value in res.iteritems())
return res
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(related, self).__init__(self._related_read, arg, self._related_write, fnct_inv_arg=arg, fnct_search=self._related_search, **args)
if self.store is True:
# TODO: improve here to change self.store = {...} according to related objects
pass
class sparse(function):
__slots__ = ['serialization_field']
def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
"""
+ For a many2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
(3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
(4, ID) link to existing record with id = ID (adds a relationship)
(5) unlink all (like using (3,ID) for all linked records)
(6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
Example:
[(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
+ For a one2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
Example:
[(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
"""
if self._type == 'many2many':
if not value:
return []
assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
return value[0][2]
elif self._type == 'one2many':
if not read_value:
read_value = []
relation_obj = obj.pool[self.relation]
for vals in value:
assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
if vals[0] == 0:
read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
elif vals[0] == 1:
relation_obj.write(cr, uid, vals[1], vals[2], context=context)
elif vals[0] == 2:
relation_obj.unlink(cr, uid, vals[1], context=context)
read_value.remove(vals[1])
return read_value
return value
def _sparse_write(self,obj,cr, uid, ids, field_name, value, args, context=None):
if not type(ids) == list:
ids = [ids]
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
if value is None:
# simply delete the key to unset it.
serialized.pop(field_name, None)
else:
serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
obj.write(cr, uid, ids, {self.serialization_field: serialized}, context=context)
return True
def _sparse_read(self, obj, cr, uid, ids, field_names, args, context=None):
results = {}
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
results[record.id] = {}
for field_name in field_names:
field_type = obj._columns[field_name]._type
value = serialized.get(field_name, False)
if field_type in ('one2many','many2many'):
value = value or []
if value:
# filter out deleted records as superuser
relation_obj = obj.pool[obj._columns[field_name].relation]
value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
if type(value) in (int,long) and field_type == 'many2one':
relation_obj = obj.pool[obj._columns[field_name].relation]
# check for deleted record as superuser
if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
value = False
results[record.id][field_name] = value
return results
def __init__(self, serialization_field, **kwargs):
self.serialization_field = serialization_field
super(sparse, self).__init__(self._sparse_read, fnct_inv=self._sparse_write, multi='__sparse_multi', **kwargs)
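# A minimal usage sketch (assumed, not part of the original module): a sparse
# field is declared next to the serialized column that physically stores its
# value; the column names 'extra' and 'x_phone' are placeholders.
#
# _columns = {
#     'extra': fields.serialized('Extra data'),
#     'x_phone': fields.sparse(type='char', string='Phone',
#                              serialization_field='extra'),
# }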
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
class dummy(function):
__slots__ = ['arg', '_relations']
def _dummy_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
return []
def _dummy_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
return False
def _dummy_read(self, obj, cr, uid, ids, field_name, args, context=None):
return {}
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(dummy, self).__init__(self._dummy_read, arg, self._dummy_write, fnct_inv_arg=arg, fnct_search=self._dummy_search, **args)
# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
""" A field able to store an arbitrary python data structure.
Note: only plain components allowed.
"""
_type = 'serialized'
__slots__ = []
def _symbol_set_struct(val):
return simplejson.dumps(val)
def _symbol_get_struct(self, val):
return simplejson.loads(val or '{}')
_symbol_c = '%s'
_symbol_f = _symbol_set_struct
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = _symbol_get_struct
def __init__(self, *args, **kwargs):
kwargs['_prefetch'] = kwargs.get('_prefetch', False)
super(serialized, self).__init__(*args, **kwargs)
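# Illustrative note (assumed usage, not from the original file): a serialized
# column stores plain Python structures as JSON text via simplejson, e.g.
#
# _columns = {'meta': fields.serialized('Metadata')}
#
# A value like {'a': 1, 'b': [2, 3]} is dumped on write and loaded back into
# a dict on read; an empty database value reads back as {}.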
# TODO: review this class completely for speed improvement
class property(function):
__slots__ = []
def to_field_args(self):
args = super(property, self).to_field_args()
args['company_dependent'] = True
return args
def _property_search(self, tobj, cr, uid, obj, name, domain, context=None):
ir_property = obj.pool['ir.property']
result = []
for field, operator, value in domain:
result += ir_property.search_multi(cr, uid, name, tobj._name, operator, value, context=context)
return result
def _property_write(self, obj, cr, uid, id, prop_name, value, obj_dest, context=None):
ir_property = obj.pool['ir.property']
ir_property.set_multi(cr, uid, prop_name, obj._name, {id: value}, context=context)
return True
def _property_read(self, obj, cr, uid, ids, prop_names, obj_dest, context=None):
ir_property = obj.pool['ir.property']
res = {id: {} for id in ids}
for prop_name in prop_names:
field = obj._fields[prop_name]
values = ir_property.get_multi(cr, uid, prop_name, obj._name, ids, context=context)
if field.type == 'many2one':
# name_get the non-null values as SUPERUSER_ID
vals = sum(set(filter(None, values.itervalues())),
obj.pool[field.comodel_name].browse(cr, uid, [], context=context))
vals_name = dict(vals.sudo().name_get()) if vals else {}
for id, value in values.iteritems():
ng = False
if value and value.id in vals_name:
ng = value.id, vals_name[value.id]
res[id][prop_name] = ng
else:
for id, value in values.iteritems():
res[id][prop_name] = value
return res
def __init__(self, **args):
if 'view_load' in args:
_logger.warning("view_load attribute is deprecated on ir.fields. Args: %r", args)
args = dict(args)
args['obj'] = args.pop('relation', '') or args.get('obj', '')
super(property, self).__init__(
fnct=self._property_read,
fnct_inv=self._property_write,
fnct_search=self._property_search,
multi='properties',
**args
)
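# A minimal declaration sketch (illustrative; the model and field names are
# assumptions): a property field is declared like a normal column, but its
# values are stored as company-dependent ir.property records.
#
# _columns = {
#     'property_account_payable': fields.property(
#         type='many2one',
#         relation='account.account',
#         string='Account Payable'),
# }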
class column_info(object):
""" Struct containing details about an osv column, either one local to
its model, or one inherited via _inherits.
.. attribute:: name
name of the column
.. attribute:: column
column instance, subclass of :class:`_column`
.. attribute:: parent_model
if the column is inherited, name of the model that contains it,
``None`` for local columns.
.. attribute:: parent_column
the name of the column containing the m2o relationship to the
parent model that contains this column, ``None`` for local columns.
.. attribute:: original_parent
if the column is inherited, name of the original parent model that
contains it i.e in case of multilevel inheritance, ``None`` for
local columns.
"""
__slots__ = ['name', 'column', 'parent_model', 'parent_column', 'original_parent']
def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
self.name = name
self.column = column
self.parent_model = parent_model
self.parent_column = parent_column
self.original_parent = original_parent
def __str__(self):
return '%s(%s, %s, %s, %s, %s)' % (
self.__class__.__name__, self.name, self.column,
self.parent_model, self.parent_column, self.original_parent)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
a-parhom/edx-platform | common/lib/capa/capa/tests/test_answer_pool.py | 37 | 27075 | """
Tests the logic of the "answer-pool" attribute, e.g.
<choicegroup answer-pool="4">
"""
import unittest
import textwrap
from capa.tests.helpers import test_capa_system, new_loncapa_problem
from capa.responsetypes import LoncapaProblemError
class CapaAnswerPoolTest(unittest.TestCase):
"""Capa Answer Pool Test"""
def setUp(self):
super(CapaAnswerPoolTest, self).setUp()
self.system = test_capa_system()
# XML problem setup used by a few tests.
common_question_xml = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
def test_answer_pool_4_choices_1_multiplechoiceresponse_seed1(self):
problem = new_loncapa_problem(self.common_question_xml, seed=723)
the_html = problem.get_html()
# [('choice_3', u'wrong-3'), ('choice_5', u'correct-2'), ('choice_1', u'wrong-2'), ('choice_4', u'wrong-4')]
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertTrue(response.has_answerpool())
self.assertEqual(response.unmask_order(), ['choice_3', 'choice_5', 'choice_1', 'choice_4'])
def test_answer_pool_4_choices_1_multiplechoiceresponse_seed2(self):
problem = new_loncapa_problem(self.common_question_xml, seed=9)
the_html = problem.get_html()
# [('choice_0', u'wrong-1'), ('choice_4', u'wrong-4'), ('choice_3', u'wrong-3'), ('choice_2', u'correct-1')]
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertTrue(hasattr(response, 'has_answerpool'))
self.assertEqual(response.unmask_order(), ['choice_0', 'choice_4', 'choice_3', 'choice_2'])
def test_no_answer_pool_4_choices_1_multiplechoiceresponse(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_0_answer_pool_4_choices_1_multiplechoiceresponse(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="0">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_invalid_answer_pool_value(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2.3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "answer-pool"):
new_loncapa_problem(xml_str)
def test_invalid_answer_pool_none_correct(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="false">wrong!!</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "1 correct.*1 incorrect"):
new_loncapa_problem(xml_str)
def test_invalid_answer_pool_all_correct(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="true">!wrong-1</choice>
<choice correct="true">!wrong-2</choice>
<choice correct="true">!wrong-3</choice>
<choice correct="true">!wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "1 correct.*1 incorrect"):
new_loncapa_problem(xml_str)
def test_answer_pool_5_choices_1_multiplechoiceresponse_seed1(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="5">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'correct-2'.*'wrong-1'.*'wrong-2'.*.*'wrong-3'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertEqual(response.unmask_order(), ['choice_5', 'choice_0', 'choice_1', 'choice_3', 'choice_4'])
def test_answer_pool_2_multiplechoiceresponses_seed1(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-2'.*'wrong-1'.*'correct-2'.*\].*</div>" # rng shared
# str2 = r"<div>.*\[.*'correct-2'.*'wrong-2'.*'wrong-3'.*\].*</div>" # rng independent
str3 = r"<div>\{.*'1_solution_2'.*\}</div>"
str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_2_multiplechoiceresponses_seed2(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=9)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-2'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>"
str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_random_consistent(self):
"""
The point of this test is to make sure that the exact randomization
per seed does not change.
"""
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
str1 = (r"<div>.*\[.*'correct-2'.*'wrong-2'.*\].*</div>.*" +
r"<div>.*\[.*'wrong-1'.*'correct-2'.*'wrong-4'.*\].*</div>.*" +
r"<div>.*\[.*'correct-1'.*'wrong-4'.*\].*</div>.*" +
r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*\].*</div>")
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1)
def test_no_answer_pool(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
self.assertRegexpMatches(the_html, str1)
# attributes *not* present
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_answer_pool_and_no_answer_pool(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
str4 = r"<div>\{.*'1_solution_3'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_without_solutionset(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
| agpl-3.0 |
ahu-odoo/odoo | addons/account_voucher/report/__init__.py | 378 | 1083 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_voucher_sales_receipt
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
leppa/home-assistant | homeassistant/components/auth/__init__.py | 3 | 17184 | """Component to allow users to login and get tokens.
# POST /auth/token
This is an OAuth2 endpoint for granting tokens. We currently support the grant
types "authorization_code" and "refresh_token". Because we follow the OAuth2
spec, data should be sent formatted as x-www-form-urlencoded. Examples will
be in JSON as it's more readable.
## Grant type authorization_code
Exchange the authorization code retrieved from the login flow for tokens.
{
"client_id": "https://hassbian.local:8123/",
"grant_type": "authorization_code",
"code": "411ee2f916e648d691e937ae9344681e"
}
Return value will be the access and refresh tokens. The access token will have
a limited expiration. New access tokens can be requested using the refresh
token.
{
"access_token": "ABCDEFGH",
"expires_in": 1800,
"refresh_token": "IJKLMNOPQRST",
"token_type": "Bearer"
}
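As a hedged illustration (not part of the original documentation), the same
request sent over HTTP is form-encoded rather than JSON; with curl the host
and code below are placeholders:

    curl -X POST https://hassbian.local:8123/auth/token \
        -d "client_id=https://hassbian.local:8123/" \
        -d "grant_type=authorization_code" \
        -d "code=411ee2f916e648d691e937ae9344681e"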
## Grant type refresh_token
Request a new access token using a refresh token.
{
"client_id": "https://hassbian.local:8123/",
"grant_type": "refresh_token",
"refresh_token": "IJKLMNOPQRST"
}
Return value will be a new access token. The access token will have
a limited expiration.
{
"access_token": "ABCDEFGH",
"expires_in": 1800,
"token_type": "Bearer"
}
## Revoking a refresh token
It is also possible to revoke a refresh token and all access tokens that have
ever been granted by that refresh token. Response code will ALWAYS be 200.
{
"token": "IJKLMNOPQRST",
"action": "revoke"
}
# Websocket API
## Get current user
Sending the websocket command `auth/current_user` will return the current user
of the active websocket connection.
{
"id": 10,
"type": "auth/current_user",
}
The result payload looks like this:
{
"id": 10,
"type": "result",
"success": true,
"result": {
"id": "USER_ID",
"name": "John Doe",
"is_owner": true,
"credentials": [{
"auth_provider_type": "homeassistant",
"auth_provider_id": null
}],
"mfa_modules": [{
"id": "totp",
"name": "TOTP",
"enabled": true
}]
}
}
## Create a long-lived access token
Sending the websocket command `auth/long_lived_access_token` will create
a long-lived access token for the current user. The access token will not be
saved in Home Assistant, so the user needs to record the token in a secure place.
{
"id": 11,
"type": "auth/long_lived_access_token",
"client_name": "GPS Logger",
"lifespan": 365
}
Result will be a long-lived access token:
{
"id": 11,
"type": "result",
"success": true,
"result": "ABCDEFGH"
}
"""
from datetime import timedelta
import logging
import uuid
from aiohttp import web
import voluptuous as vol
from homeassistant.auth.models import (
TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
Credentials,
User,
)
from homeassistant.components import websocket_api
from homeassistant.components.http import KEY_REAL_IP
from homeassistant.components.http.auth import async_sign_path
from homeassistant.components.http.ban import log_invalid_auth
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.core import HomeAssistant, callback
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from . import indieauth, login_flow, mfa_setup_flow
DOMAIN = "auth"
WS_TYPE_CURRENT_USER = "auth/current_user"
SCHEMA_WS_CURRENT_USER = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_CURRENT_USER}
)
WS_TYPE_LONG_LIVED_ACCESS_TOKEN = "auth/long_lived_access_token"
SCHEMA_WS_LONG_LIVED_ACCESS_TOKEN = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_LONG_LIVED_ACCESS_TOKEN,
vol.Required("lifespan"): int, # days
vol.Required("client_name"): str,
vol.Optional("client_icon"): str,
}
)
WS_TYPE_REFRESH_TOKENS = "auth/refresh_tokens"
SCHEMA_WS_REFRESH_TOKENS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_REFRESH_TOKENS}
)
WS_TYPE_DELETE_REFRESH_TOKEN = "auth/delete_refresh_token"
SCHEMA_WS_DELETE_REFRESH_TOKEN = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_DELETE_REFRESH_TOKEN,
vol.Required("refresh_token_id"): str,
}
)
WS_TYPE_SIGN_PATH = "auth/sign_path"
SCHEMA_WS_SIGN_PATH = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_SIGN_PATH,
vol.Required("path"): str,
vol.Optional("expires", default=30): int,
}
)
RESULT_TYPE_CREDENTIALS = "credentials"
RESULT_TYPE_USER = "user"
_LOGGER = logging.getLogger(__name__)
@bind_hass
def create_auth_code(hass, client_id: str, user: User) -> str:
"""Create an authorization code to fetch tokens."""
return hass.data[DOMAIN](client_id, user)
async def async_setup(hass, config):
"""Component to allow users to login."""
store_result, retrieve_result = _create_auth_code_store()
hass.data[DOMAIN] = store_result
hass.http.register_view(TokenView(retrieve_result))
hass.http.register_view(LinkUserView(retrieve_result))
hass.components.websocket_api.async_register_command(
WS_TYPE_CURRENT_USER, websocket_current_user, SCHEMA_WS_CURRENT_USER
)
hass.components.websocket_api.async_register_command(
WS_TYPE_LONG_LIVED_ACCESS_TOKEN,
websocket_create_long_lived_access_token,
SCHEMA_WS_LONG_LIVED_ACCESS_TOKEN,
)
hass.components.websocket_api.async_register_command(
WS_TYPE_REFRESH_TOKENS, websocket_refresh_tokens, SCHEMA_WS_REFRESH_TOKENS
)
hass.components.websocket_api.async_register_command(
WS_TYPE_DELETE_REFRESH_TOKEN,
websocket_delete_refresh_token,
SCHEMA_WS_DELETE_REFRESH_TOKEN,
)
hass.components.websocket_api.async_register_command(
WS_TYPE_SIGN_PATH, websocket_sign_path, SCHEMA_WS_SIGN_PATH
)
await login_flow.async_setup(hass, store_result)
await mfa_setup_flow.async_setup(hass)
return True
class TokenView(HomeAssistantView):
"""View to issue or revoke tokens."""
url = "/auth/token"
name = "api:auth:token"
requires_auth = False
cors_allowed = True
def __init__(self, retrieve_user):
"""Initialize the token view."""
self._retrieve_user = retrieve_user
@log_invalid_auth
async def post(self, request):
"""Grant a token."""
hass = request.app["hass"]
data = await request.post()
grant_type = data.get("grant_type")
# IndieAuth 6.3.5
# The revocation endpoint is the same as the token endpoint.
# The revocation request includes an additional parameter,
# action=revoke.
if data.get("action") == "revoke":
return await self._async_handle_revoke_token(hass, data)
if grant_type == "authorization_code":
return await self._async_handle_auth_code(
hass, data, str(request[KEY_REAL_IP])
)
if grant_type == "refresh_token":
return await self._async_handle_refresh_token(
hass, data, str(request[KEY_REAL_IP])
)
return self.json({"error": "unsupported_grant_type"}, status_code=400)
async def _async_handle_revoke_token(self, hass, data):
"""Handle revoke token request."""
# OAuth 2.0 Token Revocation [RFC7009]
# 2.2 The authorization server responds with HTTP status code 200
# if the token has been revoked successfully or if the client
# submitted an invalid token.
token = data.get("token")
if token is None:
return web.Response(status=200)
refresh_token = await hass.auth.async_get_refresh_token_by_token(token)
if refresh_token is None:
return web.Response(status=200)
await hass.auth.async_remove_refresh_token(refresh_token)
return web.Response(status=200)
async def _async_handle_auth_code(self, hass, data, remote_addr):
"""Handle authorization code request."""
client_id = data.get("client_id")
if client_id is None or not indieauth.verify_client_id(client_id):
return self.json(
{"error": "invalid_request", "error_description": "Invalid client id"},
status_code=400,
)
code = data.get("code")
if code is None:
return self.json(
{"error": "invalid_request", "error_description": "Invalid code"},
status_code=400,
)
user = self._retrieve_user(client_id, RESULT_TYPE_USER, code)
if user is None or not isinstance(user, User):
return self.json(
{"error": "invalid_request", "error_description": "Invalid code"},
status_code=400,
)
# refresh user
user = await hass.auth.async_get_user(user.id)
if not user.is_active:
return self.json(
{"error": "access_denied", "error_description": "User is not active"},
status_code=403,
)
refresh_token = await hass.auth.async_create_refresh_token(user, client_id)
access_token = hass.auth.async_create_access_token(refresh_token, remote_addr)
return self.json(
{
"access_token": access_token,
"token_type": "Bearer",
"refresh_token": refresh_token.token,
"expires_in": int(
refresh_token.access_token_expiration.total_seconds()
),
}
)
async def _async_handle_refresh_token(self, hass, data, remote_addr):
"""Handle authorization code request."""
client_id = data.get("client_id")
if client_id is not None and not indieauth.verify_client_id(client_id):
return self.json(
{"error": "invalid_request", "error_description": "Invalid client id"},
status_code=400,
)
token = data.get("refresh_token")
if token is None:
return self.json({"error": "invalid_request"}, status_code=400)
refresh_token = await hass.auth.async_get_refresh_token_by_token(token)
if refresh_token is None:
return self.json({"error": "invalid_grant"}, status_code=400)
if refresh_token.client_id != client_id:
return self.json({"error": "invalid_request"}, status_code=400)
access_token = hass.auth.async_create_access_token(refresh_token, remote_addr)
return self.json(
{
"access_token": access_token,
"token_type": "Bearer",
"expires_in": int(
refresh_token.access_token_expiration.total_seconds()
),
}
)
class LinkUserView(HomeAssistantView):
"""View to link existing users to new credentials."""
url = "/auth/link_user"
name = "api:auth:link_user"
def __init__(self, retrieve_credentials):
"""Initialize the link user view."""
self._retrieve_credentials = retrieve_credentials
@RequestDataValidator(vol.Schema({"code": str, "client_id": str}))
async def post(self, request, data):
"""Link a user."""
hass = request.app["hass"]
user = request["hass_user"]
credentials = self._retrieve_credentials(
data["client_id"], RESULT_TYPE_CREDENTIALS, data["code"]
)
if credentials is None:
return self.json_message("Invalid code", status_code=400)
await hass.auth.async_link_user(user, credentials)
return self.json_message("User linked")
@callback
def _create_auth_code_store():
"""Create an in memory store."""
temp_results = {}
@callback
def store_result(client_id, result):
"""Store flow result and return a code to retrieve it."""
if isinstance(result, User):
result_type = RESULT_TYPE_USER
elif isinstance(result, Credentials):
result_type = RESULT_TYPE_CREDENTIALS
else:
raise ValueError("result has to be either User or Credentials")
code = uuid.uuid4().hex
temp_results[(client_id, result_type, code)] = (
dt_util.utcnow(),
result_type,
result,
)
return code
@callback
def retrieve_result(client_id, result_type, code):
"""Retrieve flow result."""
key = (client_id, result_type, code)
if key not in temp_results:
return None
created, _, result = temp_results.pop(key)
# OAuth 4.2.1
# The authorization code MUST expire shortly after it is issued to
# mitigate the risk of leaks. A maximum authorization code lifetime of
# 10 minutes is RECOMMENDED.
if dt_util.utcnow() - created < timedelta(minutes=10):
return result
return None
return store_result, retrieve_result
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_current_user(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return the current user."""
user = connection.user
enabled_modules = await hass.auth.async_get_enabled_mfa(user)
connection.send_message(
websocket_api.result_message(
msg["id"],
{
"id": user.id,
"name": user.name,
"is_owner": user.is_owner,
"is_admin": user.is_admin,
"credentials": [
{
"auth_provider_type": c.auth_provider_type,
"auth_provider_id": c.auth_provider_id,
}
for c in user.credentials
],
"mfa_modules": [
{
"id": module.id,
"name": module.name,
"enabled": module.id in enabled_modules,
}
for module in hass.auth.auth_mfa_modules
],
},
)
)
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_create_long_lived_access_token(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Create or a long-lived access token."""
refresh_token = await hass.auth.async_create_refresh_token(
connection.user,
client_name=msg["client_name"],
client_icon=msg.get("client_icon"),
token_type=TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=msg["lifespan"]),
)
access_token = hass.auth.async_create_access_token(refresh_token)
connection.send_message(websocket_api.result_message(msg["id"], access_token))
@websocket_api.ws_require_user()
@callback
def websocket_refresh_tokens(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return metadata of users refresh tokens."""
current_id = connection.refresh_token_id
connection.send_message(
websocket_api.result_message(
msg["id"],
[
{
"id": refresh.id,
"client_id": refresh.client_id,
"client_name": refresh.client_name,
"client_icon": refresh.client_icon,
"type": refresh.token_type,
"created_at": refresh.created_at,
"is_current": refresh.id == current_id,
"last_used_at": refresh.last_used_at,
"last_used_ip": refresh.last_used_ip,
}
for refresh in connection.user.refresh_tokens.values()
],
)
)
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_delete_refresh_token(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Handle a delete refresh token request."""
refresh_token = connection.user.refresh_tokens.get(msg["refresh_token_id"])
if refresh_token is None:
return websocket_api.error_message(
msg["id"], "invalid_token_id", "Received invalid token"
)
await hass.auth.async_remove_refresh_token(refresh_token)
connection.send_message(websocket_api.result_message(msg["id"], {}))
@websocket_api.ws_require_user()
@callback
def websocket_sign_path(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Handle a sign path request."""
connection.send_message(
websocket_api.result_message(
msg["id"],
{
"path": async_sign_path(
hass,
connection.refresh_token_id,
msg["path"],
timedelta(seconds=msg["expires"]),
)
},
)
)
| apache-2.0 |
flask-admin/flask-admin | flask_admin/model/form.py | 17 | 7221 | import inspect
import warnings
from flask_admin.form import BaseForm, rules
from flask_admin._compat import iteritems
from wtforms.fields import HiddenField
from wtforms.fields.core import UnboundField
from wtforms.validators import InputRequired
from .widgets import XEditableWidget
def converts(*args):
def _inner(func):
func._converter_for = frozenset(args)
return func
return _inner
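# A minimal sketch of the assumed usage (not part of this module): the
# decorator tags a converter method with the column type names it handles,
# and ModelConverterBase.get_converter() later matches on those names. The
# type name, method signature and returned field below are illustrative only.
#
# from wtforms.fields import StringField
#
# class MyConverter(ModelConverterBase):
#     @converts('String')
#     def conv_string(self, field_args, **extra):
#         return StringField(**field_args)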
def create_editable_list_form(form_base_class, form_class, widget=None):
"""
Create a form class with all the fields wrapped in a FieldList.
Wrapping each field in FieldList allows submitting POST requests
in this format: ('<field_name>-<primary_key>', '<value>')
Used in the editable list view.
:param form_base_class:
WTForms form class, by default `form_base_class` from base.
:param form_class:
WTForms form class generated by `form.get_form`.
:param widget:
WTForms widget class. Defaults to `XEditableWidget`.
"""
if widget is None:
widget = XEditableWidget()
class ListForm(form_base_class):
list_form_pk = HiddenField(validators=[InputRequired()])
# iterate FormMeta to get unbound fields, replace widget, copy to ListForm
for name, obj in iteritems(form_class.__dict__):
if isinstance(obj, UnboundField):
obj.kwargs['widget'] = widget
setattr(ListForm, name, obj)
if name == "list_form_pk":
raise Exception('Form already has a list_form_pk column.')
return ListForm
class InlineBaseFormAdmin(object):
"""
Settings for inline form administration.
You can use this class to customize displayed form.
For example::
class MyUserInfoForm(InlineBaseFormAdmin):
form_columns = ('name', 'email')
"""
_defaults = ['form_base_class', 'form_columns', 'form_excluded_columns', 'form_args', 'form_extra_fields']
def __init__(self, **kwargs):
"""
Constructor
:param kwargs:
Additional options
"""
for k in self._defaults:
if not hasattr(self, k):
setattr(self, k, None)
for k, v in iteritems(kwargs):
setattr(self, k, v)
# Convert form rules
form_rules = getattr(self, 'form_rules', None)
if form_rules:
self._form_rules = rules.RuleSet(self, form_rules)
else:
self._form_rules = None
def get_form(self):
"""
If you want to use completely custom form for inline field, you can override
Flask-Admin form generation logic by overriding this method and returning your form.
"""
return None
def postprocess_form(self, form_class):
"""
Post process form. Use this to contribute fields.
For example::
class MyInlineForm(InlineFormAdmin):
def postprocess_form(self, form):
form.value = StringField('value')
return form
class MyAdmin(ModelView):
inline_models = (MyInlineForm(ValueModel),)
"""
return form_class
def on_model_change(self, form, model, is_created):
"""
Called when inline model is about to be saved.
:param form:
Inline form
:param model:
Model
:param is_created:
Will be set to True if the model is being created, False if edited
"""
pass
def _on_model_change(self, form, model, is_created):
"""
Compatibility helper.
"""
try:
self.on_model_change(form, model, is_created)
except TypeError:
msg = ('%s.on_model_change() now accepts third ' +
'parameter is_created. Please update your code') % self.model
warnings.warn(msg)
self.on_model_change(form, model)
class InlineFormAdmin(InlineBaseFormAdmin):
"""
Settings for inline form administration. Used by relational backends (SQLAlchemy, Peewee), where model
class can not be inherited from the parent model definition.
"""
def __init__(self, model, **kwargs):
"""
Constructor
:param model:
Model class
"""
self.model = model
super(InlineFormAdmin, self).__init__(**kwargs)
class ModelConverterBase(object):
def __init__(self, converters=None, use_mro=True):
self.use_mro = use_mro
if not converters:
converters = {}
for name in dir(self):
obj = getattr(self, name)
if hasattr(obj, '_converter_for'):
for classname in obj._converter_for:
converters[classname] = obj
self.converters = converters
def get_converter(self, column):
if self.use_mro:
types = inspect.getmro(type(column.type))
else:
types = [type(column.type)]
# Search by module + name
for col_type in types:
type_string = '%s.%s' % (col_type.__module__, col_type.__name__)
if type_string in self.converters:
return self.converters[type_string]
# Search by name
for col_type in types:
if col_type.__name__ in self.converters:
return self.converters[col_type.__name__]
return None
def get_form(self, model, base_class=BaseForm,
only=None, exclude=None,
field_args=None):
raise NotImplementedError()
class InlineModelConverterBase(object):
form_admin_class = InlineFormAdmin
def __init__(self, view):
"""
Base constructor
:param view:
View class
"""
self.view = view
def get_label(self, info, name):
"""
Get inline model field label
:param info:
Inline model info
:param name:
Field name
"""
form_name = getattr(info, 'form_label', None)
if form_name:
return form_name
column_labels = getattr(self.view, 'column_labels', None)
if column_labels and name in column_labels:
return column_labels[name]
return None
def get_info(self, p):
"""
Figure out InlineFormAdmin information.
:param p:
Inline model. Can be one of:
- ``tuple``, first value is related model instance,
second is dictionary with options
- ``InlineFormAdmin`` instance
- Model class
"""
if isinstance(p, tuple):
return self.form_admin_class(p[0], **p[1])
elif isinstance(p, self.form_admin_class):
return p
return None
class FieldPlaceholder(object):
"""
Field placeholder for model convertors.
"""
def __init__(self, field):
self.field = field
| bsd-3-clause |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/words/protocols/jabber/sasl_mechanisms.py | 13 | 8730 | # -*- test-case-name: twisted.words.test.test_jabbersaslmechanisms -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Protocol agnostic implementations of SASL authentication mechanisms.
"""
from __future__ import absolute_import, division
import binascii, random, time, os
from hashlib import md5
from zope.interface import Interface, Attribute, implementer
from twisted.python.compat import iteritems, networkString
class ISASLMechanism(Interface):
name = Attribute("""Common name for the SASL Mechanism.""")
def getInitialResponse():
"""
Get the initial client response, if defined for this mechanism.
@return: initial client response string.
@rtype: C{str}.
"""
def getResponse(challenge):
"""
Get the response to a server challenge.
@param challenge: server challenge.
@type challenge: C{str}.
@return: client response.
@rtype: C{str}.
"""
@implementer(ISASLMechanism)
class Anonymous(object):
"""
Implements the ANONYMOUS SASL authentication mechanism.
This mechanism is defined in RFC 2245.
"""
name = 'ANONYMOUS'
def getInitialResponse(self):
return None
@implementer(ISASLMechanism)
class Plain(object):
"""
Implements the PLAIN SASL authentication mechanism.
The PLAIN SASL authentication mechanism is defined in RFC 2595.
"""
name = 'PLAIN'
def __init__(self, authzid, authcid, password):
"""
@param authzid: The authorization identity.
@type authzid: L{unicode}
@param authcid: The authentication identity.
@type authcid: L{unicode}
@param password: The plain-text password.
@type password: L{unicode}
"""
self.authzid = authzid or u''
self.authcid = authcid or u''
self.password = password or u''
def getInitialResponse(self):
return (self.authzid.encode('utf-8') + b"\x00" +
self.authcid.encode('utf-8') + b"\x00" +
self.password.encode('utf-8'))
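# A brief illustrative example (the credentials are placeholders, not from
# the original module): the PLAIN initial response is the NUL-separated
# authzid/authcid/password triple, so
# Plain(None, u'user', u'secret').getInitialResponse()
# yields the byte string '\x00user\x00secret'.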
@implementer(ISASLMechanism)
class DigestMD5(object):
"""
Implements the DIGEST-MD5 SASL authentication mechanism.
The DIGEST-MD5 SASL authentication mechanism is defined in RFC 2831.
"""
name = 'DIGEST-MD5'
def __init__(self, serv_type, host, serv_name, username, password):
"""
@param serv_type: An indication of what kind of server authentication
is being attempted against. For example, C{u"xmpp"}.
@type serv_type: C{unicode}
@param host: The authentication hostname. Also known as the realm.
This is used as a scope to help select the right credentials.
@type host: C{unicode}
@param serv_name: An additional identifier for the server.
@type serv_name: C{unicode}
@param username: The authentication username to use to respond to a
challenge.
@type username: C{unicode}
@param password: The authentication password to use to respond to a
challenge.
@type password: C{unicode}
"""
self.username = username
self.password = password
self.defaultRealm = host
self.digest_uri = u'%s/%s' % (serv_type, host)
if serv_name is not None:
self.digest_uri += u'/%s' % (serv_name,)
def getInitialResponse(self):
return None
def getResponse(self, challenge):
directives = self._parse(challenge)
# Compat for implementations that do not send this along with
# a successful authentication.
if b'rspauth' in directives:
return b''
charset = directives[b'charset'].decode('ascii')
try:
realm = directives[b'realm']
except KeyError:
realm = self.defaultRealm.encode(charset)
return self._genResponse(charset,
realm,
directives[b'nonce'])
def _parse(self, challenge):
"""
Parses the server challenge.
Splits the challenge into a dictionary of directives with values.
@return: challenge directives and their values.
@rtype: C{dict} of C{str} to C{str}.
"""
s = challenge
paramDict = {}
cur = 0
remainingParams = True
while remainingParams:
# Parse a param. We can't just split on commas, because there can
# be some commas inside (quoted) param values, e.g.:
# qop="auth,auth-int"
middle = s.index(b"=", cur)
name = s[cur:middle].lstrip()
middle += 1
if s[middle:middle+1] == b'"':
middle += 1
end = s.index(b'"', middle)
value = s[middle:end]
cur = s.find(b',', end) + 1
if cur == 0:
remainingParams = False
else:
end = s.find(b',', middle)
if end == -1:
value = s[middle:].rstrip()
remainingParams = False
else:
value = s[middle:end].rstrip()
cur = end + 1
paramDict[name] = value
for param in (b'qop', b'cipher'):
if param in paramDict:
paramDict[param] = paramDict[param].split(b',')
return paramDict
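# Illustrative only (not from the original source): a challenge such as
# b'realm="example.org",nonce="abc",qop="auth,auth-int",charset=utf-8'
# parses to {b'realm': b'example.org', b'nonce': b'abc',
# b'qop': [b'auth', b'auth-int'], b'charset': b'utf-8'}.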
def _unparse(self, directives):
"""
Create message string from directives.
@param directives: dictionary of directives (names to their values).
For certain directives, extra quotes are added, as
needed.
@type directives: C{dict} of C{str} to C{str}
@return: message string.
@rtype: C{str}.
"""
directive_list = []
for name, value in iteritems(directives):
if name in (b'username', b'realm', b'cnonce',
b'nonce', b'digest-uri', b'authzid', b'cipher'):
directive = name + b'=' + value
else:
directive = name + b'=' + value
directive_list.append(directive)
return b','.join(directive_list)
def _calculateResponse(self, cnonce, nc, nonce,
username, password, realm, uri):
"""
Calculates response with given encoded parameters.
@return: The I{response} field of a response to a Digest-MD5 challenge
of the given parameters.
@rtype: L{bytes}
"""
def H(s):
return md5(s).digest()
def HEX(n):
return binascii.b2a_hex(n)
def KD(k, s):
return H(k + b':' + s)
a1 = (H(username + b":" + realm + b":" + password) + b":" +
nonce + b":" +
cnonce)
a2 = b"AUTHENTICATE:" + uri
response = HEX(KD(HEX(H(a1)),
nonce + b":" + nc + b":" + cnonce + b":" +
b"auth" + b":" + HEX(H(a2))))
return response
def _genResponse(self, charset, realm, nonce):
"""
Generate response-value.
Creates a response to a challenge according to section 2.1.2.1 of
RFC 2831 using the C{charset}, C{realm} and C{nonce} directives
from the challenge.
"""
try:
username = self.username.encode(charset)
password = self.password.encode(charset)
digest_uri = self.digest_uri.encode(charset)
except UnicodeError:
# TODO - add error checking
raise
nc = networkString('%08x' % (1,)) # TODO: support subsequent auth.
cnonce = self._gen_nonce()
qop = b'auth'
# TODO - add support for authzid
response = self._calculateResponse(cnonce, nc, nonce,
username, password, realm,
digest_uri)
directives = {b'username': username,
b'realm' : realm,
b'nonce' : nonce,
b'cnonce' : cnonce,
b'nc' : nc,
b'qop' : qop,
b'digest-uri': digest_uri,
b'response': response,
b'charset': charset.encode('ascii')}
return self._unparse(directives)
def _gen_nonce(self):
nonceString = "%f:%f:%d" % (random.random(), time.time(), os.getpid())
nonceBytes = networkString(nonceString)
return md5(nonceBytes).hexdigest().encode('ascii')
| mit |
ScradFTW/WhoSampledMyLib | WhoSampledScraper.py | 1 | 6314 | """
File: WhoSampledScraper.py
Author: Brad Jobe
Version: 0.0.1
Scrapes relevant HTML content from an artist's track page
"""
import sys
import eyed3
import urllib2
from lxml import html
import httplib
import json
class WhoSampledScraper:
URL_WHOSAMPLED = "www.whosampled.com"
HTTP_PROTO = "http://"
HTTP_REDIRECT = "3"
SONGS_SAMPLED = 0
WHO_SAMPLED = 2
SONGS_SAMPLED_CALL = "songsSampled"
WHO_SAMPLED_CALL = "whoSampled"
def __init__(self, songLoc):
"""
Parses a songfile for the artist and title ID3 tags and creates the
theoretical path for the songfile's sample page.
Param: The directory path to a song file, as a string.
Throws: MissingTagException if tag(s) could not be found.
"""
songfile = eyed3.load(songLoc)
try:
self.whoSampledHTML = None
self.artistName = songfile.tag.artist
self.songTitle = songfile.tag.title
self.sampleJSON = {}
if self.artistName is None or self.songTitle is None:
raise MissingTagException()
except MissingTagException:
print "audiofile at " + songLoc + " has missing tag information"
self.whoSampledPath = ("/" + self.artistName + "/" + \
self.songTitle + "/").replace(" ", "-")
self.sampleJSON[self.whoSampledPath] = { self.SONGS_SAMPLED_CALL:{}, \
self.WHO_SAMPLED_CALL: {} }
def getSongsSampled(self):
"""
Returns a list of songs that were sampled in the given track.
"""
jsonSamples = self.sampleScraper(self.SONGS_SAMPLED_CALL)
return self.convertJsontoList(jsonSamples)
def getWhoSampled(self):
"""
Returns a list of songs that have used the given track as a sample.
"""
jsonSamples = self.sampleScraper(self.WHO_SAMPLED_CALL)
return self.convertJsontoList(jsonSamples)
def getHTMLFromPath(self):
"""
Returns the html content from the song's sample page.
Throws: RedirectException if the url is redirected away from the
predicted path of the song's sample page.
"""
urlCheck = urllib2.urlopen(self.HTTP_PROTO + \
self.URL_WHOSAMPLED + \
self.whoSampledPath)
try:
if urlCheck.geturl().lower() != (self.HTTP_PROTO + \
self.URL_WHOSAMPLED + \
self.whoSampledPath).lower():
raise RedirectException()
except RedirectException:
print "The URL of " + self.songTitle + " by " + self.artistName + \
" was redirected."
return None
return urlCheck.read()
def sampleScraper(self, calltype):
"""
Scrapes sample data from the song's sample page.
Params: a string specifying what type of sample data is to be
scraped from the sample page.
Returns: a list of song samples, as strings, or an empty list.
"""
self.cachedSamples = self.loadCachedSampleData()
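# The bare comparison below is used only for its side effect: a KeyError
# signals a cache miss for this track path, in which case the page is
# scraped; otherwise the cached entry for the requested call type is reused.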
try:
self.cachedSamples[self.whoSampledPath] == None
except KeyError:
self.sampleJson = self.searchForSampleData(calltype)
else:
self.sampleJson = self.cachedSamples[self.whoSampledPath][calltype]
return self.sampleJson
def searchForSampleData(self, calltype):
"""
loads html of artist's track page on WhoSampled.com and parses it for
the relevant sample data.
args: specific type of sample data to parse for
returns: None if sample data could not be found, returns sample data
in json format if successful page parse
"""
if self.whoSampledHTML is None:
self.whoSampledHTML = self.getHTMLFromPath()
if self.whoSampledHTML is None:
return None
splitHTML = self.whoSampledHTML.split("<span Was sampled")
if calltype == self.SONGS_SAMPLED_CALL:
whoSampledDoc = html.document_fromstring( \
splitHTML[self.SONGS_SAMPLED])
elif calltype == self.WHO_SAMPLED_CALL and len(splitHTML) > 1:
whoSampledDoc = html.document_fromstring( \
splitHTML[self.WHO_SAMPLED])
elif len(splitHTML) <= 1:
return None
artistNamesSamples = whoSampledDoc.find_class("trackArtist")
songTitlesSamples = whoSampledDoc.find_class("trackName")
if len(artistNamesSamples) != len(songTitlesSamples) \
or len(artistNamesSamples) < 1:
return None
for i in range(0, len(artistNamesSamples)):
a = artistNamesSamples[i].text_content()
s = songTitlesSamples[i].text_content()
self.sampleJSON[self.whoSampledPath][calltype][a] = s
self.cacheSampleData()
return self.sampleJSON
def loadCachedSampleData(self):
"""
loads stored sample data from previous lookups
returns: json sample data
"""
with open("samples.json", "r") as inSampleFile:
jsonData = json.load(inSampleFile)
inSampleFile.close()
return jsonData
def cacheSampleData(self):
"""
stores sample data that has not been previously cached
"""
self.cachedSamples[self.whoSampledPath] \
= self.sampleJSON[self.whoSampledPath]
with open('samples.json', 'w') as outSampleFile:
json.dump(self.cachedSamples, outSampleFile)
outSampleFile.close()
def convertJsontoList(self, jsonSampleData):
"""
converts JSON sampled data to a python list
args: json to be converted
returns: python list of converted data
"""
sampleList = []
sampleDict = jsonSampleData
if not sampleDict:
return None
for key in sampleDict:
sampleList.append(str(sampleDict[key]) + " " + str(key))
return sampleList
class RedirectException(Exception):
pass
class MissingTagException(Exception):
pass
| gpl-2.0 |
brandsoulmates/incubator-airflow | tests/contrib/hooks/test_jira_hook.py | 44 | 1616 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow import models
from airflow.utils import db
jira_client_mock = Mock(
name="jira_client"
)
class TestJiraHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='jira_default', conn_type='jira',
host='https://localhost/jira/', port=443,
extra='{"verify": "False", "project": "AIRFLOW"}'))
@patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True,
return_value=jira_client_mock)
def test_jira_client_connection(self, jira_mock):
jira_hook = JiraHook()
self.assertTrue(jira_mock.called)
self.assertIsInstance(jira_hook.client, Mock)
self.assertEqual(jira_hook.client.name, jira_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
andyvand/Arduino-1 | arduino-core/src/processing/app/i18n/python/requests/packages/charade/chardistribution.py | 168 | 9153 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-bytes character in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
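# Worked example (arithmetic only): the byte pair 0x82 0x60 gives
# order = 188 * (0x82 - 0x81) + (0x60 - 0x40) = 220.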
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
| lgpl-2.1 |
cloudera/hue | desktop/core/ext-py/SQLAlchemy-1.3.17/test/dialect/test_sqlite.py | 2 | 82848 | #!coding: utf-8
"""SQLite-specific tests."""
import datetime
import json
import os
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import column
from sqlalchemy import Computed
from sqlalchemy import create_engine
from sqlalchemy import DefaultClause
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import extract
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import MetaData
from sqlalchemy import pool
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import select
from sqlalchemy import sql
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import tuple_
from sqlalchemy import types as sqltypes
from sqlalchemy import UniqueConstraint
from sqlalchemy import util
from sqlalchemy.dialects.sqlite import base as sqlite
from sqlalchemy.dialects.sqlite import pysqlite as pysqlite_dialect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.url import make_url
from sqlalchemy.schema import CreateTable
from sqlalchemy.schema import FetchedValue
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy.testing import combinations
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.types import Boolean
from sqlalchemy.types import Date
from sqlalchemy.types import DateTime
from sqlalchemy.types import Integer
from sqlalchemy.types import String
from sqlalchemy.types import Time
from sqlalchemy.util import u
from sqlalchemy.util import ue
class TestTypes(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = "sqlite"
def test_boolean(self):
"""Test that the boolean only treats 1 as True
"""
meta = MetaData(testing.db)
t = Table(
"bool_table",
meta,
Column("id", Integer, primary_key=True),
Column("boo", Boolean(create_constraint=False)),
)
try:
meta.create_all()
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (1, 'false');"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (2, 'true');"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (3, '1');"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (4, '0');"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (5, 1);"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (6, 0);"
)
eq_(
t.select(t.c.boo).order_by(t.c.id).execute().fetchall(),
[(3, True), (5, True)],
)
finally:
meta.drop_all()
def test_string_dates_passed_raise(self):
assert_raises(
exc.StatementError,
testing.db.execute,
select([1]).where(bindparam("date", type_=Date)),
date=str(datetime.date(2007, 10, 30)),
)
def test_cant_parse_datetime_message(self):
for (typ, disp) in [
(Time, "time"),
(DateTime, "datetime"),
(Date, "date"),
]:
assert_raises_message(
ValueError,
"Couldn't parse %s string." % disp,
lambda: testing.db.execute(
text("select 'ASDF' as value").columns(value=typ)
).scalar(),
)
def test_native_datetime(self):
dbapi = testing.db.dialect.dbapi
connect_args = {
"detect_types": dbapi.PARSE_DECLTYPES | dbapi.PARSE_COLNAMES
}
engine = engines.testing_engine(
options={"connect_args": connect_args, "native_datetime": True}
)
t = Table(
"datetest",
MetaData(),
Column("id", Integer, primary_key=True),
Column("d1", Date),
Column("d2", sqltypes.TIMESTAMP),
)
t.create(engine)
try:
engine.execute(
t.insert(),
{
"d1": datetime.date(2010, 5, 10),
"d2": datetime.datetime(2010, 5, 10, 12, 15, 25),
},
)
row = engine.execute(t.select()).first()
eq_(
row,
(
1,
datetime.date(2010, 5, 10),
datetime.datetime(2010, 5, 10, 12, 15, 25),
),
)
r = engine.execute(func.current_date()).scalar()
assert isinstance(r, util.string_types)
finally:
t.drop(engine)
engine.dispose()
@testing.provide_metadata
def test_custom_datetime(self):
sqlite_date = sqlite.DATETIME(
# 2004-05-21T00:00:00
storage_format="%(year)04d-%(month)02d-%(day)02d"
"T%(hour)02d:%(minute)02d:%(second)02d",
regexp=r"(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)",
)
t = Table("t", self.metadata, Column("d", sqlite_date))
self.metadata.create_all(testing.db)
testing.db.execute(
t.insert().values(d=datetime.datetime(2010, 10, 15, 12, 37, 0))
)
testing.db.execute("insert into t (d) values ('2004-05-21T00:00:00')")
eq_(
testing.db.execute("select * from t order by d").fetchall(),
[("2004-05-21T00:00:00",), ("2010-10-15T12:37:00",)],
)
eq_(
testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(),
[
(datetime.datetime(2004, 5, 21, 0, 0),),
(datetime.datetime(2010, 10, 15, 12, 37),),
],
)
@testing.provide_metadata
def test_custom_datetime_text_affinity(self):
sqlite_date = sqlite.DATETIME(
storage_format="%(year)04d%(month)02d%(day)02d"
"%(hour)02d%(minute)02d%(second)02d",
regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})",
)
t = Table("t", self.metadata, Column("d", sqlite_date))
self.metadata.create_all(testing.db)
testing.db.execute(
t.insert().values(d=datetime.datetime(2010, 10, 15, 12, 37, 0))
)
testing.db.execute("insert into t (d) values ('20040521000000')")
eq_(
testing.db.execute("select * from t order by d").fetchall(),
[("20040521000000",), ("20101015123700",)],
)
eq_(
testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(),
[
(datetime.datetime(2004, 5, 21, 0, 0),),
(datetime.datetime(2010, 10, 15, 12, 37),),
],
)
@testing.provide_metadata
def test_custom_date_text_affinity(self):
sqlite_date = sqlite.DATE(
storage_format="%(year)04d%(month)02d%(day)02d",
regexp=r"(\d{4})(\d{2})(\d{2})",
)
t = Table("t", self.metadata, Column("d", sqlite_date))
self.metadata.create_all(testing.db)
testing.db.execute(t.insert().values(d=datetime.date(2010, 10, 15)))
testing.db.execute("insert into t (d) values ('20040521')")
eq_(
testing.db.execute("select * from t order by d").fetchall(),
[("20040521",), ("20101015",)],
)
eq_(
testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(),
[(datetime.date(2004, 5, 21),), (datetime.date(2010, 10, 15),)],
)
@testing.provide_metadata
def test_custom_date(self):
sqlite_date = sqlite.DATE(
# 2004-05-21T00:00:00
storage_format="%(year)04d|%(month)02d|%(day)02d",
regexp=r"(\d+)\|(\d+)\|(\d+)",
)
t = Table("t", self.metadata, Column("d", sqlite_date))
self.metadata.create_all(testing.db)
testing.db.execute(t.insert().values(d=datetime.date(2010, 10, 15)))
testing.db.execute("insert into t (d) values ('2004|05|21')")
eq_(
testing.db.execute("select * from t order by d").fetchall(),
[("2004|05|21",), ("2010|10|15",)],
)
eq_(
testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(),
[(datetime.date(2004, 5, 21),), (datetime.date(2010, 10, 15),)],
)
def test_no_convert_unicode(self):
"""test no utf-8 encoding occurs"""
dialect = sqlite.dialect()
for t in (
String(),
sqltypes.CHAR(),
sqltypes.Unicode(),
sqltypes.UnicodeText(),
String(),
sqltypes.CHAR(),
sqltypes.Unicode(),
sqltypes.UnicodeText(),
):
bindproc = t.dialect_impl(dialect).bind_processor(dialect)
assert not bindproc or isinstance(
bindproc(util.u("some string")), util.text_type
)
class JSONTest(fixtures.TestBase):
__requires__ = ("json_type",)
__only_on__ = "sqlite"
@testing.provide_metadata
@testing.requires.reflects_json_type
def test_reflection(self):
Table("json_test", self.metadata, Column("foo", sqlite.JSON))
self.metadata.create_all()
reflected = Table("json_test", MetaData(), autoload_with=testing.db)
is_(reflected.c.foo.type._type_affinity, sqltypes.JSON)
assert isinstance(reflected.c.foo.type, sqlite.JSON)
@testing.provide_metadata
def test_rudimentary_roundtrip(self):
sqlite_json = Table(
"json_test", self.metadata, Column("foo", sqlite.JSON)
)
self.metadata.create_all()
value = {"json": {"foo": "bar"}, "recs": ["one", "two"]}
with testing.db.connect() as conn:
conn.execute(sqlite_json.insert(), foo=value)
eq_(conn.scalar(select([sqlite_json.c.foo])), value)
@testing.provide_metadata
def test_extract_subobject(self):
sqlite_json = Table(
"json_test", self.metadata, Column("foo", sqlite.JSON)
)
self.metadata.create_all()
value = {"json": {"foo": "bar"}}
with testing.db.connect() as conn:
conn.execute(sqlite_json.insert(), foo=value)
eq_(
conn.scalar(select([sqlite_json.c.foo["json"]])), value["json"]
)
@testing.provide_metadata
def test_deprecated_serializer_args(self):
sqlite_json = Table(
"json_test", self.metadata, Column("foo", sqlite.JSON)
)
data_element = {"foo": "bar"}
js = mock.Mock(side_effect=json.dumps)
jd = mock.Mock(side_effect=json.loads)
with testing.expect_deprecated(
"The _json_deserializer argument to the SQLite "
"dialect has been renamed",
"The _json_serializer argument to the SQLite "
"dialect has been renamed",
):
engine = engines.testing_engine(
options=dict(_json_serializer=js, _json_deserializer=jd)
)
self.metadata.create_all(engine)
engine.execute(sqlite_json.insert(), {"foo": data_element})
row = engine.execute(select([sqlite_json.c.foo])).first()
eq_(row, (data_element,))
eq_(js.mock_calls, [mock.call(data_element)])
eq_(jd.mock_calls, [mock.call(json.dumps(data_element))])
class DateTimeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_time_microseconds(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME()
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27 12:00:00.000125")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
def test_truncate_microseconds(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
dt_out = datetime.datetime(2008, 6, 27, 12, 0, 0)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME(truncate_microseconds=True)
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27 12:00:00")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt_out)
def test_custom_format_compact(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME(
storage_format=(
"%(year)04d%(month)02d%(day)02d"
"%(hour)02d%(minute)02d%(second)02d%(microsecond)06d"
),
regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})",
)
bp = sldt.bind_processor(None)
eq_(bp(dt), "20080627120000000125")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
class DateTest(fixtures.TestBase, AssertsCompiledSQL):
def test_default(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE()
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
def test_custom_format(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=r"(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)",
)
bp = sldt.bind_processor(None)
eq_(bp(dt), "06/27/2008")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
class TimeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_default(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE()
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
def test_truncate_microseconds(self):
dt = datetime.time(12, 0, 0, 125)
dt_out = datetime.time(12, 0, 0)
eq_(str(dt), "12:00:00.000125")
sldt = sqlite.TIME(truncate_microseconds=True)
bp = sldt.bind_processor(None)
eq_(bp(dt), "12:00:00")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt_out)
def test_custom_format(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE(
storage_format="%(year)04d%(month)02d%(day)02d",
regexp=r"(\d{4})(\d{2})(\d{2})",
)
bp = sldt.bind_processor(None)
eq_(bp(dt), "20080627")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
class DefaultsTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "sqlite"
@testing.exclude(
"sqlite",
"<",
(3, 3, 8),
"sqlite3 changesets 3353 and 3440 modified "
"behavior of default displayed in pragma "
"table_info()",
)
def test_default_reflection(self):
# (ask_for, roundtripped_as_if_different)
specs = [
(String(3), '"foo"'),
(sqltypes.NUMERIC(10, 2), "100.50"),
(Integer, "5"),
(Boolean, "False"),
]
columns = [
Column("c%i" % (i + 1), t[0], server_default=text(t[1]))
for (i, t) in enumerate(specs)
]
db = testing.db
m = MetaData(db)
Table("t_defaults", m, *columns)
try:
m.create_all()
m2 = MetaData(db)
rt = Table("t_defaults", m2, autoload=True)
expected = [c[1] for c in specs]
for i, reflected in enumerate(rt.c):
eq_(str(reflected.server_default.arg), expected[i])
finally:
m.drop_all()
@testing.exclude(
"sqlite",
"<",
(3, 3, 8),
"sqlite3 changesets 3353 and 3440 modified "
"behavior of default displayed in pragma "
"table_info()",
)
def test_default_reflection_2(self):
db = testing.db
m = MetaData(db)
expected = ["'my_default'", "0"]
table = """CREATE TABLE r_defaults (
data VARCHAR(40) DEFAULT 'my_default',
val INTEGER NOT NULL DEFAULT 0
)"""
try:
db.execute(table)
rt = Table("r_defaults", m, autoload=True)
for i, reflected in enumerate(rt.c):
eq_(str(reflected.server_default.arg), expected[i])
finally:
db.execute("DROP TABLE r_defaults")
def test_default_reflection_3(self):
db = testing.db
table = """CREATE TABLE r_defaults (
data VARCHAR(40) DEFAULT 'my_default',
val INTEGER NOT NULL DEFAULT 0
)"""
try:
db.execute(table)
m1 = MetaData(db)
t1 = Table("r_defaults", m1, autoload=True)
db.execute("DROP TABLE r_defaults")
t1.create()
m2 = MetaData(db)
t2 = Table("r_defaults", m2, autoload=True)
self.assert_compile(
CreateTable(t2),
"CREATE TABLE r_defaults (data VARCHAR(40) "
"DEFAULT 'my_default', val INTEGER DEFAULT 0 "
"NOT NULL)",
)
finally:
db.execute("DROP TABLE r_defaults")
@testing.provide_metadata
def test_boolean_default(self):
t = Table(
"t",
self.metadata,
Column("x", Boolean, server_default=sql.false()),
)
t.create(testing.db)
with testing.db.connect() as conn:
conn.execute(t.insert())
conn.execute(t.insert().values(x=True))
eq_(
conn.execute(t.select().order_by(t.c.x)).fetchall(),
[(False,), (True,)],
)
@testing.provide_metadata
def test_function_default(self):
t = Table(
"t",
self.metadata,
Column("id", Integer, primary_key=True),
Column("x", DateTime(), server_default=func.now()),
)
t.create(testing.db)
with testing.db.connect() as conn:
now = conn.scalar(func.now())
today = datetime.datetime.today()
conn.execute(t.insert())
conn.execute(t.insert().values(x=today))
eq_(
conn.execute(select([t.c.x]).order_by(t.c.id)).fetchall(),
[(now,), (today,)],
)
@testing.provide_metadata
def test_expression_with_function_default(self):
t = Table(
"t",
self.metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer(), server_default=func.abs(-5) + 17),
)
t.create(testing.db)
with testing.db.connect() as conn:
conn.execute(t.insert())
conn.execute(t.insert().values(x=35))
eq_(
conn.execute(select([t.c.x]).order_by(t.c.id)).fetchall(),
[(22,), (35,)],
)
def test_old_style_default(self):
"""test non-quoted integer value on older sqlite pragma"""
dialect = sqlite.dialect()
info = dialect._get_column_info("foo", "INTEGER", False, 3, False)
eq_(info["default"], "3")
class DialectTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = "sqlite"
def test_extra_reserved_words(self):
"""Tests reserved words in identifiers.
'true', 'false', and 'column' are undocumented reserved words
when used as column identifiers (as of 3.5.1). Covering them
here to ensure they remain in place if the dialect's
reserved_words set is updated in the future. """
meta = MetaData(testing.db)
t = Table(
"reserved",
meta,
Column("safe", Integer),
Column("true", Integer),
Column("false", Integer),
Column("column", Integer),
)
try:
meta.create_all()
t.insert().execute(safe=1)
list(t.select().execute())
finally:
meta.drop_all()
@testing.provide_metadata
def test_quoted_identifiers_functional_one(self):
"""Tests autoload of tables created with quoted column names."""
metadata = self.metadata
testing.db.execute(
"""CREATE TABLE "django_content_type" (
"id" integer NOT NULL PRIMARY KEY,
"django_stuff" text NULL
)
"""
)
testing.db.execute(
"""
CREATE TABLE "django_admin_log" (
"id" integer NOT NULL PRIMARY KEY,
"action_time" datetime NOT NULL,
"content_type_id" integer NULL
REFERENCES "django_content_type" ("id"),
"object_id" text NULL,
"change_message" text NOT NULL
)
"""
)
table1 = Table("django_admin_log", metadata, autoload=True)
table2 = Table("django_content_type", metadata, autoload=True)
j = table1.join(table2)
assert j.onclause.compare(table1.c.content_type_id == table2.c.id)
@testing.provide_metadata
def test_quoted_identifiers_functional_two(self):
""""test the edgiest of edge cases, quoted table/col names
that start and end with quotes.
SQLite claims to have fixed this in
http://www.sqlite.org/src/info/600482d161, however
it still fails if the FK points to a table name that actually
has quotes as part of its name.
"""
metadata = self.metadata
testing.db.execute(
r'''CREATE TABLE """a""" (
"""id""" integer NOT NULL PRIMARY KEY
)
'''
)
# unfortunately, still can't do this; sqlite quadruples
# up the quotes on the table name here for pragma foreign_key_list
# testing.db.execute(r'''
# CREATE TABLE """b""" (
# """id""" integer NOT NULL PRIMARY KEY,
# """aid""" integer NULL
# REFERENCES """a""" ("""id""")
# )
# ''')
table1 = Table(r'"a"', metadata, autoload=True)
assert '"id"' in table1.c
# table2 = Table(r'"b"', metadata, autoload=True)
# j = table1.join(table2)
# assert j.onclause.compare(table1.c['"id"']
# == table2.c['"aid"'])
@testing.provide_metadata
def test_description_encoding(self):
# amazingly, pysqlite seems to still deliver cursor.description
# as encoded bytes in py2k
t = Table(
"x",
self.metadata,
Column(u("méil"), Integer, primary_key=True),
Column(ue("\u6e2c\u8a66"), Integer),
)
self.metadata.create_all(testing.db)
result = testing.db.execute(t.select())
assert u("méil") in result.keys()
assert ue("\u6e2c\u8a66") in result.keys()
def test_pool_class(self):
e = create_engine("sqlite+pysqlite://")
assert e.pool.__class__ is pool.SingletonThreadPool
e = create_engine("sqlite+pysqlite:///:memory:")
assert e.pool.__class__ is pool.SingletonThreadPool
e = create_engine("sqlite+pysqlite:///foo.db")
assert e.pool.__class__ is pool.NullPool
@combinations(
(
"sqlite:///foo.db", # file path is absolute
([os.path.abspath("foo.db")], {}),
),
(
"sqlite:////abs/path/to/foo.db",
([os.path.abspath("/abs/path/to/foo.db")], {}),
),
("sqlite://", ([":memory:"], {})),
(
"sqlite:///?check_same_thread=true",
([":memory:"], {"check_same_thread": True}),
),
(
"sqlite:///file:path/to/database?"
"check_same_thread=true&timeout=10&mode=ro&nolock=1&uri=true",
(
["file:path/to/database?mode=ro&nolock=1"],
{"check_same_thread": True, "timeout": 10.0, "uri": True},
),
),
(
"sqlite:///file:path/to/database?" "mode=ro&uri=true",
(["file:path/to/database?mode=ro"], {"uri": True}),
),
(
"sqlite:///file:path/to/database?uri=true",
(["file:path/to/database"], {"uri": True}),
),
)
def test_connect_args(self, url, expected):
"""test create_connect_args scenarios including support for uri=True"""
d = pysqlite_dialect.dialect()
url = make_url(url)
eq_(d.create_connect_args(url), expected)
@testing.combinations(
("no_persisted", "ignore"),
("persisted_none", None),
("persisted_true", True),
("persisted_false", False),
id_="ia",
)
def test_column_computed(self, persisted):
m = MetaData()
kwargs = {"persisted": persisted} if persisted != "ignore" else {}
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, Computed("x + 2", **kwargs)),
)
assert_raises_message(
exc.CompileError,
"SQLite does not support computed columns",
schema.CreateTable(t).compile,
dialect=sqlite.dialect(),
)
class AttachedDBTest(fixtures.TestBase):
__only_on__ = "sqlite"
def _fixture(self):
meta = self.metadata
self.conn = testing.db.connect()
Table("created", meta, Column("foo", Integer), Column("bar", String))
Table("local_only", meta, Column("q", Integer), Column("p", Integer))
ct = Table(
"created",
meta,
Column("id", Integer),
Column("name", String),
schema="test_schema",
)
Table(
"another_created",
meta,
Column("bat", Integer),
Column("hoho", String),
schema="test_schema",
)
meta.create_all(self.conn)
return ct
def setup(self):
self.conn = testing.db.connect()
self.metadata = MetaData()
def teardown(self):
self.metadata.drop_all(self.conn)
self.conn.close()
def test_no_tables(self):
insp = inspect(self.conn)
eq_(insp.get_table_names("test_schema"), [])
def test_column_names(self):
self._fixture()
insp = inspect(self.conn)
eq_(
[
d["name"]
for d in insp.get_columns("created", schema="test_schema")
],
["id", "name"],
)
eq_(
[d["name"] for d in insp.get_columns("created", schema=None)],
["foo", "bar"],
)
eq_(
[
d["name"]
for d in insp.get_columns("nonexistent", schema="test_schema")
],
[],
)
eq_(
[
d["name"]
for d in insp.get_columns("another_created", schema=None)
],
[],
)
eq_(
[
d["name"]
for d in insp.get_columns("local_only", schema="test_schema")
],
[],
)
eq_([d["name"] for d in insp.get_columns("local_only")], ["q", "p"])
def test_table_names_present(self):
self._fixture()
insp = inspect(self.conn)
eq_(
set(insp.get_table_names("test_schema")),
{"created", "another_created"},
)
def test_table_names_system(self):
self._fixture()
insp = inspect(self.conn)
eq_(
set(insp.get_table_names("test_schema")),
{"created", "another_created"},
)
def test_schema_names(self):
self._fixture()
insp = inspect(self.conn)
eq_(insp.get_schema_names(), ["main", "test_schema"])
# implicitly creates a "temp" schema
self.conn.execute("select * from sqlite_temp_master")
# we're not including it
insp = inspect(self.conn)
eq_(insp.get_schema_names(), ["main", "test_schema"])
def test_reflect_system_table(self):
meta = MetaData(self.conn)
alt_master = Table(
"sqlite_master",
meta,
autoload=True,
autoload_with=self.conn,
schema="test_schema",
)
assert len(alt_master.c) > 0
def test_reflect_user_table(self):
self._fixture()
m2 = MetaData()
c2 = Table("created", m2, autoload=True, autoload_with=self.conn)
eq_(len(c2.c), 2)
def test_crud(self):
ct = self._fixture()
self.conn.execute(ct.insert(), {"id": 1, "name": "foo"})
eq_(self.conn.execute(ct.select()).fetchall(), [(1, "foo")])
self.conn.execute(ct.update(), {"id": 2, "name": "bar"})
eq_(self.conn.execute(ct.select()).fetchall(), [(2, "bar")])
self.conn.execute(ct.delete())
eq_(self.conn.execute(ct.select()).fetchall(), [])
def test_col_targeting(self):
ct = self._fixture()
self.conn.execute(ct.insert(), {"id": 1, "name": "foo"})
row = self.conn.execute(ct.select()).first()
eq_(row["id"], 1)
eq_(row["name"], "foo")
def test_col_targeting_union(self):
ct = self._fixture()
self.conn.execute(ct.insert(), {"id": 1, "name": "foo"})
row = self.conn.execute(ct.select().union(ct.select())).first()
eq_(row["id"], 1)
eq_(row["name"], "foo")
class SQLTest(fixtures.TestBase, AssertsCompiledSQL):
"""Tests SQLite-dialect specific compilation."""
__dialect__ = sqlite.dialect()
def test_extract(self):
t = sql.table("t", sql.column("col1"))
mapping = {
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
}
for field, subst in mapping.items():
self.assert_compile(
select([extract(field, t.c.col1)]),
"SELECT CAST(STRFTIME('%s', t.col1) AS "
"INTEGER) AS anon_1 FROM t" % subst,
)
def test_true_false(self):
self.assert_compile(sql.false(), "0")
self.assert_compile(sql.true(), "1")
def test_is_distinct_from(self):
self.assert_compile(
sql.column("x").is_distinct_from(None), "x IS NOT NULL"
)
self.assert_compile(
sql.column("x").isnot_distinct_from(False), "x IS 0"
)
def test_localtime(self):
self.assert_compile(
func.localtimestamp(), 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
)
def test_constraints_with_schemas(self):
metadata = MetaData()
Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
schema="master",
)
t2 = Table(
"t2",
metadata,
Column("id", Integer, primary_key=True),
Column("t1_id", Integer, ForeignKey("master.t1.id")),
schema="master",
)
t3 = Table(
"t3",
metadata,
Column("id", Integer, primary_key=True),
Column("t1_id", Integer, ForeignKey("master.t1.id")),
schema="alternate",
)
t4 = Table(
"t4",
metadata,
Column("id", Integer, primary_key=True),
Column("t1_id", Integer, ForeignKey("master.t1.id")),
)
# schema->schema, generate REFERENCES with no schema name
self.assert_compile(
schema.CreateTable(t2),
"CREATE TABLE master.t2 ("
"id INTEGER NOT NULL, "
"t1_id INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(t1_id) REFERENCES t1 (id)"
")",
)
# schema->different schema, don't generate REFERENCES
self.assert_compile(
schema.CreateTable(t3),
"CREATE TABLE alternate.t3 ("
"id INTEGER NOT NULL, "
"t1_id INTEGER, "
"PRIMARY KEY (id)"
")",
)
# same for local schema
self.assert_compile(
schema.CreateTable(t4),
"CREATE TABLE t4 ("
"id INTEGER NOT NULL, "
"t1_id INTEGER, "
"PRIMARY KEY (id)"
")",
)
def test_column_defaults_ddl(self):
t = Table(
"t", MetaData(), Column("x", Boolean, server_default=sql.false())
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE t (x BOOLEAN DEFAULT (0), CHECK (x IN (0, 1)))",
)
t = Table(
"t",
MetaData(),
Column("x", String(), server_default=func.sqlite_version()),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE t (x VARCHAR DEFAULT (sqlite_version()))",
)
t = Table(
"t",
MetaData(),
Column("x", Integer(), server_default=func.abs(-5) + 17),
)
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT (abs(-5) + 17))"
)
def test_create_partial_index(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx = Index(
"test_idx1",
tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
# test quoting and all that
idx2 = Index(
"test_idx2",
tbl.c.data,
sqlite_where=and_(tbl.c.data > "a", tbl.c.data < "b's"),
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl (data) "
"WHERE data > 5 AND data < 10",
dialect=sqlite.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (data) "
"WHERE data > 'a' AND data < 'b''s'",
dialect=sqlite.dialect(),
)
def test_no_autoinc_on_composite_pk(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer, primary_key=True, autoincrement=True),
Column("y", Integer, primary_key=True),
)
assert_raises_message(
exc.CompileError,
"SQLite does not support autoincrement for composite",
CreateTable(t).compile,
dialect=sqlite.dialect(),
)
def test_in_tuple(self):
self.assert_compile(
tuple_(column("q"), column("p")).in_([(1, 2), (3, 4)]),
"(q, p) IN (VALUES (?, ?), (?, ?))",
)
class OnConflictDDLTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = sqlite.dialect()
def test_on_conflict_clause_column_not_null(self):
c = Column(
"test", Integer, nullable=False, sqlite_on_conflict_not_null="FAIL"
)
self.assert_compile(
schema.CreateColumn(c),
"test INTEGER NOT NULL " "ON CONFLICT FAIL",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_column_many_clause(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"test",
Integer,
nullable=False,
primary_key=True,
sqlite_on_conflict_not_null="FAIL",
sqlite_on_conflict_primary_key="IGNORE",
),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n ("
"test INTEGER NOT NULL ON CONFLICT FAIL, "
"PRIMARY KEY (test) ON CONFLICT IGNORE)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_unique_constraint_from_column(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"x", String(30), unique=True, sqlite_on_conflict_unique="FAIL"
),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (x VARCHAR(30), " "UNIQUE (x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_unique_constraint(self):
meta = MetaData()
t = Table(
"n",
meta,
Column("id", Integer),
Column("x", String(30)),
UniqueConstraint("id", "x", sqlite_on_conflict="FAIL"),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (id INTEGER, x VARCHAR(30), "
"UNIQUE (id, x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_primary_key(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"id",
Integer,
primary_key=True,
sqlite_on_conflict_primary_key="FAIL",
),
sqlite_autoincrement=True,
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (id INTEGER NOT NULL "
"PRIMARY KEY ON CONFLICT FAIL AUTOINCREMENT)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_primary_key_constraint_from_column(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"x",
String(30),
sqlite_on_conflict_primary_key="FAIL",
primary_key=True,
),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (x VARCHAR(30) NOT NULL, "
"PRIMARY KEY (x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_check_constraint(self):
meta = MetaData()
t = Table(
"n",
meta,
Column("id", Integer),
Column("x", Integer),
CheckConstraint("id > x", sqlite_on_conflict="FAIL"),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (id INTEGER, x INTEGER, "
"CHECK (id > x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_check_constraint_from_column(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"x",
Integer,
CheckConstraint("x > 1", sqlite_on_conflict="FAIL"),
),
)
assert_raises_message(
exc.CompileError,
"SQLite does not support on conflict "
"clause for column check constraint",
CreateTable(t).compile,
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_primary_key_constraint(self):
meta = MetaData()
t = Table(
"n",
meta,
Column("id", Integer),
Column("x", String(30)),
PrimaryKeyConstraint("id", "x", sqlite_on_conflict="FAIL"),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n ("
"id INTEGER NOT NULL, "
"x VARCHAR(30) NOT NULL, "
"PRIMARY KEY (id, x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
class InsertTest(fixtures.TestBase, AssertsExecutionResults):
"""Tests inserts and autoincrement."""
__only_on__ = "sqlite"
# empty insert (i.e. INSERT INTO table DEFAULT VALUES) fails on
# 3.3.7 and before
def _test_empty_insert(self, table, expect=1):
try:
table.create()
for wanted in expect, expect * 2:
table.insert().execute()
rows = table.select().execute().fetchall()
eq_(len(rows), wanted)
finally:
table.drop()
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk1(self):
self._test_empty_insert(
Table(
"a",
MetaData(testing.db),
Column("id", Integer, primary_key=True),
)
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk2(self):
# now warns due to [ticket:3216]
with expect_warnings(
"Column 'b.x' is marked as a member of the "
"primary key for table 'b'",
"Column 'b.y' is marked as a member of the "
"primary key for table 'b'",
):
assert_raises(
exc.IntegrityError,
self._test_empty_insert,
Table(
"b",
MetaData(testing.db),
Column("x", Integer, primary_key=True),
Column("y", Integer, primary_key=True),
),
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk2_fv(self):
assert_raises(
exc.DBAPIError,
self._test_empty_insert,
Table(
"b",
MetaData(testing.db),
Column(
"x",
Integer,
primary_key=True,
server_default=FetchedValue(),
),
Column(
"y",
Integer,
primary_key=True,
server_default=FetchedValue(),
),
),
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk3(self):
# now warns due to [ticket:3216]
with expect_warnings(
"Column 'c.x' is marked as a member of the primary key for table"
):
assert_raises(
exc.IntegrityError,
self._test_empty_insert,
Table(
"c",
MetaData(testing.db),
Column("x", Integer, primary_key=True),
Column(
"y", Integer, DefaultClause("123"), primary_key=True
),
),
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk3_fv(self):
assert_raises(
exc.DBAPIError,
self._test_empty_insert,
Table(
"c",
MetaData(testing.db),
Column(
"x",
Integer,
primary_key=True,
server_default=FetchedValue(),
),
Column("y", Integer, DefaultClause("123"), primary_key=True),
),
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk4(self):
self._test_empty_insert(
Table(
"d",
MetaData(testing.db),
Column("x", Integer, primary_key=True),
Column("y", Integer, DefaultClause("123")),
)
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_nopk1(self):
self._test_empty_insert(
Table("e", MetaData(testing.db), Column("id", Integer))
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_nopk2(self):
self._test_empty_insert(
Table(
"f",
MetaData(testing.db),
Column("x", Integer),
Column("y", Integer),
)
)
def test_inserts_with_spaces(self):
tbl = Table(
"tbl",
MetaData("sqlite:///"),
Column("with space", Integer),
Column("without", Integer),
)
tbl.create()
try:
tbl.insert().execute({"without": 123})
assert list(tbl.select().execute()) == [(None, 123)]
tbl.insert().execute({"with space": 456})
assert list(tbl.select().execute()) == [(None, 123), (456, None)]
finally:
tbl.drop()
def full_text_search_missing():
"""Test if full text search is not implemented and return False if
it is and True otherwise."""
try:
testing.db.execute("CREATE VIRTUAL TABLE t using FTS3;")
testing.db.execute("DROP TABLE t;")
return False
except Exception:
return True
metadata = cattable = matchtable = None
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "sqlite"
__skip_if__ = (full_text_search_missing,)
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
testing.db.execute(
"""
CREATE VIRTUAL TABLE cattable using FTS3 (
id INTEGER NOT NULL,
description VARCHAR(50),
PRIMARY KEY (id)
)
"""
)
cattable = Table("cattable", metadata, autoload=True)
testing.db.execute(
"""
CREATE VIRTUAL TABLE matchtable using FTS3 (
id INTEGER NOT NULL,
title VARCHAR(200),
category_id INTEGER NOT NULL,
PRIMARY KEY (id)
)
"""
)
matchtable = Table("matchtable", metadata, autoload=True)
metadata.create_all()
cattable.insert().execute(
[
{"id": 1, "description": "Python"},
{"id": 2, "description": "Ruby"},
]
)
matchtable.insert().execute(
[
{
"id": 1,
"title": "Agile Web Development with Rails",
"category_id": 2,
},
{"id": 2, "title": "Dive Into Python", "category_id": 1},
{
"id": 3,
"title": "Programming Matz's Ruby",
"category_id": 2,
},
{
"id": 4,
"title": "The Definitive Guide to Django",
"category_id": 1,
},
{"id": 5, "title": "Python in a Nutshell", "category_id": 1},
]
)
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_expression(self):
self.assert_compile(
matchtable.c.title.match("somstr"),
"matchtable.title MATCH ?",
dialect=sqlite.dialect(),
)
def test_simple_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([2, 5], [r.id for r in results])
def test_simple_prefix_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("nut*"))
.execute()
.fetchall()
)
eq_([5], [r.id for r in results])
def test_or_match(self):
results2 = (
matchtable.select()
.where(matchtable.c.title.match("nutshell OR ruby"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([3, 5], [r.id for r in results2])
def test_and_match(self):
results2 = (
matchtable.select()
.where(matchtable.c.title.match("python nutshell"))
.execute()
.fetchall()
)
eq_([5], [r.id for r in results2])
def test_match_across_joins(self):
results = (
matchtable.select()
.where(
and_(
cattable.c.id == matchtable.c.category_id,
cattable.c.description.match("Ruby"),
)
)
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([1, 3], [r.id for r in results])
class AutoIncrementTest(fixtures.TestBase, AssertsCompiledSQL):
def test_sqlite_autoincrement(self):
table = Table(
"autoinctable",
MetaData(),
Column("id", Integer, primary_key=True),
Column("x", Integer, default=None),
sqlite_autoincrement=True,
)
self.assert_compile(
schema.CreateTable(table),
"CREATE TABLE autoinctable (id INTEGER NOT "
"NULL PRIMARY KEY AUTOINCREMENT, x INTEGER)",
dialect=sqlite.dialect(),
)
def test_sqlite_autoincrement_constraint(self):
table = Table(
"autoinctable",
MetaData(),
Column("id", Integer, primary_key=True),
Column("x", Integer, default=None),
UniqueConstraint("x"),
sqlite_autoincrement=True,
)
self.assert_compile(
schema.CreateTable(table),
"CREATE TABLE autoinctable (id INTEGER NOT "
"NULL PRIMARY KEY AUTOINCREMENT, x "
"INTEGER, UNIQUE (x))",
dialect=sqlite.dialect(),
)
def test_sqlite_no_autoincrement(self):
table = Table(
"noautoinctable",
MetaData(),
Column("id", Integer, primary_key=True),
Column("x", Integer, default=None),
)
self.assert_compile(
schema.CreateTable(table),
"CREATE TABLE noautoinctable (id INTEGER "
"NOT NULL, x INTEGER, PRIMARY KEY (id))",
dialect=sqlite.dialect(),
)
def test_sqlite_autoincrement_int_affinity(self):
class MyInteger(sqltypes.TypeDecorator):
impl = Integer
table = Table(
"autoinctable",
MetaData(),
Column("id", MyInteger, primary_key=True),
sqlite_autoincrement=True,
)
self.assert_compile(
schema.CreateTable(table),
"CREATE TABLE autoinctable (id INTEGER NOT "
"NULL PRIMARY KEY AUTOINCREMENT)",
dialect=sqlite.dialect(),
)
class ReflectHeadlessFKsTest(fixtures.TestBase):
__only_on__ = "sqlite"
def setup(self):
testing.db.execute("CREATE TABLE a (id INTEGER PRIMARY KEY)")
# this syntax actually works on other DBs; perhaps we'd want to add
# tests to test_reflection
testing.db.execute(
"CREATE TABLE b (id INTEGER PRIMARY KEY REFERENCES a)"
)
def teardown(self):
testing.db.execute("drop table b")
testing.db.execute("drop table a")
def test_reflect_tables_fk_no_colref(self):
meta = MetaData()
a = Table("a", meta, autoload=True, autoload_with=testing.db)
b = Table("b", meta, autoload=True, autoload_with=testing.db)
assert b.c.id.references(a.c.id)
class KeywordInDatabaseNameTest(fixtures.TestBase):
__only_on__ = "sqlite"
@classmethod
def setup_class(cls):
with testing.db.begin() as conn:
conn.execute('ATTACH %r AS "default"' % conn.engine.url.database)
conn.execute('CREATE TABLE "default".a (id INTEGER PRIMARY KEY)')
@classmethod
def teardown_class(cls):
with testing.db.begin() as conn:
try:
conn.execute('drop table "default".a')
except Exception:
pass
conn.execute('DETACH DATABASE "default"')
def test_reflect(self):
with testing.db.begin() as conn:
meta = MetaData(bind=conn, schema="default")
meta.reflect()
assert "default.a" in meta.tables
class ConstraintReflectionTest(fixtures.TestBase):
__only_on__ = "sqlite"
@classmethod
def setup_class(cls):
with testing.db.begin() as conn:
conn.execute("CREATE TABLE a1 (id INTEGER PRIMARY KEY)")
conn.execute("CREATE TABLE a2 (id INTEGER PRIMARY KEY)")
conn.execute(
"CREATE TABLE b (id INTEGER PRIMARY KEY, "
"FOREIGN KEY(id) REFERENCES a1(id),"
"FOREIGN KEY(id) REFERENCES a2(id)"
")"
)
conn.execute(
"CREATE TABLE c (id INTEGER, "
"CONSTRAINT bar PRIMARY KEY(id),"
"CONSTRAINT foo1 FOREIGN KEY(id) REFERENCES a1(id),"
"CONSTRAINT foo2 FOREIGN KEY(id) REFERENCES a2(id)"
")"
)
conn.execute(
# the lower casing + inline is intentional here
"CREATE TABLE d (id INTEGER, x INTEGER unique)"
)
conn.execute(
# the lower casing + inline is intentional here
"CREATE TABLE d1 "
'(id INTEGER, "some ( STUPID n,ame" INTEGER unique)'
)
conn.execute(
# the lower casing + inline is intentional here
'CREATE TABLE d2 ( "some STUPID n,ame" INTEGER unique)'
)
conn.execute(
# the lower casing + inline is intentional here
'CREATE TABLE d3 ( "some STUPID n,ame" INTEGER NULL unique)'
)
conn.execute(
# lower casing + inline is intentional
"CREATE TABLE e (id INTEGER, x INTEGER references a2(id))"
)
conn.execute(
'CREATE TABLE e1 (id INTEGER, "some ( STUPID n,ame" INTEGER '
'references a2 ("some ( STUPID n,ame"))'
)
conn.execute(
"CREATE TABLE e2 (id INTEGER, "
'"some ( STUPID n,ame" INTEGER NOT NULL '
'references a2 ("some ( STUPID n,ame"))'
)
conn.execute(
"CREATE TABLE f (x INTEGER, CONSTRAINT foo_fx UNIQUE(x))"
)
conn.execute(
"CREATE TEMPORARY TABLE g "
"(x INTEGER, CONSTRAINT foo_gx UNIQUE(x))"
)
conn.execute(
# intentional broken casing
"CREATE TABLE h (x INTEGER, COnstraINT foo_hx unIQUE(x))"
)
conn.execute(
"CREATE TABLE i (x INTEGER, y INTEGER, PRIMARY KEY(x, y))"
)
conn.execute(
"CREATE TABLE j (id INTEGER, q INTEGER, p INTEGER, "
"PRIMARY KEY(id), FOreiGN KEY(q,p) REFERENCes i(x,y))"
)
conn.execute(
"CREATE TABLE k (id INTEGER, q INTEGER, p INTEGER, "
"PRIMARY KEY(id), "
"conSTRAINT my_fk FOreiGN KEY ( q , p ) "
"REFERENCes i ( x , y ))"
)
meta = MetaData()
Table("l", meta, Column("bar", String, index=True), schema="main")
Table(
"m",
meta,
Column("id", Integer, primary_key=True),
Column("x", String(30)),
UniqueConstraint("x"),
)
Table(
"n",
meta,
Column("id", Integer, primary_key=True),
Column("x", String(30)),
UniqueConstraint("x"),
prefixes=["TEMPORARY"],
)
Table(
"p",
meta,
Column("id", Integer),
PrimaryKeyConstraint("id", name="pk_name"),
)
Table("q", meta, Column("id", Integer), PrimaryKeyConstraint("id"))
meta.create_all(conn)
# will contain an "autoindex"
conn.execute("create table o (foo varchar(20) primary key)")
conn.execute(
"CREATE TABLE onud_test (id INTEGER PRIMARY KEY, "
"c1 INTEGER, c2 INTEGER, c3 INTEGER, c4 INTEGER, "
"CONSTRAINT fk1 FOREIGN KEY (c1) REFERENCES a1(id) "
"ON DELETE SET NULL, "
"CONSTRAINT fk2 FOREIGN KEY (c2) REFERENCES a1(id) "
"ON UPDATE CASCADE, "
"CONSTRAINT fk3 FOREIGN KEY (c3) REFERENCES a2(id) "
"ON DELETE CASCADE ON UPDATE SET NULL,"
"CONSTRAINT fk4 FOREIGN KEY (c4) REFERENCES a2(id) "
"ON UPDATE NO ACTION)"
)
conn.execute(
"CREATE TABLE cp ("
"q INTEGER check (q > 1 AND q < 6),\n"
"CONSTRAINT cq CHECK (q == 1 OR (q > 2 AND q < 5))\n"
")"
)
conn.execute(
"CREATE TABLE implicit_referred (pk integer primary key)"
)
# single col foreign key with no referred column given,
# must assume primary key of referred table
conn.execute(
"CREATE TABLE implicit_referrer "
"(id integer REFERENCES implicit_referred)"
)
conn.execute(
"CREATE TABLE implicit_referred_comp "
"(pk1 integer, pk2 integer, primary key (pk1, pk2))"
)
# composite foreign key with no referred columns given,
# must assume primary key of referred table
conn.execute(
"CREATE TABLE implicit_referrer_comp "
"(id1 integer, id2 integer, foreign key(id1, id2) "
"REFERENCES implicit_referred_comp)"
)
# worst case - an FK that refers to a nonexistent table, so we can't
# get its PKs; requires the FK pragma to be turned off
conn.execute(
"CREATE TABLE implicit_referrer_comp_fake "
"(id1 integer, id2 integer, foreign key(id1, id2) "
"REFERENCES fake_table)"
)
@classmethod
def teardown_class(cls):
with testing.db.begin() as conn:
for name in [
"implicit_referrer_comp_fake",
"implicit_referrer",
"implicit_referred",
"implicit_referrer_comp",
"implicit_referred_comp",
"m",
"main.l",
"k",
"j",
"i",
"h",
"g",
"f",
"e",
"e1",
"d",
"d1",
"d2",
"c",
"b",
"a1",
"a2",
]:
try:
conn.execute("drop table %s" % name)
except Exception:
pass
def test_legacy_quoted_identifiers_unit(self):
dialect = sqlite.dialect()
dialect._broken_fk_pragma_quotes = True
for row in [
(0, None, "target", "tid", "id", None),
(0, None, '"target"', "tid", "id", None),
(0, None, "[target]", "tid", "id", None),
(0, None, "'target'", "tid", "id", None),
(0, None, "`target`", "tid", "id", None),
]:
def _get_table_pragma(*arg, **kw):
return [row]
def _get_table_sql(*arg, **kw):
return (
"CREATE TABLE foo "
"(tid INTEGER, "
"FOREIGN KEY(tid) REFERENCES %s (id))" % row[2]
)
with mock.patch.object(
dialect, "_get_table_pragma", _get_table_pragma
):
with mock.patch.object(
dialect, "_get_table_sql", _get_table_sql
):
fkeys = dialect.get_foreign_keys(None, "foo")
eq_(
fkeys,
[
{
"referred_table": "target",
"referred_columns": ["id"],
"referred_schema": None,
"name": None,
"constrained_columns": ["tid"],
"options": {},
}
],
)
def test_foreign_key_name_is_none(self):
# and not "0"
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("b")
eq_(
fks,
[
{
"referred_table": "a1",
"referred_columns": ["id"],
"referred_schema": None,
"name": None,
"constrained_columns": ["id"],
"options": {},
},
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": None,
"constrained_columns": ["id"],
"options": {},
},
],
)
def test_foreign_key_name_is_not_none(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("c")
eq_(
fks,
[
{
"referred_table": "a1",
"referred_columns": ["id"],
"referred_schema": None,
"name": "foo1",
"constrained_columns": ["id"],
"options": {},
},
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": "foo2",
"constrained_columns": ["id"],
"options": {},
},
],
)
def test_foreign_key_implicit_parent(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("implicit_referrer")
eq_(
fks,
[
{
"name": None,
"constrained_columns": ["id"],
"referred_schema": None,
"referred_table": "implicit_referred",
"referred_columns": ["pk"],
"options": {},
}
],
)
def test_foreign_key_composite_implicit_parent(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("implicit_referrer_comp")
eq_(
fks,
[
{
"name": None,
"constrained_columns": ["id1", "id2"],
"referred_schema": None,
"referred_table": "implicit_referred_comp",
"referred_columns": ["pk1", "pk2"],
"options": {},
}
],
)
def test_foreign_key_implicit_missing_parent(self):
        # test when the FK refers to a non-existent table and column names
        # aren't given. Only SQLite allows this case to exist.
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("implicit_referrer_comp_fake")
# the referred table doesn't exist but the operation does not fail
eq_(
fks,
[
{
"name": None,
"constrained_columns": ["id1", "id2"],
"referred_schema": None,
"referred_table": "fake_table",
"referred_columns": [],
"options": {},
}
],
)
def test_foreign_key_implicit_missing_parent_reflection(self):
# full Table reflection fails however, which is not a new behavior
m = MetaData()
assert_raises_message(
exc.NoSuchTableError,
"fake_table",
Table,
"implicit_referrer_comp_fake",
m,
autoload_with=testing.db,
)
def test_unnamed_inline_foreign_key(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("e")
eq_(
fks,
[
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": None,
"constrained_columns": ["x"],
"options": {},
}
],
)
def test_unnamed_inline_foreign_key_quoted(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("e1")
eq_(
fks,
[
{
"referred_table": "a2",
"referred_columns": ["some ( STUPID n,ame"],
"referred_schema": None,
"options": {},
"name": None,
"constrained_columns": ["some ( STUPID n,ame"],
}
],
)
fks = inspector.get_foreign_keys("e2")
eq_(
fks,
[
{
"referred_table": "a2",
"referred_columns": ["some ( STUPID n,ame"],
"referred_schema": None,
"options": {},
"name": None,
"constrained_columns": ["some ( STUPID n,ame"],
}
],
)
def test_foreign_key_composite_broken_casing(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("j")
eq_(
fks,
[
{
"referred_table": "i",
"referred_columns": ["x", "y"],
"referred_schema": None,
"name": None,
"constrained_columns": ["q", "p"],
"options": {},
}
],
)
fks = inspector.get_foreign_keys("k")
eq_(
fks,
[
{
"referred_table": "i",
"referred_columns": ["x", "y"],
"referred_schema": None,
"name": "my_fk",
"constrained_columns": ["q", "p"],
"options": {},
}
],
)
def test_foreign_key_ondelete_onupdate(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("onud_test")
eq_(
fks,
[
{
"referred_table": "a1",
"referred_columns": ["id"],
"referred_schema": None,
"name": "fk1",
"constrained_columns": ["c1"],
"options": {"ondelete": "SET NULL"},
},
{
"referred_table": "a1",
"referred_columns": ["id"],
"referred_schema": None,
"name": "fk2",
"constrained_columns": ["c2"],
"options": {"onupdate": "CASCADE"},
},
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": "fk3",
"constrained_columns": ["c3"],
"options": {"ondelete": "CASCADE", "onupdate": "SET NULL"},
},
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": "fk4",
"constrained_columns": ["c4"],
"options": {"onupdate": "NO ACTION"},
},
],
)
def test_foreign_key_options_unnamed_inline(self):
with testing.db.connect() as conn:
conn.execute(
"create table foo (id integer, "
"foreign key (id) references bar (id) on update cascade)"
)
insp = inspect(conn)
eq_(
insp.get_foreign_keys("foo"),
[
{
"name": None,
"referred_columns": ["id"],
"referred_table": "bar",
"constrained_columns": ["id"],
"referred_schema": None,
"options": {"onupdate": "CASCADE"},
}
],
)
def test_dont_reflect_autoindex(self):
inspector = Inspector(testing.db)
eq_(inspector.get_indexes("o"), [])
eq_(
inspector.get_indexes("o", include_auto_indexes=True),
[
{
"unique": 1,
"name": "sqlite_autoindex_o_1",
"column_names": ["foo"],
}
],
)
def test_create_index_with_schema(self):
"""Test creation of index with explicit schema"""
inspector = Inspector(testing.db)
eq_(
inspector.get_indexes("l", schema="main"),
[
{
"unique": 0,
"name": u"ix_main_l_bar",
"column_names": [u"bar"],
}
],
)
def test_unique_constraint_named(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("f"),
[{"column_names": ["x"], "name": "foo_fx"}],
)
def test_unique_constraint_named_broken_casing(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("h"),
[{"column_names": ["x"], "name": "foo_hx"}],
)
def test_unique_constraint_named_broken_temp(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("g"),
[{"column_names": ["x"], "name": "foo_gx"}],
)
def test_unique_constraint_unnamed_inline(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("d"),
[{"column_names": ["x"], "name": None}],
)
def test_unique_constraint_unnamed_inline_quoted(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("d1"),
[{"column_names": ["some ( STUPID n,ame"], "name": None}],
)
eq_(
inspector.get_unique_constraints("d2"),
[{"column_names": ["some STUPID n,ame"], "name": None}],
)
eq_(
inspector.get_unique_constraints("d3"),
[{"column_names": ["some STUPID n,ame"], "name": None}],
)
def test_unique_constraint_unnamed_normal(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("m"),
[{"column_names": ["x"], "name": None}],
)
def test_unique_constraint_unnamed_normal_temporary(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("n"),
[{"column_names": ["x"], "name": None}],
)
def test_primary_key_constraint_named(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_pk_constraint("p"),
{"constrained_columns": ["id"], "name": "pk_name"},
)
def test_primary_key_constraint_unnamed(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_pk_constraint("q"),
{"constrained_columns": ["id"], "name": None},
)
def test_primary_key_constraint_no_pk(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_pk_constraint("d"),
{"constrained_columns": [], "name": None},
)
def test_check_constraint(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_check_constraints("cp"),
[
{"sqltext": "q > 1 AND q < 6", "name": None},
{"sqltext": "q == 1 OR (q > 2 AND q < 5)", "name": "cq"},
],
)
class SavepointTest(fixtures.TablesTest):
"""test that savepoints work when we use the correct event setup"""
__only_on__ = "sqlite"
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", Integer, primary_key=True),
Column("user_name", String),
)
@classmethod
def setup_bind(cls):
engine = engines.testing_engine(options={"use_reaper": False})
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.execute("BEGIN")
return engine
def test_nested_subtransaction_rollback(self):
users = self.tables.users
connection = self.bind.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name="user2")
trans2.rollback()
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.commit()
eq_(
connection.execute(
select([users.c.user_id]).order_by(users.c.user_id)
).fetchall(),
[(1,), (3,)],
)
connection.close()
def test_nested_subtransaction_commit(self):
users = self.tables.users
connection = self.bind.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name="user2")
trans2.commit()
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.commit()
eq_(
connection.execute(
select([users.c.user_id]).order_by(users.c.user_id)
).fetchall(),
[(1,), (2,), (3,)],
)
connection.close()
def test_rollback_to_subtransaction(self):
users = self.tables.users
connection = self.bind.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name="user2")
trans3 = connection.begin()
connection.execute(users.insert(), user_id=3, user_name="user3")
trans3.rollback()
connection.execute(users.insert(), user_id=4, user_name="user4")
transaction.commit()
eq_(
connection.execute(
select([users.c.user_id]).order_by(users.c.user_id)
).fetchall(),
[(1,), (4,)],
)
connection.close()
class TypeReflectionTest(fixtures.TestBase):
__only_on__ = "sqlite"
def _fixed_lookup_fixture(self):
return [
(sqltypes.String(), sqltypes.VARCHAR()),
(sqltypes.String(1), sqltypes.VARCHAR(1)),
(sqltypes.String(3), sqltypes.VARCHAR(3)),
(sqltypes.Text(), sqltypes.TEXT()),
(sqltypes.Unicode(), sqltypes.VARCHAR()),
(sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
(sqltypes.UnicodeText(), sqltypes.TEXT()),
(sqltypes.CHAR(3), sqltypes.CHAR(3)),
(sqltypes.NUMERIC, sqltypes.NUMERIC()),
(sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
(sqltypes.Numeric, sqltypes.NUMERIC()),
(sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
(sqltypes.DECIMAL, sqltypes.DECIMAL()),
(sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
(sqltypes.INTEGER, sqltypes.INTEGER()),
(sqltypes.BIGINT, sqltypes.BIGINT()),
(sqltypes.Float, sqltypes.FLOAT()),
(sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
(sqltypes.DATETIME, sqltypes.DATETIME()),
(sqltypes.DateTime, sqltypes.DATETIME()),
(sqltypes.DateTime(), sqltypes.DATETIME()),
(sqltypes.DATE, sqltypes.DATE()),
(sqltypes.Date, sqltypes.DATE()),
(sqltypes.TIME, sqltypes.TIME()),
(sqltypes.Time, sqltypes.TIME()),
(sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
(sqltypes.Boolean, sqltypes.BOOLEAN()),
(
sqlite.DATE(storage_format="%(year)04d%(month)02d%(day)02d"),
sqltypes.DATE(),
),
(
sqlite.TIME(
storage_format="%(hour)02d%(minute)02d%(second)02d"
),
sqltypes.TIME(),
),
(
sqlite.DATETIME(
storage_format="%(year)04d%(month)02d%(day)02d"
"%(hour)02d%(minute)02d%(second)02d"
),
sqltypes.DATETIME(),
),
]
def _unsupported_args_fixture(self):
return [
("INTEGER(5)", sqltypes.INTEGER()),
("DATETIME(6, 12)", sqltypes.DATETIME()),
]
def _type_affinity_fixture(self):
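        # Raw column types resolved through the dialect's type-affinity
        # fallback, loosely mirroring SQLite's substring-based affinity rules.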
return [
("LONGTEXT", sqltypes.TEXT()),
("TINYINT", sqltypes.INTEGER()),
("MEDIUMINT", sqltypes.INTEGER()),
("INT2", sqltypes.INTEGER()),
("UNSIGNED BIG INT", sqltypes.INTEGER()),
("INT8", sqltypes.INTEGER()),
("CHARACTER(20)", sqltypes.TEXT()),
("CLOB", sqltypes.TEXT()),
("CLOBBER", sqltypes.TEXT()),
("VARYING CHARACTER(70)", sqltypes.TEXT()),
("NATIVE CHARACTER(70)", sqltypes.TEXT()),
("BLOB", sqltypes.BLOB()),
("BLOBBER", sqltypes.NullType()),
("DOUBLE PRECISION", sqltypes.REAL()),
("FLOATY", sqltypes.REAL()),
("SOMETHING UNKNOWN", sqltypes.NUMERIC()),
]
def _fixture_as_string(self, fixture):
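        # Render each source type to its DDL string form, as expected by
        # _resolve_type_affinity and by the CREATE TABLE round-trip tests.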
for from_, to_ in fixture:
if isinstance(from_, sqltypes.TypeEngine):
from_ = str(from_.compile())
elif isinstance(from_, type):
from_ = str(from_().compile())
yield from_, to_
def _test_lookup_direct(self, fixture, warnings=False):
dialect = sqlite.dialect()
for from_, to_ in self._fixture_as_string(fixture):
if warnings:
def go():
return dialect._resolve_type_affinity(from_)
final_type = testing.assert_warnings(
go, ["Could not instantiate"], regex=True
)
else:
final_type = dialect._resolve_type_affinity(from_)
expected_type = type(to_)
is_(type(final_type), expected_type)
def _test_round_trip(self, fixture, warnings=False):
from sqlalchemy import inspect
conn = testing.db.connect()
for from_, to_ in self._fixture_as_string(fixture):
inspector = inspect(conn)
conn.execute("CREATE TABLE foo (data %s)" % from_)
try:
if warnings:
def go():
return inspector.get_columns("foo")[0]
col_info = testing.assert_warnings(
go, ["Could not instantiate"], regex=True
)
else:
col_info = inspector.get_columns("foo")[0]
expected_type = type(to_)
is_(type(col_info["type"]), expected_type)
# test args
for attr in ("scale", "precision", "length"):
if getattr(to_, attr, None) is not None:
eq_(
getattr(col_info["type"], attr),
getattr(to_, attr, None),
)
finally:
conn.execute("DROP TABLE foo")
def test_lookup_direct_lookup(self):
self._test_lookup_direct(self._fixed_lookup_fixture())
def test_lookup_direct_unsupported_args(self):
self._test_lookup_direct(
self._unsupported_args_fixture(), warnings=True
)
def test_lookup_direct_type_affinity(self):
self._test_lookup_direct(self._type_affinity_fixture())
def test_round_trip_direct_lookup(self):
self._test_round_trip(self._fixed_lookup_fixture())
def test_round_trip_direct_unsupported_args(self):
self._test_round_trip(self._unsupported_args_fixture(), warnings=True)
def test_round_trip_direct_type_affinity(self):
self._test_round_trip(self._type_affinity_fixture())
| apache-2.0 |
icereval/osf.io | addons/gitlab/models.py | 1 | 14594 | # -*- coding: utf-8 -*-
import os
import urlparse
from django.db import models
import markupsafe
from addons.base import exceptions
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from addons.gitlab import utils
from addons.gitlab.api import GitLabClient
from addons.gitlab.serializer import GitLabSerializer
from addons.gitlab import settings as gitlab_settings
from addons.gitlab.exceptions import ApiError, NotFoundError, GitLabError
from framework.auth import Auth
from osf.models.files import File, Folder, BaseFileNode
from website import settings
from website.util import web_url_for
hook_domain = gitlab_settings.HOOK_DOMAIN or settings.DOMAIN
class GitLabFileNode(BaseFileNode):
_provider = 'gitlab'
class GitLabFolder(GitLabFileNode, Folder):
pass
class GitLabFile(GitLabFileNode, File):
version_identifier = 'commitSha'
@property
def _hashes(self):
try:
return {'commit': self._history[-1]['extra']['commitSha']}
except (IndexError, KeyError):
return None
def touch(self, auth_header, revision=None, ref=None, branch=None, **kwargs):
revision = revision or ref or branch
return super(GitLabFile, self).touch(auth_header, revision=revision, **kwargs)
class GitLabProvider(object):
name = 'GitLab'
short_name = 'gitlab'
serializer = GitLabSerializer
def __init__(self, account=None):
super(GitLabProvider, self).__init__() # this does exactly nothing...
# provide an unauthenticated session by default
self.account = account
def __repr__(self):
return '<{name}: {status}>'.format(
name=self.__class__.__name__,
status=self.account.display_name if self.account else 'anonymous'
)
class UserSettings(BaseOAuthUserSettings):
oauth_provider = GitLabProvider
serializer = GitLabSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = GitLabProvider
serializer = GitLabSerializer
user = models.TextField(blank=True, null=True)
repo = models.TextField(blank=True, null=True)
repo_id = models.TextField(blank=True, null=True)
hook_id = models.TextField(blank=True, null=True)
hook_secret = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True)
@property
def folder_id(self):
return self.repo or None
@property
def folder_name(self):
if self.complete:
return '{}/{}'.format(self.user, self.repo)
return None
@property
def folder_path(self):
return self.repo or None
@property
def has_auth(self):
return bool(self.user_settings and self.user_settings.has_auth)
@property
def complete(self):
return self.has_auth and self.repo is not None and self.user is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.owner.add_log(
action='gitlab_node_authorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=Auth(user_settings.owner),
)
if save:
self.save()
def clear_settings(self):
self.user = None
self.repo = None
self.repo_id = None
self.hook_id = None
self.hook_secret = None
def deauthorize(self, auth=None, log=True):
self.delete_hook(save=False)
self.clear_settings()
if log:
self.owner.add_log(
action='gitlab_node_deauthorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_auth()
def delete(self, save=False):
super(NodeSettings, self).delete(save=False)
self.deauthorize(log=False)
if save:
self.save()
@property
def repo_url(self):
if self.repo:
return 'https://{0}/{1}'.format(self.external_account.display_name, self.repo)
@property
def short_url(self):
if self.repo:
return self.repo
@property
def is_private(self):
connection = GitLabClient(external_account=self.external_account)
return not connection.repo(repo_id=self.repo_id)['public']
def to_json(self, user):
ret = super(NodeSettings, self).to_json(user)
user_settings = user.get_addon('gitlab')
ret.update({
'user_has_auth': user_settings and user_settings.has_auth,
'is_registration': self.owner.is_registration,
})
if self.user_settings and self.user_settings.has_auth:
valid_credentials = False
owner = self.user_settings.owner
connection = GitLabClient(external_account=self.external_account)
valid_credentials = True
try:
repos = connection.repos()
except GitLabError:
valid_credentials = False
if owner == user:
ret.update({'repos': repos})
ret.update({
'node_has_auth': True,
'gitlab_user': self.user or '',
'gitlab_repo': self.repo or '',
'gitlab_repo_id': self.repo_id if self.repo_id is not None else '0',
'gitlab_repo_full_name': '{0} / {1}'.format(self.user, self.repo) if (self.user and self.repo) else '',
'auth_osf_name': owner.fullname,
'auth_osf_url': owner.url,
'auth_osf_id': owner._id,
'gitlab_host': self.external_account.display_name,
'gitlab_user_name': self.external_account.display_name,
'gitlab_user_url': self.external_account.profile_url,
'is_owner': owner == user,
'valid_credentials': valid_credentials,
'addons_url': web_url_for('user_addons'),
'files_url': self.owner.web_url_for('collect_file_trees')
})
return ret
def serialize_waterbutler_credentials(self):
if not self.complete or not self.repo:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.external_account.oauth_key}
def serialize_waterbutler_settings(self):
if not self.complete:
raise exceptions.AddonError('Repo is not configured')
return {
'host': 'https://{}'.format(self.external_account.oauth_secret),
'owner': self.user,
'repo': self.repo,
'repo_id': self.repo_id
}
def create_waterbutler_log(self, auth, action, metadata):
path = metadata['path']
url = self.owner.web_url_for('addon_view_or_download_file', path=path, provider='gitlab')
if not metadata.get('extra'):
sha = None
urls = {}
else:
sha = metadata['extra']['fileSha']
urls = {
'view': '{0}?branch={1}'.format(url, sha),
'download': '{0}?action=download&branch={1}'.format(url, sha)
}
self.owner.add_log(
'gitlab_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': path,
'urls': urls,
'gitlab': {
'host': 'https://{0}'.format(self.external_account.display_name),
'user': self.user,
'repo': self.repo,
'sha': sha,
},
},
)
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
messages = []
# Quit if not contributor
if not node.is_contributor(user):
return messages
# Quit if not configured
if self.user is None or self.repo is None:
return messages
# Quit if no user authorization
if self.user_settings is None:
return messages
connect = GitLabClient(external_account=self.external_account)
try:
repo = connect.repo(self.repo_id)
except (ApiError, GitLabError):
return
node_permissions = 'public' if node.is_public else 'private'
repo_permissions = 'private' if not repo['public'] else 'public'
if repo_permissions != node_permissions:
message = (
'Warning: This OSF {category} is {node_perm}, but the GitLab '
'repo {user} / {repo} is {repo_perm}.'.format(
category=markupsafe.escape(node.project_or_component),
node_perm=markupsafe.escape(node_permissions),
repo_perm=markupsafe.escape(repo_permissions),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
if repo_permissions == 'private':
message += (
' Users can view the contents of this private GitLab '
'repository through this public project.'
)
else:
message += (
' The files in this GitLab repo can be viewed on GitLab '
'<u><a href="{url}">here</a></u>.'
).format(url=repo['http_url_to_repo'])
messages.append(message)
return messages
def before_remove_contributor_message(self, node, removed):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
try:
message = (super(NodeSettings, self).before_remove_contributor_message(node, removed) +
'You can download the contents of this repository before removing '
'this contributor <u><a href="{url}">here</a></u>.'.format(
url=node.api_url + 'gitlab/tarball/'
))
except TypeError:
# super call returned None due to lack of user auth
return None
else:
return message
# backwards compatibility -- TODO: is this necessary?
before_remove_contributor = before_remove_contributor_message
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the GitLab add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=markupsafe.escape(node.category_display),
title=markupsafe.escape(node.title),
user=markupsafe.escape(removed.fullname)
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
return message
def after_fork(self, node, fork, user, save=True):
"""
:param Node node: Original node
:param Node fork: Forked node
:param User user: User creating fork
:param bool save: Save settings after callback
:return tuple: Tuple of cloned settings and alert message
"""
clone = super(NodeSettings, self).after_fork(
node, fork, user, save=False
)
# Copy authentication if authenticated by forking user
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
if save:
clone.save()
return clone
def before_make_public(self, node):
try:
is_private = self.is_private
except NotFoundError:
return None
if is_private:
return (
'This {cat} is connected to a private GitLab repository. Users '
'(other than contributors) will not be able to see the '
'contents of this repo unless it is made public on GitLab.'
).format(
cat=node.project_or_component,
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
#########
# Hooks #
#########
# TODO: Should Events be added here?
# TODO: Move hook logic to service
def add_hook(self, save=True):
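        # Register a GitLab webhook that posts back to this node's
        # gitlab/hook/ API endpoint for the configured hook events.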
if self.user_settings:
connect = GitLabClient(external_account=self.external_account)
secret = utils.make_hook_secret()
hook = connect.add_hook(
self.user, self.repo,
'web',
{
'url': urlparse.urljoin(
hook_domain,
os.path.join(
self.owner.api_url, 'gitlab', 'hook/'
)
),
'content_type': gitlab_settings.HOOK_CONTENT_TYPE,
'secret': secret,
},
events=gitlab_settings.HOOK_EVENTS,
)
if hook:
self.hook_id = hook.id
self.hook_secret = secret
if save:
self.save()
def delete_hook(self, save=True):
"""
:return bool: Hook was deleted
"""
if self.user_settings and self.hook_id:
connection = GitLabClient(external_account=self.external_account)
try:
response = connection.delete_hook(self.user, self.repo, self.hook_id)
except (GitLabError, NotFoundError):
return False
if response:
self.hook_id = None
if save:
self.save()
return True
return False
| apache-2.0 |
toabctl/osc | tests/test_revertfiles.py | 15 | 3281 | import osc.core
import osc.oscerr
import os
from common import OscTestCase
FIXTURES_DIR = os.path.join(os.getcwd(), 'revertfile_fixtures')
def suite():
import unittest
return unittest.makeSuite(TestRevertFiles)
class TestRevertFiles(OscTestCase):
def _get_fixtures_dir(self):
return FIXTURES_DIR
def testRevertUnchanged(self):
"""revert an unchanged file (state == ' ')"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
self.assertRaises(osc.oscerr.OscIOError, p.revert, 'toadd2')
self._check_status(p, 'toadd2', '?')
def testRevertModified(self):
"""revert a modified file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('nochange')
self.__check_file('nochange')
self._check_status(p, 'nochange', ' ')
def testRevertAdded(self):
"""revert an added file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('toadd1')
self.assertTrue(os.path.exists('toadd1'))
self._check_addlist('replaced\naddedmissing\n')
self._check_status(p, 'toadd1', '?')
def testRevertDeleted(self):
"""revert a deleted file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('somefile')
self.__check_file('somefile')
self._check_deletelist('deleted\n')
self._check_status(p, 'somefile', ' ')
def testRevertMissing(self):
"""revert a missing (state == '!') file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('missing')
self.__check_file('missing')
self._check_status(p, 'missing', ' ')
def testRevertMissingAdded(self):
"""revert a missing file which was added to the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('addedmissing')
self._check_addlist('toadd1\nreplaced\n')
self.assertRaises(osc.oscerr.OscIOError, p.status, 'addedmissing')
def testRevertReplaced(self):
"""revert a replaced (state == 'R') file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('replaced')
self.__check_file('replaced')
self._check_addlist('toadd1\naddedmissing\n')
self._check_status(p, 'replaced', ' ')
def testRevertConflict(self):
"""revert a file which is in the conflict state"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('foo')
self.__check_file('foo')
self.assertFalse(os.path.exists(os.path.join('.osc', '_in_conflict')))
self._check_status(p, 'foo', ' ')
def testRevertSkipped(self):
"""revert a skipped file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
self.assertRaises(osc.oscerr.OscIOError, p.revert, 'skipped')
def __check_file(self, fname):
storefile = os.path.join('.osc', fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(storefile))
self.assertEqual(open(fname, 'r').read(), open(storefile, 'r').read())
if __name__ == '__main__':
import unittest
unittest.main()
| gpl-2.0 |
theoryno3/pylearn2 | pylearn2/utils/tests/test_video.py | 44 | 2040 | """Tests for pylearn2.utils.video"""
import numpy
from theano.compat import six
from pylearn2.compat import OrderedDict
from pylearn2.utils.video import FrameLookup, spatiotemporal_cubes
__author__ = "David Warde-Farley"
__copyright__ = "Copyright 2011, David Warde-Farley / Universite de Montreal"
__license__ = "BSD"
__maintainer__ = "David Warde-Farley"
__email__ = "wardefar@iro"
# TODO: write a test for get_video_dims, raising SkipTest
# if pyffmpeg can't be imported
def test_frame_lookup():
input_data = [('foo', 15), ('bar', 19), ('baz', 26)]
lookup = FrameLookup(input_data)
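    # Frames are addressed by a global index: 0-14 -> 'foo', 15-33 -> 'bar',
    # 34-59 -> 'baz'.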
assert len(lookup) == (15 + 19 + 26)
assert lookup[15] == ('bar', 19, 0)
assert lookup[14] == ('foo', 15, 14)
assert lookup[15 + 19 + 4] == ('baz', 26, 4)
# The test below is crashing on Travis, though not on mkg's machine. Ian
# suggests commenting the test out for now, to fast-track PR #1133.
def test_spatiotemporal_cubes():
def check_patch_coverage(files):
rng = numpy.random.RandomState(1)
inputs = [(name, array.shape) for name, array in six.iteritems(files)]
shape = (5, 7, 7)
for fname, index in spatiotemporal_cubes(inputs, shape, 50000, rng):
cube = files[fname][index]
if len(files[fname].shape) == 3:
assert cube.shape == shape
else:
assert cube.shape[:3] == shape[:3]
cube[...] = True
for fname, array in six.iteritems(files):
assert array.all()
files = OrderedDict(
file1=numpy.zeros((10, 30, 21), dtype=bool),
file2=numpy.zeros((15, 25, 28), dtype=bool),
file3=numpy.zeros((7, 18, 22), dtype=bool),
)
check_patch_coverage(files)
# Check that stuff still works with an extra color channel dimension.
files = OrderedDict(
file1=numpy.zeros((10, 30, 21, 3), dtype=bool),
file2=numpy.zeros((15, 25, 28, 3), dtype=bool),
file3=numpy.zeros((7, 18, 22, 3), dtype=bool),
)
check_patch_coverage(files)
| bsd-3-clause |
tiborsimko/zenodo | tests/unit/auditor/test_records.py | 6 | 13412 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test for Zenodo Auditor Record checks."""
from __future__ import absolute_import, print_function
import logging
import pytest
from invenio_records.models import RecordMetadata
from zenodo.modules.auditor.records import RecordAudit, RecordCheck
from zenodo.modules.records.api import ZenodoRecord
@pytest.fixture()
def record_audit():
return RecordAudit('testAudit', logging.getLogger('auditorTesting'), [])
def test_record_audit(record_audit, full_record, db, communities, users,
oaiid_pid):
# Add the "ecfunded" community since it's usually being added automatically
# after processing a deposit if the record has an EC grant.
    full_record['communities'].append('ecfunded')
# Mint the OAI identifier
oaiid_pid.pid_value = full_record['_oai']['id']
db.session.add(oaiid_pid)
    # Create the record metadata row, to store the record JSON in the database
record_model = RecordMetadata()
record_model.json = full_record
db.session.add(record_model)
db.session.commit()
record = ZenodoRecord(data=full_record, model=record_model)
check = RecordCheck(record_audit, record)
check.perform()
assert check.issues == {}
assert check.is_ok is True
assert check.dump() == {
'record': {
'recid': record['recid'],
'object_uuid': str(record.id),
},
'issues': {},
}
duplicate_community_params = (
([], None),
(['a', 'b'], None),
(['a', 'a', 'a', 'b'], ['a']),
(['a', 'a', 'b', 'b'], ['a', 'b']),
)
@pytest.mark.parametrize(('record_communities', 'issue'),
duplicate_community_params)
def test_duplicate_communities(record_audit, minimal_record,
record_communities, issue):
minimal_record.update({'communities': record_communities})
check = RecordCheck(record_audit, minimal_record)
check._duplicate_communities()
result_issue = check.issues.get('communities', {}).get('duplicates')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
unresolvable_communities_params = (
([], None),
(['c1', 'c2', 'c3', 'c4', 'zenodo', 'ecfunded'], None),
(['foo'], ['foo']),
(['c1', 'c2', 'foo'], ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
)
@pytest.mark.parametrize(('record_communities', 'issue'),
unresolvable_communities_params)
def test_unresolvable_communities(record_audit, minimal_record, communities,
record_communities, issue):
minimal_record.update({'communities': record_communities})
check = RecordCheck(record_audit, minimal_record)
check._unresolvable_communities()
result_issue = check.issues.get('communities', {}).get('unresolvable')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
duplicate_owners_params = (
([1], None),
([1, 2, 3], None),
([1, 1, 1, 2], [1]),
([1, 1, 2, 2], [1, 2]),
)
@pytest.mark.parametrize(('record_owners', 'issue'), duplicate_owners_params)
def test_duplicate_owners(record_audit, minimal_record, record_owners, issue):
minimal_record.update({'owners': record_owners})
check = RecordCheck(record_audit, minimal_record)
check._duplicate_owners()
result_issue = check.issues.get('owners', {}).get('duplicates')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
unresolvable_owners_params = (
([1], None),
([1, 2, 3], None),
([4], [4]),
([1, 2, 3, 4], [4]),
)
@pytest.mark.parametrize(('record_owners', 'issue'),
unresolvable_owners_params)
def test_unresolvable_owners(record_audit, minimal_record, users,
record_owners, issue):
minimal_record.update({'owners': record_owners})
check = RecordCheck(record_audit, minimal_record)
check._unresolvable_owners()
result_issue = check.issues.get('owners', {}).get('unresolvable')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
duplicate_grants_params = (
([], None),
([{'$ref': '1'}, {'$ref': '2'}], None),
([{'$ref': '1'}, {'$ref': '1'}], ['1']),
([{'$ref': '1'}, {'$ref': '1'}, {'$ref': '2'}], ['1']),
([{'$ref': '1'}, {'$ref': '1'}, {'$ref': '2'}, {'$ref': '2'}], ['1', '2']),
)
@pytest.mark.parametrize(('record_grants', 'issue'), duplicate_grants_params)
def test_duplicate_grants(record_audit, minimal_record, record_grants, issue):
minimal_record.update({'grants': record_grants})
check = RecordCheck(record_audit, minimal_record)
check._duplicate_grants()
result_issue = check.issues.get('grants', {}).get('duplicates')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
duplicate_files_params = [
([{'key': 'a', 'version_id': 1}], None),
([{'key': 'a', 'version_id': 1},
{'key': 'b', 'version_id': 2},
{'key': 'c', 'version_id': 3}],
None),
([{'key': 'a', 'version_id': 1},
{'key': 'a', 'version_id': 2},
{'key': 'a', 'version_id': 3},
{'key': 'b', 'version_id': 4}],
[{'key': 'a', 'version_id': 1},
{'key': 'a', 'version_id': 2},
{'key': 'a', 'version_id': 3}]),
([{'key': 'a', 'version_id': 1},
{'key': 'b', 'version_id': 1},
{'key': 'c', 'version_id': 1},
{'key': 'd', 'version_id': 2}],
[{'key': 'a', 'version_id': 1},
{'key': 'b', 'version_id': 1},
{'key': 'c', 'version_id': 1}]),
]
@pytest.mark.parametrize(('record_files', 'issue'), duplicate_files_params)
def test_duplicate_files(record_audit, minimal_record, record_files, issue):
minimal_record.update({'_files': record_files})
check = RecordCheck(record_audit, minimal_record)
check._duplicate_files()
result_issue = check.issues.get('files', {}).get('duplicates')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert result_issue == issue
missing_files_params = [
([{'key': 'a'}], False),
([{'key': 'a'}, {'key': 'b'}], False),
(None, True),
([], True),
]
@pytest.mark.parametrize(('record_files', 'issue'), missing_files_params)
def test_missing_files(record_audit, minimal_record, record_files, issue):
minimal_record.update({'_files': record_files})
check = RecordCheck(record_audit, minimal_record)
check._missing_files()
result_issue = check.issues.get('files', {}).get('missing')
assert bool(result_issue) == bool(issue)
multiple_buckets_params = [
([{'bucket': 'a'}], None),
([{'bucket': 'a'}, {'bucket': 'a'}, {'bucket': 'a'}], None),
([{'bucket': 'a'}, {'bucket': 'a'}, {'bucket': 'b'}], ['a', 'b']),
([{'bucket': 'a'}, {'bucket': 'b'}, {'bucket': 'c'}], ['a', 'b', 'c']),
]
@pytest.mark.parametrize(('record_files', 'issue'), multiple_buckets_params)
def test_multiple_buckets(record_audit, minimal_record, record_files, issue):
minimal_record.update({'_files': record_files})
check = RecordCheck(record_audit, minimal_record)
check._multiple_buckets()
result_issue = check.issues.get('files', {}).get('multiple_buckets')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
bucket_mismatch_params = [
('a', [{'bucket': 'a'}], None),
('a', [{'key': 'f1', 'bucket': 'a'}, {'key': 'f2', 'bucket': 'a'}], None),
('a', [{'key': 'f1', 'bucket': 'b'}], [{'key': 'f1', 'bucket': 'b'}]),
('a', [{'key': 'f1', 'bucket': 'a'}, {'key': 'f2', 'bucket': 'b'}],
[{'key': 'f2', 'bucket': 'b'}]),
]
@pytest.mark.parametrize(('record_bucket', 'record_files', 'issue'),
bucket_mismatch_params)
def test_bucket_mismatch(record_audit, minimal_record, record_bucket,
record_files, issue):
minimal_record.update({'_buckets': {'record': record_bucket}})
minimal_record.update({'_files': record_files})
check = RecordCheck(record_audit, minimal_record)
check._bucket_mismatch()
result_issue = check.issues.get('files', {}).get('bucket_mismatch')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert result_issue == issue
oai_required_params = [
({'id': 'oai:zenodo.org:1', 'updated': '2016-01-01T12:00:00Z'}, None),
({}, {'id': True, 'updated': True}),
({'id': 'oai:zenodo.org:1'}, {'updated': True}),
({'updated': '2016-01-01T12:00:00Z'}, {'id': True}),
]
@pytest.mark.parametrize(('record_oai', 'issue'), oai_required_params)
def test_oai_required(record_audit, minimal_record, record_oai, issue):
minimal_record.update({'_oai': record_oai})
check = RecordCheck(record_audit, minimal_record)
check._oai_required()
result_issue = check.issues.get('oai', {}).get('missing')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert result_issue == issue
oai_non_minted_pid_params = [
({'id': 'oai:zenodo.org:123'}, None),
({'id': 'oai:zenodo.org:invalid'}, 'oai:zenodo.org:invalid'),
]
@pytest.mark.parametrize(('record_oai', 'issue'), oai_non_minted_pid_params)
def test_oai_non_minted_pid(record_audit, minimal_record, db, oaiid_pid,
record_oai, issue):
db.session.add(oaiid_pid)
db.session.commit()
minimal_record.update({'_oai': record_oai})
check = RecordCheck(record_audit, minimal_record)
check._oai_non_minted_pid()
result_issue = check.issues.get('oai', {}).get('non_minted_pid')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert result_issue == issue
oai_duplicate_sets_params = [
({}, None),
({'sets': ['a', 'b']}, None),
({'sets': ['a', 'a', 'a', 'b']}, ['a']),
({'sets': ['a', 'a', 'b', 'b']}, ['a', 'b']),
]
@pytest.mark.parametrize(('record_oai', 'issue'), oai_duplicate_sets_params)
def test_oai_duplicate_sets(record_audit, minimal_record, record_oai, issue):
minimal_record.update({'_oai': record_oai})
check = RecordCheck(record_audit, minimal_record)
check._oai_duplicate_sets()
result_issue = check.issues.get('oai', {}).get('duplicate_oai_sets')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
oai_community_correspondence = [
([], [], None),
(['a'], ['user-a'], None),
(['a', 'b'], ['user-a', 'user-b'], None),
(['a'], [], {'missing_oai_sets': ['user-a']}),
(['a', 'b'], ['user-a'], {'missing_oai_sets': ['user-b'], }),
([], ['user-a'], {'redundant_oai_sets': ['user-a']}),
(['a'], ['user-a', 'user-b'], {'redundant_oai_sets': ['user-b']}),
(['a'], ['user-b'],
{'redundant_oai_sets': ['user-b'], 'missing_oai_sets': ['user-a']}),
]
@pytest.mark.parametrize(('record_communities', 'record_oai', 'issue'),
oai_community_correspondence)
def test_oai_community_correspondence(record_audit, minimal_record, db,
record_communities, record_oai, issue):
minimal_record.update({'communities': record_communities})
minimal_record.update({'_oai': {'sets': record_oai}})
check = RecordCheck(record_audit, minimal_record)
check._oai_community_correspondence()
result_issue = check.issues.get('oai', {})
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert result_issue == issue
def test_jsonschema(app, record_audit, minimal_record):
check = RecordCheck(record_audit, ZenodoRecord(minimal_record))
check.jsonschema()
assert check.issues.get('jsonschema') is None
minimal_record['invalid_key'] = 'should not be here'
check = RecordCheck(record_audit, ZenodoRecord(minimal_record))
check.jsonschema()
assert check.issues.get('jsonschema')
| gpl-2.0 |
drpngx/tensorflow | tensorflow/python/client/device_lib.py | 42 | 1459 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import device_attributes_pb2
from tensorflow.python import pywrap_tensorflow
def list_local_devices(session_config=None):
"""List the available devices available in the local process.
Args:
session_config: a session config proto or None to use the default config.
Returns:
A list of `DeviceAttribute` protocol buffers.
"""
def _convert(pb_str):
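    # Deserialize one DeviceAttributes protobuf returned in serialized
    # form by the C++ runtime.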
m = device_attributes_pb2.DeviceAttributes()
m.ParseFromString(pb_str)
return m
return [
_convert(s)
for s in pywrap_tensorflow.list_devices(session_config=session_config)
]
| apache-2.0 |
openstack/python-zaqarclient | zaqarclient/auth/__init__.py | 1 | 1268 | # Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zaqarclient.auth import base
from zaqarclient.auth import keystone
from zaqarclient.auth import signed_url
_BACKENDS = {
'noauth': base.NoAuth,
'keystone': keystone.KeystoneAuth,
'signed-url': signed_url.SignedURLAuth,
}
def get_backend(backend='keystone', options=None):
"""Loads backend `auth_backend`
:params backend: The backend name to load.
Default: `keystone`
:type backend: `six.string_types`
:param options: Options to pass to the Auth
backend. Refer to the backend for more info.
:type options: `dict`
"""
if options is None:
options = {}
backend = _BACKENDS[backend](options)
return backend
| apache-2.0 |
mobiuscoin/p2pool-mobi | p2pool/bitcoin/sha256.py | 285 | 3084 | from __future__ import division
import struct
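# Round constants: the first 32 bits of the fractional parts of the cube
# roots of the first 64 primes.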
k = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
]
def process(state, chunk):
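    # SHA-256 compression function: consume one 64-byte block and return the
    # updated 256-bit (8-word) state.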
def rightrotate(x, n):
return (x >> n) | (x << 32 - n) % 2**32
w = list(struct.unpack('>16I', chunk))
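    # Expand the 16 input words into the 64-entry message schedule.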
for i in xrange(16, 64):
s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ (w[i-15] >> 3)
s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ (w[i-2] >> 10)
w.append((w[i-16] + s0 + w[i-7] + s1) % 2**32)
a, b, c, d, e, f, g, h = start_state = struct.unpack('>8I', state)
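    # 64 compression rounds, one per round constant / schedule word.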
for k_i, w_i in zip(k, w):
t1 = (h + (rightrotate(e, 6) ^ rightrotate(e, 11) ^ rightrotate(e, 25)) + ((e & f) ^ (~e & g)) + k_i + w_i) % 2**32
a, b, c, d, e, f, g, h = (
(t1 + (rightrotate(a, 2) ^ rightrotate(a, 13) ^ rightrotate(a, 22)) + ((a & b) ^ (a & c) ^ (b & c))) % 2**32,
a, b, c, (d + t1) % 2**32, e, f, g,
)
return struct.pack('>8I', *((x + y) % 2**32 for x, y in zip(start_state, [a, b, c, d, e, f, g, h])))
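# Initial hash values: the first 32 bits of the fractional parts of the
# square roots of the first 8 primes.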
initial_state = struct.pack('>8I', 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19)
class sha256(object):
digest_size = 256//8
block_size = 512//8
def __init__(self, data='', _=(initial_state, '', 0)):
self.state, self.buf, self.length = _
self.update(data)
def update(self, data):
state = self.state
buf = self.buf + data
chunks = [buf[i:i + self.block_size] for i in xrange(0, len(buf) + 1, self.block_size)]
for chunk in chunks[:-1]:
state = process(state, chunk)
self.state = state
self.buf = chunks[-1]
self.length += 8*len(data)
def copy(self, data=''):
return self.__class__(data, (self.state, self.buf, self.length))
def digest(self):
state = self.state
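        # Merkle-Damgard padding: append 0x80, zero-fill, then the 64-bit
        # big-endian message length in bits, ending on a block boundary.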
buf = self.buf + '\x80' + '\x00'*((self.block_size - 9 - len(self.buf)) % self.block_size) + struct.pack('>Q', self.length)
for chunk in [buf[i:i + self.block_size] for i in xrange(0, len(buf), self.block_size)]:
state = process(state, chunk)
return state
def hexdigest(self):
return self.digest().encode('hex')
| gpl-3.0 |
palerdot/calibre | src/calibre/library/database2.py | 4 | 164046 | from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal [email protected]'
__docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
json, uuid, hashlib, copy, types
from collections import defaultdict, namedtuple
import threading, random
from itertools import repeat
from calibre import prints, force_unicode
from calibre.ebooks.metadata import (title_sort, author_to_author_sort,
string_to_authors, get_title_sort_pat)
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.library.database import LibraryDatabase
from calibre.library.field_metadata import FieldMetadata, TagsIcons
from calibre.library.schema_upgrades import SchemaUpgrade
from calibre.library.caches import ResultCache
from calibre.library.custom_columns import CustomColumns
from calibre.library.sqlite import connect, IntegrityError
from calibre.library.prefs import DBPrefs
from calibre.ebooks.metadata.book.base import Metadata
from calibre.constants import preferred_encoding, iswindows, filesystem_encoding
from calibre.ptempfile import (PersistentTemporaryFile,
base_dir, SpooledTemporaryFile)
from calibre.customize.ui import (run_plugins_on_import,
run_plugins_on_postimport)
from calibre import isbytestring
from calibre.utils.filenames import (ascii_filename, samefile,
WindowsAtomicFolderMove, hardlink_file)
from calibre.utils.date import (utcnow, now as nowf, utcfromtimestamp,
parse_only_date, UNDEFINED_DATE, parse_date)
from calibre.utils.config import prefs, tweaks, from_json, to_json
from calibre.utils.icu import sort_key, strcmp, lower
from calibre.utils.search_query_parser import saved_searches, set_saved_searches
from calibre.ebooks import check_ebook_format
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.recycle_bin import delete_file, delete_tree
from calibre.utils.formatter_functions import load_user_template_functions
from calibre.db import _get_next_series_num_for_list, _get_series_values, get_data_as_dict
from calibre.db.adding import find_books_in_directory, import_book_directory_multiple, import_book_directory, recursive_import
from calibre.db.errors import NoSuchFormat
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.db.categories import Tag, CATEGORY_SORTS
from calibre.utils.localization import (canonicalize_lang,
calibre_langcode_to_name)
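# Prefer hard links to full copies where the platform provides os.link.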
copyfile = os.link if hasattr(os, 'link') else shutil.copyfile
SPOOL_SIZE = 30*1024*1024
ProxyMetadata = namedtuple('ProxyMetadata', 'book_size ondevice_col db_approx_formats')
class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'''
An ebook metadata database that stores references to ebook files on disk.
'''
PATH_LIMIT = 40 if 'win32' in sys.platform else 100
WINDOWS_LIBRARY_PATH_LIMIT = 75
@dynamic_property
def user_version(self):
doc = 'The user version of this database'
def fget(self):
return self.conn.get('pragma user_version;', all=False)
def fset(self, val):
self.conn.execute('pragma user_version=%d'%int(val))
self.conn.commit()
return property(doc=doc, fget=fget, fset=fset)
@dynamic_property
def library_id(self):
doc = ('The UUID for this library. As long as the user only operates'
' on libraries with calibre, it will be unique')
def fget(self):
if self._library_id_ is None:
ans = self.conn.get('SELECT uuid FROM library_id', all=False)
if ans is None:
ans = str(uuid.uuid4())
self.library_id = ans
else:
self._library_id_ = ans
return self._library_id_
def fset(self, val):
self._library_id_ = unicode(val)
self.conn.executescript('''
DELETE FROM library_id;
INSERT INTO library_id (uuid) VALUES ("%s");
'''%self._library_id_)
self.conn.commit()
return property(doc=doc, fget=fget, fset=fset)
def connect(self):
if iswindows and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%(259-4*self.PATH_LIMIT-10))
exists = os.path.exists(self.dbpath)
if not exists:
# Be more strict when creating new libraries as the old calculation
# allowed for max path lengths of 265 chars.
if (iswindows and len(self.library_path) >
self.WINDOWS_LIBRARY_PATH_LIMIT):
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%self.WINDOWS_LIBRARY_PATH_LIMIT)
self.conn = connect(self.dbpath, self.row_factory)
if exists and self.user_version == 0:
self.conn.close()
os.remove(self.dbpath)
self.conn = connect(self.dbpath, self.row_factory)
if self.user_version == 0:
self.initialize_database()
# remember to add any filter to the connect method in sqlite.py as well
# so that various code that connects directly will not complain about
# missing functions
self.books_list_filter = self.conn.create_dynamic_filter('books_list_filter')
# Store temporary tables in memory
self.conn.execute('pragma temp_store=2')
self.conn.commit()
@classmethod
def exists_at(cls, path):
return path and os.path.exists(os.path.join(path, 'metadata.db'))
def __init__(self, library_path, row_factory=False, default_prefs=None,
read_only=False, is_second_db=False, progress_callback=None,
restore_all_prefs=False):
self.is_second_db = is_second_db
self.get_data_as_dict = types.MethodType(get_data_as_dict, self, LibraryDatabase2)
try:
if isbytestring(library_path):
library_path = library_path.decode(filesystem_encoding)
except:
traceback.print_exc()
self.field_metadata = FieldMetadata()
self.format_filename_cache = defaultdict(dict)
self._library_id_ = None
# Create the lock to be used to guard access to the metadata writer
# queues. This must be an RLock, not a Lock
self.dirtied_lock = threading.RLock()
if not os.path.exists(library_path):
os.makedirs(library_path)
self.listeners = set([])
self.library_path = os.path.abspath(library_path)
self.row_factory = row_factory
self.dbpath = os.path.join(library_path, 'metadata.db')
self.dbpath = os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH',
self.dbpath)
if read_only and os.path.exists(self.dbpath):
# Work on only a copy of metadata.db to ensure that
# metadata.db is not changed
pt = PersistentTemporaryFile('_metadata_ro.db')
pt.close()
shutil.copyfile(self.dbpath, pt.name)
self.dbpath = pt.name
apply_default_prefs = not os.path.exists(self.dbpath)
self.connect()
self.is_case_sensitive = (not iswindows and
not os.path.exists(self.dbpath.replace('metadata.db',
'MeTAdAtA.dB')))
SchemaUpgrade.__init__(self)
# Guarantee that the library_id is set
self.library_id
# if we are to copy the prefs and structure from some other DB, then
# we need to do it before we call initialize_dynamic
if apply_default_prefs and default_prefs is not None:
if progress_callback is None:
progress_callback = lambda x, y: True
dbprefs = DBPrefs(self)
progress_callback(None, len(default_prefs))
for i, key in enumerate(default_prefs):
# be sure that prefs not to be copied are listed below
if not restore_all_prefs and key in frozenset(['news_to_be_synced']):
continue
dbprefs[key] = default_prefs[key]
progress_callback(_('restored preference ') + key, i+1)
if 'field_metadata' in default_prefs:
fmvals = [f for f in default_prefs['field_metadata'].values() if f['is_custom']]
progress_callback(None, len(fmvals))
for i, f in enumerate(fmvals):
progress_callback(_('creating custom column ') + f['label'], i)
self.create_custom_column(f['label'], f['name'], f['datatype'],
f['is_multiple'] is not None and len(f['is_multiple']) > 0,
f['is_editable'], f['display'])
self.initialize_template_cache()
self.initialize_dynamic()
def initialize_template_cache(self):
self.formatter_template_cache = {}
def get_property(self, idx, index_is_id=False, loc=-1):
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
return row[loc]
def initialize_dynamic(self):
self.field_metadata = FieldMetadata() # Ensure we start with a clean copy
self.prefs = DBPrefs(self)
defs = self.prefs.defaults
defs['gui_restriction'] = defs['cs_restriction'] = ''
defs['categories_using_hierarchy'] = []
defs['column_color_rules'] = []
defs['column_icon_rules'] = []
defs['grouped_search_make_user_categories'] = []
defs['similar_authors_search_key'] = 'authors'
defs['similar_authors_match_kind'] = 'match_any'
defs['similar_publisher_search_key'] = 'publisher'
defs['similar_publisher_match_kind'] = 'match_any'
defs['similar_tags_search_key'] = 'tags'
defs['similar_tags_match_kind'] = 'match_all'
defs['similar_series_search_key'] = 'series'
defs['similar_series_match_kind'] = 'match_any'
defs['book_display_fields'] = [
('title', False), ('authors', True), ('formats', True),
('series', True), ('identifiers', True), ('tags', True),
('path', True), ('publisher', False), ('rating', False),
('author_sort', False), ('sort', False), ('timestamp', False),
('uuid', False), ('comments', True), ('id', False), ('pubdate', False),
('last_modified', False), ('size', False), ('languages', False),
]
defs['virtual_libraries'] = {}
defs['virtual_lib_on_startup'] = defs['cs_virtual_lib_on_startup'] = ''
defs['virt_libs_hidden'] = defs['virt_libs_order'] = ()
# Migrate the bool tristate tweak
defs['bools_are_tristate'] = \
tweaks.get('bool_custom_columns_are_tristate', 'yes') == 'yes'
if self.prefs.get('bools_are_tristate') is None:
self.prefs.set('bools_are_tristate', defs['bools_are_tristate'])
# Migrate column coloring rules
if self.prefs.get('column_color_name_1', None) is not None:
from calibre.library.coloring import migrate_old_rule
old_rules = []
for i in range(1, 6):
col = self.prefs.get('column_color_name_'+str(i), None)
templ = self.prefs.get('column_color_template_'+str(i), None)
if col and templ:
try:
del self.prefs['column_color_name_'+str(i)]
rules = migrate_old_rule(self.field_metadata, templ)
for templ in rules:
old_rules.append((col, templ))
except:
pass
if old_rules:
self.prefs['column_color_rules'] += old_rules
# Migrate saved search and user categories to db preference scheme
def migrate_preference(key, default):
oldval = prefs[key]
if oldval != default:
self.prefs[key] = oldval
prefs[key] = default
if key not in self.prefs:
self.prefs[key] = default
migrate_preference('user_categories', {})
migrate_preference('saved_searches', {})
if not self.is_second_db:
set_saved_searches(self, 'saved_searches')
# migrate grouped_search_terms
if self.prefs.get('grouped_search_terms', None) is None:
try:
ogst = tweaks.get('grouped_search_terms', {})
ngst = {}
for t in ogst:
ngst[icu_lower(t)] = ogst[t]
self.prefs.set('grouped_search_terms', ngst)
except:
pass
# migrate the gui_restriction preference to a virtual library
gr_pref = self.prefs.get('gui_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['gui_restriction'] = ''
self.prefs['virtual_lib_on_startup'] = gr_pref
# migrate the cs_restriction preference to a virtual library
gr_pref = self.prefs.get('cs_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['cs_restriction'] = ''
self.prefs['cs_virtual_lib_on_startup'] = gr_pref
# Rename any user categories with names that differ only in case
user_cats = self.prefs.get('user_categories', [])
catmap = {}
for uc in user_cats:
ucl = icu_lower(uc)
if ucl not in catmap:
catmap[ucl] = []
catmap[ucl].append(uc)
cats_changed = False
for uc in catmap:
if len(catmap[uc]) > 1:
prints('found user category case overlap', catmap[uc])
cat = catmap[uc][0]
suffix = 1
while icu_lower((cat + unicode(suffix))) in catmap:
suffix += 1
prints('Renaming user category %s to %s'%(cat, cat+unicode(suffix)))
user_cats[cat + unicode(suffix)] = user_cats[cat]
del user_cats[cat]
cats_changed = True
if cats_changed:
self.prefs.set('user_categories', user_cats)
if not self.is_second_db:
load_user_template_functions(self.library_id,
self.prefs.get('user_template_functions', []))
# Load the format filename cache
self.refresh_format_cache()
self.conn.executescript('''
DROP TRIGGER IF EXISTS author_insert_trg;
CREATE TEMP TRIGGER author_insert_trg
AFTER INSERT ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
END;
DROP TRIGGER IF EXISTS author_update_trg;
CREATE TEMP TRIGGER author_update_trg
BEFORE UPDATE ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name)
WHERE id=NEW.id AND name <> NEW.name;
END;
''')
self.conn.execute(
'UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL')
self.conn.executescript(u'''
CREATE TEMP VIEW IF NOT EXISTS tag_browser_news AS SELECT DISTINCT
id,
name,
(SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id) count,
(0) as avg_rating,
name as sort
FROM tags as x WHERE name!="{0}" AND id IN
(SELECT DISTINCT tag FROM books_tags_link WHERE book IN
(SELECT DISTINCT book FROM books_tags_link WHERE tag IN
(SELECT id FROM tags WHERE name="{0}")));
'''.format(_('News')))
self.conn.executescript(u'''
CREATE TEMP VIEW IF NOT EXISTS tag_browser_filtered_news AS SELECT DISTINCT
id,
name,
(SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id and books_list_filter(book)) count,
(0) as avg_rating,
name as sort
FROM tags as x WHERE name!="{0}" AND id IN
(SELECT DISTINCT tag FROM books_tags_link WHERE book IN
(SELECT DISTINCT book FROM books_tags_link WHERE tag IN
(SELECT id FROM tags WHERE name="{0}")));
'''.format(_('News')))
self.conn.commit()
CustomColumns.__init__(self)
template = '''\
(SELECT {query} FROM books_{table}_link AS link INNER JOIN
{table} ON(link.{link_col}={table}.id) WHERE link.book=books.id)
{col}
'''
columns = ['id', 'title',
# col table link_col query
('authors', 'authors', 'author', 'sortconcat(link.id, name)'),
'timestamp',
'(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size',
('rating', 'ratings', 'rating', 'ratings.rating'),
('tags', 'tags', 'tag', 'group_concat(name)'),
'(SELECT text FROM comments WHERE book=books.id) comments',
('series', 'series', 'series', 'name'),
('publisher', 'publishers', 'publisher', 'name'),
'series_index',
'sort',
'author_sort',
'(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
'path',
'pubdate',
'uuid',
'has_cover',
('au_map', 'authors', 'author',
'aum_sortconcat(link.id, authors.name, authors.sort, authors.link)'),
'last_modified',
'(SELECT identifiers_concat(type, val) FROM identifiers WHERE identifiers.book=books.id) identifiers',
('languages', 'languages', 'lang_code',
'sortconcat(link.id, languages.lang_code)'),
]
lines = []
for col in columns:
line = col
if isinstance(col, tuple):
line = template.format(col=col[0], table=col[1],
link_col=col[2], query=col[3])
lines.append(line)
custom_map = self.custom_columns_in_meta()
# custom col labels are numbers (the id in the custom_columns table)
custom_cols = list(sorted(custom_map.keys()))
lines.extend([custom_map[x] for x in custom_cols])
self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
'au_map':18, 'last_modified':19, 'identifiers':20, 'languages':21}
for k,v in self.FIELD_MAP.iteritems():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
base = max(self.FIELD_MAP.values())
for col in custom_cols:
self.FIELD_MAP[col] = base = base+1
self.field_metadata.set_field_record_index(
self.custom_column_num_map[col]['label'],
base,
prefer_custom=True)
if self.custom_column_num_map[col]['datatype'] == 'series':
# account for the series index column. Field_metadata knows that
# the series index is one larger than the series. If you change
# it here, be sure to change it there as well.
self.FIELD_MAP[str(col)+'_index'] = base = base+1
self.field_metadata.set_field_record_index(
self.custom_column_num_map[col]['label']+'_index',
base,
prefer_custom=True)
self.FIELD_MAP['ondevice'] = base = base+1
self.field_metadata.set_field_record_index('ondevice', base, prefer_custom=False)
self.FIELD_MAP['marked'] = base = base+1
self.field_metadata.set_field_record_index('marked', base, prefer_custom=False)
self.FIELD_MAP['series_sort'] = base = base+1
self.field_metadata.set_field_record_index('series_sort', base, prefer_custom=False)
script = '''
DROP VIEW IF EXISTS meta2;
CREATE TEMP VIEW meta2 AS
SELECT
{0}
FROM books;
'''.format(', \n'.join(lines))
self.conn.executescript(script)
self.conn.commit()
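        # Illustrative note (not executed): for the ('authors', 'authors', 'author',
        # 'sortconcat(link.id, name)') entry in the columns list above, the template
        # expands to a correlated subquery roughly like
        #   (SELECT sortconcat(link.id, name) FROM books_authors_link AS link INNER JOIN
        #       authors ON(link.author=authors.id) WHERE link.book=books.id) authors
        # so the meta2 view exposes one column per FIELD_MAP entry.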
# Reconstruct the user categories, putting them into field_metadata
# Assumption is that someone else will fix them if they change.
self.field_metadata.remove_dynamic_categories()
for user_cat in sorted(self.prefs.get('user_categories', {}).keys(), key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
self.field_metadata.add_user_category(label=cat_name, name=user_cat)
# add grouped search term user categories
muc = self.prefs.get('grouped_search_make_user_categories', [])
for cat in sorted(self.prefs.get('grouped_search_terms', {}).keys(), key=sort_key):
if cat in muc:
# There is a chance that these can be duplicates of an existing
# user category. Print the exception and continue.
try:
self.field_metadata.add_user_category(label=u'@' + cat, name=cat)
except:
traceback.print_exc()
if len(saved_searches().names()):
self.field_metadata.add_search_category(label='search', name=_('Searches'))
self.field_metadata.add_grouped_search_terms(
self.prefs.get('grouped_search_terms', {}))
self.book_on_device_func = None
self.data = ResultCache(self.FIELD_MAP, self.field_metadata, db_prefs=self.prefs)
self.search = self.data.search
self.search_getting_ids = self.data.search_getting_ids
self.refresh = functools.partial(self.data.refresh, self)
self.sort = self.data.sort
self.multisort = self.data.multisort
self.index = self.data.index
self.refresh_ids = functools.partial(self.data.refresh_ids, self)
self.row = self.data.row
self.has_id = self.data.has_id
self.count = self.data.count
self.set_marked_ids = self.data.set_marked_ids
for prop in (
'author_sort', 'authors', 'comment', 'comments',
'publisher', 'rating', 'series', 'series_index', 'tags',
'title', 'timestamp', 'uuid', 'pubdate', 'ondevice',
'metadata_last_modified', 'languages',
):
fm = {'comment':'comments', 'metadata_last_modified':
'last_modified'}.get(prop, prop)
setattr(self, prop, functools.partial(self.get_property,
loc=self.FIELD_MAP[fm]))
setattr(self, 'title_sort', functools.partial(self.get_property,
loc=self.FIELD_MAP['sort']))
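        # Usage sketch (illustrative only): the partials bound above mean that, e.g.,
        # self.title(book_id, index_is_id=True) is equivalent to
        # self.get_property(book_id, index_is_id=True, loc=self.FIELD_MAP['title']),
        # i.e. a direct lookup into the cached row for that book.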
d = self.conn.get('SELECT book FROM metadata_dirtied', all=True)
with self.dirtied_lock:
self.dirtied_sequence = 0
self.dirtied_cache = {}
for x in d:
self.dirtied_cache[x[0]] = self.dirtied_sequence
self.dirtied_sequence += 1
self.refresh_ondevice = functools.partial(self.data.refresh_ondevice, self)
self.refresh()
self.last_update_check = self.last_modified()
def break_cycles(self):
self.data.break_cycles()
self.data = self.field_metadata = self.prefs = self.listeners = \
self.refresh_ondevice = None
def initialize_database(self):
metadata_sqlite = P('metadata_sqlite.sql', data=True,
allow_user_override=False).decode('utf-8')
self.conn.executescript(metadata_sqlite)
self.conn.commit()
if self.user_version == 0:
self.user_version = 1
def saved_search_names(self):
return saved_searches().names()
def saved_search_rename(self, old_name, new_name):
saved_searches().rename(old_name, new_name)
def saved_search_lookup(self, name):
return saved_searches().lookup(name)
def saved_search_add(self, name, val):
saved_searches().add(name, val)
def saved_search_delete(self, name):
saved_searches().delete(name)
def saved_search_set_all(self, smap):
saved_searches().set_all(smap)
def last_modified(self):
''' Return last modified time as a UTC datetime object'''
return utcfromtimestamp(os.stat(self.dbpath).st_mtime)
def refresh_format_cache(self):
self.format_filename_cache = defaultdict(dict)
for book_id, fmt, name in self.conn.get(
'SELECT book,format,name FROM data'):
self.format_filename_cache[book_id][fmt.upper() if fmt else ''] = name
self.format_metadata_cache = defaultdict(dict)
def check_if_modified(self):
if self.last_modified() > self.last_update_check:
self.refresh()
self.refresh_format_cache()
self.last_update_check = utcnow()
def path(self, index, index_is_id=False):
        "Return the relative path to the directory containing this book's files as a unicode string."
row = self.data._data[index] if index_is_id else self.data[index]
return row[self.FIELD_MAP['path']].replace('/', os.sep)
def abspath(self, index, index_is_id=False, create_dirs=True):
        "Return the absolute path to the directory containing this book's files as a unicode string."
path = os.path.join(self.library_path, self.path(index, index_is_id=index_is_id))
if create_dirs and not os.path.exists(path):
os.makedirs(path)
return path
def construct_path_name(self, id):
'''
Construct the directory name for this book based on its metadata.
'''
authors = self.authors(id, index_is_id=True)
if not authors:
authors = _('Unknown')
author = ascii_filename(authors.split(',')[0].replace('|', ',')
)[:self.PATH_LIMIT].decode('ascii', 'replace')
title = ascii_filename(self.title(id, index_is_id=True)
)[:self.PATH_LIMIT].decode('ascii', 'replace')
while author[-1] in (' ', '.'):
author = author[:-1]
if not author:
author = ascii_filename(_('Unknown')).decode(
'ascii', 'replace')
path = author + '/' + title + ' (%d)'%id
return path
def construct_file_name(self, id):
'''
Construct the file name for this book based on its metadata.
'''
authors = self.authors(id, index_is_id=True)
if not authors:
authors = _('Unknown')
author = ascii_filename(authors.split(',')[0].replace('|', ',')
)[:self.PATH_LIMIT].decode('ascii', 'replace')
title = ascii_filename(self.title(id, index_is_id=True)
)[:self.PATH_LIMIT].decode('ascii', 'replace')
name = title + ' - ' + author
while name.endswith('.'):
name = name[:-1]
return name
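    # Illustrative example (made-up metadata): for a book with id=123, title
    # 'Great Expectations' and first author 'Charles Dickens',
    # construct_path_name() yields 'Charles Dickens/Great Expectations (123)'
    # and construct_file_name() yields 'Great Expectations - Charles Dickens'.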
def rmtree(self, path, permanent=False):
if not self.normpath(self.library_path).startswith(self.normpath(path)):
delete_tree(path, permanent=permanent)
def normpath(self, path):
path = os.path.abspath(os.path.realpath(path))
if not self.is_case_sensitive:
path = os.path.normcase(path).lower()
return path
def set_path(self, index, index_is_id=False):
'''
        Set the path to the directory containing this book's files based on its
current title and author. If there was a previous directory, its contents
are copied and it is deleted.
'''
id = index if index_is_id else self.id(index)
path = self.construct_path_name(id)
current_path = self.path(id, index_is_id=True).replace(os.sep, '/')
formats = self.formats(id, index_is_id=True)
formats = formats.split(',') if formats else []
# Check if the metadata used to construct paths has changed
fname = self.construct_file_name(id)
changed = False
for format in formats:
name = self.format_filename_cache[id].get(format.upper(), None)
if name and name != fname:
changed = True
break
if path == current_path and not changed:
return
spath = os.path.join(self.library_path, *current_path.split('/'))
tpath = os.path.join(self.library_path, *path.split('/'))
source_ok = current_path and os.path.exists(spath)
wam = WindowsAtomicFolderMove(spath) if iswindows and source_ok else None
try:
if not os.path.exists(tpath):
os.makedirs(tpath)
if source_ok: # Migrate existing files
self.copy_cover_to(id, os.path.join(tpath, 'cover.jpg'),
index_is_id=True, windows_atomic_move=wam,
use_hardlink=True)
for format in formats:
copy_function = functools.partial(self.copy_format_to, id,
format, index_is_id=True, windows_atomic_move=wam,
use_hardlink=True)
try:
self.add_format(id, format, None, index_is_id=True,
path=tpath, notify=False, copy_function=copy_function)
except NoSuchFormat:
continue
self.conn.execute('UPDATE books SET path=? WHERE id=?', (path, id))
self.dirtied([id], commit=False)
self.conn.commit()
self.data.set(id, self.FIELD_MAP['path'], path, row_is_id=True)
            # Delete directories that are no longer needed
if source_ok:
if not samefile(spath, tpath):
if wam is not None:
wam.delete_originals()
self.rmtree(spath, permanent=True)
parent = os.path.dirname(spath)
if len(os.listdir(parent)) == 0:
self.rmtree(parent, permanent=True)
finally:
if wam is not None:
wam.close_handles()
curpath = self.library_path
c1, c2 = current_path.split('/'), path.split('/')
if not self.is_case_sensitive and len(c1) == len(c2):
# On case-insensitive systems, title and author renames that only
# change case don't cause any changes to the directories in the file
# system. This can lead to having the directory names not match the
# title/author, which leads to trouble when libraries are copied to
# a case-sensitive system. The following code attempts to fix this
# by checking each segment. If they are different because of case,
# then rename the segment to some temp file name, then rename it
# back to the correct name. Note that the code above correctly
# handles files in the directories, so no need to do them here.
for oldseg, newseg in zip(c1, c2):
if oldseg.lower() == newseg.lower() and oldseg != newseg:
try:
os.rename(os.path.join(curpath, oldseg),
os.path.join(curpath, newseg))
except:
break # Fail silently since nothing catastrophic has happened
curpath = os.path.join(curpath, newseg)
def add_listener(self, listener):
'''
        Add a listener. It will be called on change events with two arguments:
        the event name and the list of affected ids.
'''
self.listeners.add(listener)
def notify(self, event, ids=[]):
'Notify all listeners'
for listener in self.listeners:
try:
listener(event, ids)
except:
traceback.print_exc()
continue
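    # Usage sketch (hypothetical listener): any callable taking (event, ids) can be
    # registered, e.g.
    #   def on_change(event, ids):
    #       print event, ids          # e.g. 'metadata', [5, 7] or 'cover', [12]
    #   db.add_listener(on_change)
    # notify() calls listeners synchronously; exceptions they raise are printed
    # and otherwise ignored.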
def cover(self, index, index_is_id=False, as_file=False, as_image=False,
as_path=False):
'''
Return the cover image as a bytestring (in JPEG format) or None.
WARNING: Using as_path will copy the cover to a temp file and return
the path to the temp file. You should delete the temp file when you are
done with it.
:param as_file: If True return the image as an open file object (a SpooledTemporaryFile)
:param as_image: If True return the image as a QImage object
'''
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
if as_path:
pt = PersistentTemporaryFile('_dbcover.jpg')
with pt:
shutil.copyfileobj(f, pt)
return pt.name
if as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
shutil.copyfileobj(f, ret)
ret.seek(0)
else:
ret = f.read()
if as_image:
from PyQt4.Qt import QImage
i = QImage()
i.loadFromData(ret)
ret = i
return ret
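    # Usage sketch (illustrative): with as_path=True the caller owns the returned
    # temporary file and must delete it when done, e.g.
    #   p = db.cover(book_id, index_is_id=True, as_path=True)
    #   if p:
    #       try:
    #           do_something_with(p)   # do_something_with is a placeholder
    #       finally:
    #           os.remove(p)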
def cover_last_modified(self, index, index_is_id=False):
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
try:
return utcfromtimestamp(os.stat(path).st_mtime)
except:
# Cover doesn't exist
pass
return self.last_modified()
### The field-style interface. These use field keys.
def get_field(self, idx, key, default=None, index_is_id=False):
mi = self.get_metadata(idx, index_is_id=index_is_id,
get_cover=key == 'cover')
return mi.get(key, default)
def standard_field_keys(self):
return self.field_metadata.standard_field_keys()
def custom_field_keys(self, include_composites=True):
return self.field_metadata.custom_field_keys(include_composites)
def all_field_keys(self):
return self.field_metadata.all_field_keys()
def sortable_field_keys(self):
return self.field_metadata.sortable_field_keys()
def searchable_fields(self):
return self.field_metadata.searchable_fields()
def search_term_to_field_key(self, term):
return self.field_metadata.search_term_to_field_key(term)
def custom_field_metadata(self, include_composites=True):
return self.field_metadata.custom_field_metadata(include_composites)
def all_metadata(self):
return self.field_metadata.all_metadata()
def metadata_for_field(self, key):
return self.field_metadata[key]
def clear_dirtied(self, book_id, sequence):
'''
        Clear the dirtied indicator for the given book. This is used when fetching
        metadata, creating an OPF, and writing a file are separated into steps.
        The last step is clearing the indicator.
'''
with self.dirtied_lock:
dc_sequence = self.dirtied_cache.get(book_id, None)
# print 'clear_dirty: check book', book_id, dc_sequence
if dc_sequence is None or sequence is None or dc_sequence == sequence:
# print 'needs to be cleaned'
self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
(book_id,))
self.conn.commit()
try:
del self.dirtied_cache[book_id]
except:
pass
elif dc_sequence is not None:
# print 'book needs to be done again'
pass
def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
commit=True, callback=None):
'''
        Write metadata for each record to an individual OPF file. If callback
        is not None, it is called once at the start with the number of book_ids
        being processed, and once for every book_id with arguments (book_id,
        mi, ok).
'''
if book_ids is None:
book_ids = [x[0] for x in self.conn.get(
'SELECT book FROM metadata_dirtied', all=True)]
if callback is not None:
book_ids = tuple(book_ids)
callback(len(book_ids), True, False)
for book_id in book_ids:
if not self.data.has_id(book_id):
if callback is not None:
callback(book_id, None, False)
continue
path, mi, sequence = self.get_metadata_for_dump(book_id)
if path is None:
if callback is not None:
callback(book_id, mi, False)
continue
try:
raw = metadata_to_opf(mi)
with lopen(path, 'wb') as f:
f.write(raw)
if remove_from_dirtied:
self.clear_dirtied(book_id, sequence)
except:
pass
if callback is not None:
callback(book_id, mi, True)
if commit:
self.conn.commit()
def update_last_modified(self, book_ids, commit=False, now=None):
if now is None:
now = nowf()
if book_ids:
self.conn.executemany(
'UPDATE books SET last_modified=? WHERE id=?',
[(now, book) for book in book_ids])
for book_id in book_ids:
self.data.set(book_id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if commit:
self.conn.commit()
def dirtied(self, book_ids, commit=True):
self.update_last_modified(book_ids)
for book in book_ids:
with self.dirtied_lock:
# print 'dirtied: check id', book
if book in self.dirtied_cache:
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
continue
# print 'book not already dirty'
self.conn.execute(
'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
(book,))
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
# If the commit doesn't happen, then the DB table will be wrong. This
# could lead to a problem because on restart, we won't put the book back
# into the dirtied_cache. We deal with this by writing the dirtied_cache
# back to the table on GUI exit. Not perfect, but probably OK
if book_ids and commit:
self.conn.commit()
def get_a_dirtied_book(self):
with self.dirtied_lock:
l = len(self.dirtied_cache)
if l > 0:
# The random stuff is here to prevent a single book from
# blocking progress if its metadata cannot be written for some
# reason.
id_ = self.dirtied_cache.keys()[random.randint(0, l-1)]
sequence = self.dirtied_cache[id_]
return (id_, sequence)
return (None, None)
def dirty_queue_length(self):
return len(self.dirtied_cache)
def commit_dirty_cache(self):
'''
Set the dirty indication for every book in the cache. The vast majority
of the time, the indication will already be set. However, sometimes
exceptions may have prevented a commit, which may remove some dirty
        indications from the DB. This call will put them back. Note that there
        is no problem with setting a dirty indication for a book that isn't in
        fact dirty; it just wastes a few cycles.
'''
with self.dirtied_lock:
book_ids = list(self.dirtied_cache.keys())
self.dirtied_cache = {}
self.dirtied(book_ids)
def get_metadata_for_dump(self, idx):
path, mi = (None, None)
# get the current sequence number for this book to pass back to the
# backup thread. This will avoid double calls in the case where the
# thread has not done the work between the put and the get_metadata
with self.dirtied_lock:
sequence = self.dirtied_cache.get(idx, None)
# print 'get_md_for_dump', idx, sequence
try:
# While a book is being created, the path is empty. Don't bother to
# try to write the opf, because it will go to the wrong folder.
if self.path(idx, index_is_id=True):
path = os.path.join(self.abspath(idx, index_is_id=True), 'metadata.opf')
mi = self.get_metadata(idx, index_is_id=True)
# Always set cover to cover.jpg. Even if cover doesn't exist,
# no harm done. This way no need to call dirtied when
# cover is set/removed
mi.cover = 'cover.jpg'
except:
# This almost certainly means that the book has been deleted while
# the backup operation sat in the queue.
pass
return (path, mi, sequence)
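    # Sketch of the intended metadata-backup flow (inferred from the methods above,
    # not a verbatim copy of the actual backup thread):
    #   book_id, sequence = db.get_a_dirtied_book()
    #   if book_id is not None:
    #       path, mi, seq = db.get_metadata_for_dump(book_id)
    #       if path is not None:
    #           with lopen(path, 'wb') as f:
    #               f.write(metadata_to_opf(mi))
    #           db.clear_dirtied(book_id, seq)  # no-op if the book was re-dirtied meanwhile
    # The sequence number prevents clearing a book that was dirtied again while
    # its OPF was being written.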
def get_metadata(self, idx, index_is_id=False, get_cover=False,
get_user_categories=True, cover_as_data=False):
'''
Convenience method to return metadata as a :class:`Metadata` object.
Note that the list of formats is not verified.
'''
idx = idx if index_is_id else self.id(idx)
try:
row = self.data._data[idx]
except:
row = None
if row is None:
raise ValueError('No book with id: %d'%idx)
fm = self.FIELD_MAP
mi = Metadata(None, template_cache=self.formatter_template_cache)
aut_list = row[fm['au_map']]
if aut_list:
aut_list = [p.split(':::') for p in aut_list.split(':#:') if p]
else:
aut_list = []
aum = []
aus = {}
aul = {}
try:
for (author, author_sort, link) in aut_list:
aut = author.replace('|', ',')
aum.append(aut)
aus[aut] = author_sort.replace('|', ',')
aul[aut] = link
except ValueError:
# Author has either ::: or :#: in it
for x in row[fm['authors']].split(','):
aum.append(x.replace('|', ','))
aul[aum[-1]] = ''
aus[aum[-1]] = aum[-1]
mi.title = row[fm['title']]
mi.authors = aum
mi.author_sort = row[fm['author_sort']]
mi.author_sort_map = aus
mi.author_link_map = aul
mi.comments = row[fm['comments']]
mi.publisher = row[fm['publisher']]
mi.timestamp = row[fm['timestamp']]
mi.pubdate = row[fm['pubdate']]
mi.uuid = row[fm['uuid']]
mi.title_sort = row[fm['sort']]
mi.last_modified = row[fm['last_modified']]
formats = row[fm['formats']]
mi.format_metadata = {}
if not formats:
good_formats = None
else:
formats = sorted(formats.split(','))
mi.format_metadata = FormatMetadata(self, idx, formats)
good_formats = FormatsList(formats, mi.format_metadata)
mi.formats = good_formats
mi.db_approx_formats = formats
mi._proxy_metadata = p = ProxyMetadata(row[fm['size']], row[fm['ondevice']], formats)
mi.book_size = p.book_size
mi.ondevice_col= p.ondevice_col
tags = row[fm['tags']]
if tags:
mi.tags = [i.strip() for i in tags.split(',')]
languages = row[fm['languages']]
if languages:
mi.languages = [i.strip() for i in languages.split(',')]
mi.series = row[fm['series']]
if mi.series:
mi.series_index = row[fm['series_index']]
mi.rating = row[fm['rating']]
mi.set_identifiers(self.get_identifiers(idx, index_is_id=True))
mi.application_id = idx
mi.id = idx
mi.set_all_user_metadata(self.field_metadata.custom_field_metadata())
for key, meta in self.field_metadata.custom_iteritems():
if meta['datatype'] == 'composite':
mi.set(key, val=row[meta['rec_index']])
else:
val, extra = self.get_custom_and_extra(idx, label=meta['label'],
index_is_id=True)
mi.set(key, val=val, extra=extra)
user_cats = self.prefs['user_categories']
user_cat_vals = {}
if get_user_categories:
for ucat in user_cats:
res = []
for name,cat,ign in user_cats[ucat]:
v = mi.get(cat, None)
if isinstance(v, list):
if name in v:
res.append([name,cat])
elif name == v:
res.append([name,cat])
user_cat_vals[ucat] = res
mi.user_categories = user_cat_vals
if get_cover:
if cover_as_data:
cdata = self.cover(idx, index_is_id=True)
if cdata:
mi.cover_data = ('jpeg', cdata)
else:
mi.cover = self.cover(idx, index_is_id=True, as_path=True)
mi.has_cover = _('Yes') if self.has_cover(idx) else ''
return mi
def has_book(self, mi):
title = mi.title
if title:
if not isinstance(title, unicode):
title = title.decode(preferred_encoding, 'replace')
return bool(self.conn.get('SELECT id FROM books where title=?', (title,), all=False))
return False
def has_id(self, id_):
return self.data._data[id_] is not None
def books_with_same_title(self, mi, all_matches=True):
title = mi.title
ans = set()
if title:
title = lower(force_unicode(title))
for book_id in self.all_ids():
x = self.title(book_id, index_is_id=True)
if lower(x) == title:
ans.add(book_id)
if not all_matches:
break
return ans
def find_identical_books(self, mi):
fuzzy_title_patterns = [(re.compile(pat, re.IGNORECASE) if
isinstance(pat, basestring) else pat, repl) for pat, repl in
[
(r'[\[\](){}<>\'";,:#]', ''),
(get_title_sort_pat(), ''),
(r'[-._]', ' '),
(r'\s+', ' ')
]
]
def fuzzy_title(title):
title = title.strip().lower()
for pat, repl in fuzzy_title_patterns:
title = pat.sub(repl, title)
return title
identical_book_ids = set([])
if mi.authors:
try:
quathors = mi.authors[:10] # Too many authors causes parsing of
# the search expression to fail
query = u' and '.join([u'author:"=%s"'%(a.replace('"', '')) for a in
quathors])
qauthors = mi.authors[10:]
except ValueError:
return identical_book_ids
try:
book_ids = self.data.parse(query)
except:
traceback.print_exc()
return identical_book_ids
if qauthors and book_ids:
matches = set()
qauthors = {lower(x) for x in qauthors}
for book_id in book_ids:
aut = self.authors(book_id, index_is_id=True)
if aut:
aut = {lower(x.replace('|', ',')) for x in
aut.split(',')}
if aut.issuperset(qauthors):
matches.add(book_id)
book_ids = matches
for book_id in book_ids:
fbook_title = self.title(book_id, index_is_id=True)
fbook_title = fuzzy_title(fbook_title)
mbook_title = fuzzy_title(mi.title)
if fbook_title == mbook_title:
identical_book_ids.add(book_id)
return identical_book_ids
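    # Illustration of the fuzzy title matching above (example titles are made up):
    # punctuation such as [](){}<>'";,:# is stripped, the title-sort pattern
    # (typically a leading article) is removed, -._ become spaces and whitespace
    # is collapsed, so 'Foo-Bar: a tale (v. 2)' and 'foo bar a tale v 2'
    # compare equal.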
def remove_cover(self, id, notify=True, commit=True):
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if os.path.exists(path):
try:
os.remove(path)
except (IOError, OSError):
time.sleep(0.2)
os.remove(path)
self.conn.execute('UPDATE books SET has_cover=0 WHERE id=?', (id,))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], False, row_is_id=True)
if notify:
self.notify('cover', [id])
def set_cover(self, id, data, notify=True, commit=True):
'''
Set the cover for this book.
`data`: Can be either a QImage, QPixmap, file object or bytestring
'''
base_path = os.path.join(self.library_path, self.path(id,
index_is_id=True))
if not os.path.exists(base_path):
self.set_path(id, index_is_id=True)
base_path = os.path.join(self.library_path, self.path(id,
index_is_id=True))
self.dirtied([id])
if not os.path.exists(base_path):
os.makedirs(base_path)
path = os.path.join(base_path, 'cover.jpg')
if callable(getattr(data, 'save', None)):
data.save(path)
else:
if callable(getattr(data, 'read', None)):
data = data.read()
try:
save_cover_data_to(data, path)
except (IOError, OSError):
time.sleep(0.2)
save_cover_data_to(data, path)
now = nowf()
self.conn.execute(
'UPDATE books SET has_cover=1,last_modified=? WHERE id=?',
(now, id))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], True, row_is_id=True)
self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if notify:
self.notify('cover', [id])
def has_cover(self, id):
return self.data.get(id, self.FIELD_MAP['cover'], row_is_id=True)
def set_has_cover(self, id, val):
dval = 1 if val else 0
now = nowf()
self.conn.execute(
'UPDATE books SET has_cover=?,last_modified=? WHERE id=?',
(dval, now, id))
self.data.set(id, self.FIELD_MAP['cover'], val, row_is_id=True)
self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
def book_on_device(self, id):
if callable(self.book_on_device_func):
return self.book_on_device_func(id)
return None
def book_on_device_string(self, id):
loc = []
count = 0
on = self.book_on_device(id)
if on is not None:
m, a, b, count = on[:4]
if m is not None:
loc.append(_('Main'))
if a is not None:
loc.append(_('Card A'))
if b is not None:
loc.append(_('Card B'))
return ', '.join(loc) + ((_(' (%s books)')%count) if count > 1 else '')
def set_book_on_device_func(self, func):
self.book_on_device_func = func
def all_formats(self):
formats = self.conn.get('SELECT DISTINCT format from data')
if not formats:
return set([])
return set([f[0] for f in formats])
def format_files(self, index, index_is_id=False):
id = index if index_is_id else self.id(index)
return [(v, k) for k, v in self.format_filename_cache[id].iteritems()]
def formats(self, index, index_is_id=False, verify_formats=True):
''' Return available formats as a comma separated list or None if there are no available formats '''
id_ = index if index_is_id else self.id(index)
formats = self.data.get(id_, self.FIELD_MAP['formats'], row_is_id=True)
if not formats:
return None
if not verify_formats:
return formats
formats = formats.split(',')
ans = []
for fmt in formats:
if self.format_abspath(id_, fmt, index_is_id=True) is not None:
ans.append(fmt)
if not ans:
return None
return ','.join(ans)
def has_format(self, index, format, index_is_id=False):
return self.format_abspath(index, format, index_is_id) is not None
def format_last_modified(self, id_, fmt):
m = self.format_metadata(id_, fmt)
if m:
return m['mtime']
def format_metadata(self, id_, fmt, allow_cache=True, update_db=False,
commit=False):
if not fmt:
return {}
fmt = fmt.upper()
if allow_cache:
x = self.format_metadata_cache[id_].get(fmt, None)
if x is not None:
return x
path = self.format_abspath(id_, fmt, index_is_id=True)
ans = {}
if path is not None:
stat = os.stat(path)
ans['path'] = path
ans['size'] = stat.st_size
ans['mtime'] = utcfromtimestamp(stat.st_mtime)
self.format_metadata_cache[id_][fmt] = ans
if update_db:
self.conn.execute(
'UPDATE data SET uncompressed_size=? WHERE format=? AND'
' book=?', (stat.st_size, fmt, id_))
if commit:
self.conn.commit()
return ans
def format_hash(self, id_, fmt):
path = self.format_abspath(id_, fmt, index_is_id=True)
if path is None:
raise NoSuchFormat('Record %d has no fmt: %s'%(id_, fmt))
sha = hashlib.sha256()
with lopen(path, 'rb') as f:
while True:
raw = f.read(SPOOL_SIZE)
sha.update(raw)
if len(raw) < SPOOL_SIZE:
break
return sha.hexdigest()
def format_path(self, index, fmt, index_is_id=False):
'''
This method is intended to be used only in those rare situations, like
Drag'n Drop, when you absolutely need the path to the original file.
Otherwise, use format(..., as_path=True).
Note that a networked backend will always return None.
'''
path = self.format_abspath(index, fmt, index_is_id=index_is_id)
if path is None:
id_ = index if index_is_id else self.id(index)
raise NoSuchFormat('Record %d has no format: %s'%(id_, fmt))
return path
def format_abspath(self, index, format, index_is_id=False):
'''
Return absolute path to the ebook file of format `format`
WARNING: This method will return a dummy path for a network backend DB,
so do not rely on it, use format(..., as_path=True) instead.
Currently used only in calibredb list, the viewer and the catalogs (via
get_data_as_dict()).
Apart from the viewer, I don't believe any of the others do any file
I/O with the results of this call.
'''
id = index if index_is_id else self.id(index)
try:
name = self.format_filename_cache[id][format.upper()]
except:
return None
if name:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
format = ('.' + format.lower()) if format else ''
fmt_path = os.path.join(path, name+format)
if os.path.exists(fmt_path):
return fmt_path
try:
candidates = glob.glob(os.path.join(path, '*'+format))
except: # If path contains strange characters this throws an exc
candidates = []
if format and candidates and os.path.exists(candidates[0]):
try:
shutil.copyfile(candidates[0], fmt_path)
except:
# This can happen if candidates[0] or fmt_path is too long,
# which can happen if the user copied the library from a
# non windows machine to a windows machine.
return None
return fmt_path
def copy_format_to(self, index, fmt, dest, index_is_id=False,
windows_atomic_move=None, use_hardlink=False):
'''
Copy the format ``fmt`` to the file like object ``dest``. If the
specified format does not exist, raises :class:`NoSuchFormat` error.
dest can also be a path, in which case the format is copied to it, iff
the path is different from the current path (taking case sensitivity
into account).
If use_hardlink is True, a hard link will be created instead of the
file being copied. Use with care, because a hard link means that
modifying any one file will cause both files to be modified.
windows_atomic_move is an internally used parameter. You should not use
it in any code outside this module.
'''
path = self.format_abspath(index, fmt, index_is_id=index_is_id)
if path is None:
id_ = index if index_is_id else self.id(index)
raise NoSuchFormat('Record %d has no %s file'%(id_, fmt))
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if dest:
if samefile(path, dest):
# Ensure that the file has the same case as dest
try:
if path != dest:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
windows_atomic_move.copy_path_to(path, dest)
else:
if hasattr(dest, 'write'):
with lopen(path, 'rb') as f:
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
elif dest:
if samefile(dest, path):
if not self.is_case_sensitive and path != dest:
# Ensure that the file has the same case as dest
try:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
if use_hardlink:
try:
hardlink_file(path, dest)
return
except:
pass
with lopen(path, 'rb') as f, lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
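    # Usage sketch (illustrative only): dest may be a writable file-like object or
    # a filesystem path, e.g.
    #   from io import BytesIO
    #   buf = BytesIO()
    #   db.copy_format_to(book_id, 'EPUB', buf, index_is_id=True)
    # or
    #   db.copy_format_to(book_id, 'EPUB', '/tmp/out.epub', index_is_id=True)
    # NoSuchFormat is raised if the book has no EPUB file.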
def copy_cover_to(self, index, dest, index_is_id=False,
windows_atomic_move=None, use_hardlink=False):
'''
Copy the cover to the file like object ``dest``. Returns False
if no cover exists or dest is the same file as the current cover.
dest can also be a path in which case the cover is
copied to it iff the path is different from the current path (taking
case sensitivity into account).
If use_hardlink is True, a hard link will be created instead of the
file being copied. Use with care, because a hard link means that
modifying any one file will cause both files to be modified.
windows_atomic_move is an internally used parameter. You should not use
it in any code outside this module.
'''
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if os.access(path, os.R_OK) and dest and not samefile(dest, path):
windows_atomic_move.copy_path_to(path, dest)
return True
else:
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
if hasattr(dest, 'write'):
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
return True
elif dest and not samefile(dest, path):
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True
return False
def format(self, index, format, index_is_id=False, as_file=False,
mode='r+b', as_path=False, preserve_filename=False):
'''
Return the ebook format as a bytestring or `None` if the format doesn't exist,
or we don't have permission to write to the ebook file.
:param as_file: If True the ebook format is returned as a file object. Note
that the file object is a SpooledTemporaryFile, so if what you want to
do is copy the format to another file, use :method:`copy_format_to`
instead for performance.
:param as_path: Copies the format file to a temp file and returns the
path to the temp file
:param preserve_filename: If True and returning a path the filename is
the same as that used in the library. Note that using
this means that repeated calls yield the same
temp file (which is re-created each time)
:param mode: This is ignored (present for legacy compatibility)
'''
path = self.format_abspath(index, format, index_is_id=index_is_id)
if path is not None:
with lopen(path, mode) as f:
if as_path:
if preserve_filename:
bd = base_dir()
d = os.path.join(bd, 'format_abspath')
try:
os.makedirs(d)
except:
pass
fname = os.path.basename(path)
ret = os.path.join(d, fname)
with lopen(ret, 'wb') as f2:
shutil.copyfileobj(f, f2)
else:
with PersistentTemporaryFile('.'+format.lower()) as pt:
shutil.copyfileobj(f, pt)
ret = pt.name
elif as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
shutil.copyfileobj(f, ret)
ret.seek(0)
# Various bits of code try to use the name as the default
# title when reading metadata, so set it
ret.name = f.name
else:
ret = f.read()
return ret
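    # Usage sketch (illustrative): the three return styles of format() are
    #   data = db.format(book_id, 'EPUB', index_is_id=True)                # bytestring
    #   tmp  = db.format(book_id, 'EPUB', index_is_id=True, as_path=True)  # temp file path
    #   fobj = db.format(book_id, 'EPUB', index_is_id=True, as_file=True)  # SpooledTemporaryFile
    # For copying a format to another file, prefer copy_format_to() as noted above.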
def add_format_with_hooks(self, index, format, fpath, index_is_id=False,
path=None, notify=True, replace=True):
npath = self.run_import_plugins(fpath, format)
format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
stream = lopen(npath, 'rb')
format = check_ebook_format(stream, format)
id = index if index_is_id else self.id(index)
retval = self.add_format(id, format, stream, replace=replace,
index_is_id=True, path=path, notify=notify)
run_plugins_on_postimport(self, id, format)
return retval
def add_format(self, index, format, stream, index_is_id=False, path=None,
notify=True, replace=True, copy_function=None):
id = index if index_is_id else self.id(index)
if not format:
format = ''
self.format_metadata_cache[id].pop(format.upper(), None)
name = self.format_filename_cache[id].get(format.upper(), None)
if path is None:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
if name and not replace:
return False
name = self.construct_file_name(id)
ext = ('.' + format.lower()) if format else ''
dest = os.path.join(path, name+ext)
pdir = os.path.dirname(dest)
if not os.path.exists(pdir):
os.makedirs(pdir)
size = 0
if copy_function is not None:
copy_function(dest)
size = os.path.getsize(dest)
else:
if (not getattr(stream, 'name', False) or not samefile(dest,
stream.name)):
with lopen(dest, 'wb') as f:
shutil.copyfileobj(stream, f)
size = f.tell()
elif os.path.exists(dest):
size = os.path.getsize(dest)
self.conn.execute('INSERT OR REPLACE INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)',
(id, format.upper(), size, name))
self.update_last_modified([id], commit=False)
self.conn.commit()
self.format_filename_cache[id][format.upper()] = name
self.refresh_ids([id])
if notify:
self.notify('metadata', [id])
return True
def save_original_format(self, book_id, fmt, notify=True):
fmt = fmt.upper()
if 'ORIGINAL' in fmt:
raise ValueError('Cannot save original of an original fmt')
opath = self.format_abspath(book_id, fmt, index_is_id=True)
if opath is None:
return False
nfmt = 'ORIGINAL_'+fmt
with lopen(opath, 'rb') as f:
return self.add_format(book_id, nfmt, f, index_is_id=True, notify=notify)
def original_fmt(self, book_id, fmt):
nfmt = ('ORIGINAL_%s'%fmt).upper()
opath = self.format_abspath(book_id, nfmt, index_is_id=True)
return fmt if opath is None else nfmt
def restore_original_format(self, book_id, original_fmt, notify=True):
opath = self.format_abspath(book_id, original_fmt, index_is_id=True)
if opath is not None:
fmt = original_fmt.partition('_')[2]
with lopen(opath, 'rb') as f:
self.add_format(book_id, fmt, f, index_is_id=True, notify=False)
self.remove_format(book_id, original_fmt, index_is_id=True, notify=notify)
return True
return False
def delete_book(self, id, notify=True, commit=True, permanent=False,
do_clean=True):
'''
Removes book from the result cache and the underlying database.
If you set commit to False, you must call clean() manually afterwards
'''
try:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
except:
path = None
if path and os.path.exists(path):
self.rmtree(path, permanent=permanent)
parent = os.path.dirname(path)
if len(os.listdir(parent)) == 0:
self.rmtree(parent, permanent=permanent)
self.conn.execute('DELETE FROM books WHERE id=?', (id,))
if commit:
self.conn.commit()
if do_clean:
self.clean()
self.data.books_deleted([id])
if notify:
self.notify('delete', [id])
def remove_format(self, index, format, index_is_id=False, notify=True,
commit=True, db_only=False):
id = index if index_is_id else self.id(index)
if not format:
format = ''
self.format_metadata_cache[id].pop(format.upper(), None)
name = self.format_filename_cache[id].get(format.upper(), None)
if name:
if not db_only:
try:
path = self.format_abspath(id, format, index_is_id=True)
if path:
delete_file(path)
except:
traceback.print_exc()
self.format_filename_cache[id].pop(format.upper(), None)
self.conn.execute('DELETE FROM data WHERE book=? AND format=?', (id, format.upper()))
if commit:
self.conn.commit()
self.refresh_ids([id])
if notify:
self.notify('metadata', [id])
def clean_standard_field(self, field, commit=False):
# Don't bother with validity checking. Let the exception fly out so
# we can see what happened
def doit(table, ltable_col):
st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
'FROM books WHERE id=book) < 1;')%table
self.conn.execute(st)
st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
'FROM books_%(table)s_link WHERE '
'%(ltable_col)s=%(table)s.id) < 1;') % dict(
table=table, ltable_col=ltable_col)
self.conn.execute(st)
fm = self.field_metadata[field]
doit(fm['table'], fm['link_column'])
if commit:
self.conn.commit()
def clean(self):
'''
Remove orphaned entries.
'''
def doit(ltable, table, ltable_col):
st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
'FROM books WHERE id=book) < 1;')%ltable
self.conn.execute(st)
st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
'FROM books_%(ltable)s_link WHERE '
'%(ltable_col)s=%(table)s.id) < 1;') % dict(
ltable=ltable, table=table, ltable_col=ltable_col)
self.conn.execute(st)
for ltable, table, ltable_col in [
('authors', 'authors', 'author'),
('publishers', 'publishers', 'publisher'),
('tags', 'tags', 'tag'),
('series', 'series', 'series'),
('languages', 'languages', 'lang_code'),
]:
doit(ltable, table, ltable_col)
for id_, tag in self.conn.get('SELECT id, name FROM tags', all=True):
if not tag.strip():
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?',
(id_,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id_,))
self.clean_custom()
self.conn.commit()
def get_books_for_category(self, category, id_):
ans = set([])
if category not in self.field_metadata:
return ans
field = self.field_metadata[category]
if field['datatype'] == 'composite':
dex = field['rec_index']
for book in self.data.iterall():
if field['is_multiple']:
vals = [v.strip() for v in
book[dex].split(field['is_multiple']['cache_to_list'])
if v.strip()]
if id_ in vals:
ans.add(book[0])
elif book[dex] == id_:
ans.add(book[0])
return ans
ans = self.conn.get(
'SELECT book FROM books_{tn}_link WHERE {col}=?'.format(
tn=field['table'], col=field['link_column']), (id_,))
return set(x[0] for x in ans)
########## data structures for get_categories
CATEGORY_SORTS = CATEGORY_SORTS
MATCH_TYPE = ('any', 'all')
class TCat_Tag(object):
def __init__(self, name, sort):
self.n = name
self.s = sort
self.c = 0
self.id_set = set()
self.rt = 0
self.rc = 0
self.id = None
def set_all(self, c, rt, rc, id):
self.c = c
self.rt = rt
self.rc = rc
self.id = id
def __str__(self):
return unicode(self)
def __unicode__(self):
return 'n=%s s=%s c=%d rt=%d rc=%d id=%s'%\
(self.n, self.s, self.c, self.rt, self.rc, self.id)
def clean_user_categories(self):
user_cats = self.prefs.get('user_categories', {})
new_cats = {}
for k in user_cats:
comps = [c.strip() for c in k.split('.') if c.strip()]
if len(comps) == 0:
i = 1
while True:
if unicode(i) not in user_cats:
new_cats[unicode(i)] = user_cats[k]
break
i += 1
else:
new_cats['.'.join(comps)] = user_cats[k]
try:
if new_cats != user_cats:
self.prefs.set('user_categories', new_cats)
except:
pass
return new_cats
def get_categories(self, sort='name', ids=None, icon_map=None):
#start = last = time.clock()
if icon_map is not None and type(icon_map) != TagsIcons:
            raise TypeError('icon_map passed to get_categories must be of type TagsIcons')
if sort not in self.CATEGORY_SORTS:
raise ValueError('sort ' + sort + ' not a valid value')
self.books_list_filter.change([] if not ids else ids)
id_filter = None if ids is None else frozenset(ids)
tb_cats = self.field_metadata
tcategories = {}
tids = {}
md = []
# First, build the maps. We need a category->items map and an
# item -> (item_id, sort_val) map to use in the books loop
for category in tb_cats.iterkeys():
cat = tb_cats[category]
if not cat['is_category'] or cat['kind'] in ['user', 'search'] \
or category in ['news', 'formats'] or cat.get('is_csp',
False):
continue
# Get the ids for the item values
if not cat['is_custom']:
funcs = {
'authors': self.get_authors_with_ids,
'series': self.get_series_with_ids,
'publisher': self.get_publishers_with_ids,
'tags': self.get_tags_with_ids,
'languages': self.get_languages_with_ids,
'rating': self.get_ratings_with_ids,
}
func = funcs.get(category, None)
if func:
list = func()
else:
                    raise ValueError(category + ' has no get_*_with_ids function')
else:
list = self.get_custom_items_with_ids(label=cat['label'])
tids[category] = {}
if category == 'authors':
for l in list:
(id, val, sort_val) = (l[0], l[1], l[2])
tids[category][val] = (id, sort_val)
elif category == 'languages':
for l in list:
id, val = l[0], calibre_langcode_to_name(l[1])
tids[category][l[1]] = (id, val)
elif cat['datatype'] == 'series':
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, title_sort(val))
elif cat['datatype'] == 'rating':
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, '{0:05.2f}'.format(val))
elif cat['datatype'] == 'text' and cat['is_multiple'] and \
cat['display'].get('is_names', False):
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, author_to_author_sort(val))
else:
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, val)
# add an empty category to the category map
tcategories[category] = {}
# create a list of category/field_index for the books scan to use.
# This saves iterating through field_metadata for each book
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None), False))
for category in tb_cats.iterkeys():
cat = tb_cats[category]
if cat['datatype'] == 'composite' and \
cat['display'].get('make_category', False):
tids[category] = {}
tcategories[category] = {}
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None),
cat['datatype'] == 'composite'))
#print 'end phase "collection":', time.clock() - last, 'seconds'
#last = time.clock()
# Now scan every book looking for category items.
# Code below is duplicated because it shaves off 10% of the loop time
id_dex = self.FIELD_MAP['id']
rating_dex = self.FIELD_MAP['rating']
tag_class = LibraryDatabase2.TCat_Tag
for book in self.data.iterall():
if id_filter is not None and book[id_dex] not in id_filter:
continue
rating = book[rating_dex]
# We kept track of all possible category field_map positions above
for (cat, dex, mult, is_comp) in md:
if not book[dex]:
continue
tid_cat = tids[cat]
tcats_cat = tcategories[cat]
if not mult:
val = book[dex]
if is_comp:
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, val)
tcats_cat[val] = item
item.c += 1
item.id = val
if rating > 0:
item.rt += rating
item.rc += 1
continue
try:
(item_id, sort_val) = tid_cat[val] # let exceptions fly
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, sort_val)
tcats_cat[val] = item
item.c += 1
item.id_set.add(book[0])
item.id = item_id
if rating > 0:
item.rt += rating
item.rc += 1
except:
prints('get_categories: item', val, 'is not in', cat, 'list!')
else:
vals = book[dex].split(mult)
if is_comp:
vals = [v.strip() for v in vals if v.strip()]
for val in vals:
if val not in tid_cat:
tid_cat[val] = (val, val)
for val in vals:
try:
(item_id, sort_val) = tid_cat[val] # let exceptions fly
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, sort_val)
tcats_cat[val] = item
item.c += 1
item.id_set.add(book[0])
item.id = item_id
if rating > 0:
item.rt += rating
item.rc += 1
except:
prints('get_categories: item', val, 'is not in', cat, 'list!')
#print 'end phase "books":', time.clock() - last, 'seconds'
#last = time.clock()
# Now do news
tcategories['news'] = {}
cat = tb_cats['news']
tn = cat['table']
cn = cat['column']
if ids is None:
query = '''SELECT id, {0}, count, avg_rating, sort
FROM tag_browser_{1}'''.format(cn, tn)
else:
query = '''SELECT id, {0}, count, avg_rating, sort
FROM tag_browser_filtered_{1}'''.format(cn, tn)
# results will be sorted later
data = self.conn.get(query)
for r in data:
item = LibraryDatabase2.TCat_Tag(r[1], r[1])
item.set_all(c=r[2], rt=r[2]*r[3], rc=r[2], id=r[0])
tcategories['news'][r[1]] = item
#print 'end phase "news":', time.clock() - last, 'seconds'
#last = time.clock()
# Build the real category list by iterating over the temporary copy
# and building the Tag instances.
categories = {}
tag_class = Tag
for category in tb_cats.iterkeys():
if category not in tcategories:
continue
cat = tb_cats[category]
# prepare the place where we will put the array of Tags
categories[category] = []
# icon_map is not None if get_categories is to store an icon and
# possibly a tooltip in the tag structure.
icon = None
label = tb_cats.key_to_label(category)
if icon_map:
if not tb_cats.is_custom_field(category):
if category in icon_map:
icon = icon_map[label]
else:
icon = icon_map['custom:']
icon_map[category] = icon
datatype = cat['datatype']
avgr = lambda x: 0.0 if x.rc == 0 else x.rt/x.rc
# Duplicate the build of items below to avoid using a lambda func
# in the main Tag loop. Saves a few %
if datatype == 'rating':
formatter = (lambda x:u'\u2605'*int(x/2))
avgr = lambda x: x.n
# eliminate the zero ratings line as well as count == 0
items = [v for v in tcategories[category].values() if v.c > 0 and v.n != 0]
elif category == 'authors':
# Clean up the authors strings to human-readable form
formatter = (lambda x: x.replace('|', ','))
items = [v for v in tcategories[category].values() if v.c > 0]
elif category == 'languages':
# Use a human readable language string
formatter = calibre_langcode_to_name
items = [v for v in tcategories[category].values() if v.c > 0]
else:
formatter = (lambda x:unicode(x))
items = [v for v in tcategories[category].values() if v.c > 0]
# sort the list
if sort == 'name':
kf = lambda x:sort_key(x.s)
reverse=False
elif sort == 'popularity':
kf = lambda x: x.c
reverse=True
else:
kf = avgr
reverse=True
items.sort(key=kf, reverse=reverse)
if tweaks['categories_use_field_for_author_name'] == 'author_sort' and\
(category == 'authors' or
(cat['display'].get('is_names', False) and
cat['is_custom'] and cat['is_multiple'] and
cat['datatype'] == 'text')):
use_sort_as_name = True
else:
use_sort_as_name = False
is_editable = (category not in ['news', 'rating', 'languages'] and
datatype != "composite")
categories[category] = [tag_class(formatter(r.n), count=r.c, id=r.id,
avg=avgr(r), sort=r.s, icon=icon,
category=category,
id_set=r.id_set, is_editable=is_editable,
use_sort_as_name=use_sort_as_name)
for r in items]
#print 'end phase "tags list":', time.clock() - last, 'seconds'
#last = time.clock()
# Needed for legacy databases that have multiple ratings that
# map to n stars
for r in categories['rating']:
r.id_set = None
for x in categories['rating']:
if r.name == x.name and r.id != x.id:
r.count = r.count + x.count
categories['rating'].remove(x)
break
# We delayed computing the standard formats category because it does not
# use a view, but is computed dynamically
categories['formats'] = []
icon = None
if icon_map and 'formats' in icon_map:
icon = icon_map['formats']
for fmt in self.conn.get('SELECT DISTINCT format FROM data'):
fmt = fmt[0]
if ids is not None:
count = self.conn.get('''SELECT COUNT(id)
FROM data
WHERE format=? AND
books_list_filter(book)''', (fmt,),
all=False)
else:
count = self.conn.get('''SELECT COUNT(id)
FROM data
WHERE format=?''', (fmt,),
all=False)
if count > 0:
categories['formats'].append(Tag(fmt, count=count, icon=icon,
category='formats', is_editable=False))
if sort == 'popularity':
categories['formats'].sort(key=lambda x: x.count, reverse=True)
else: # no ratings exist to sort on
# No need for ICU here.
categories['formats'].sort(key=lambda x:x.name)
# Now do identifiers. This works like formats
categories['identifiers'] = []
icon = None
if icon_map and 'identifiers' in icon_map:
icon = icon_map['identifiers']
for ident in self.conn.get('SELECT DISTINCT type FROM identifiers'):
ident = ident[0]
if ids is not None:
count = self.conn.get('''SELECT COUNT(book)
FROM identifiers
WHERE type=? AND
books_list_filter(book)''', (ident,),
all=False)
else:
count = self.conn.get('''SELECT COUNT(id)
FROM identifiers
WHERE type=?''', (ident,),
all=False)
if count > 0:
categories['identifiers'].append(Tag(ident, count=count, icon=icon,
category='identifiers',
is_editable=False))
if sort == 'popularity':
categories['identifiers'].sort(key=lambda x: x.count, reverse=True)
else: # no ratings exist to sort on
# No need for ICU here.
categories['identifiers'].sort(key=lambda x:x.name)
#### Now do the user-defined categories. ####
user_categories = dict.copy(self.clean_user_categories())
# We want to use same node in the user category as in the source
# category. To do that, we need to find the original Tag node. There is
# a time/space tradeoff here. By converting the tags into a map, we can
# do the verification in the category loop much faster, at the cost of
# temporarily duplicating the categories lists.
taglist = {}
for c in categories.keys():
taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), categories[c]))
muc = self.prefs.get('grouped_search_make_user_categories', [])
gst = self.prefs.get('grouped_search_terms', {})
for c in gst:
if c not in muc:
continue
user_categories[c] = []
for sc in gst[c]:
if sc in categories.keys():
for t in categories[sc]:
user_categories[c].append([t.name, sc, 0])
gst_icon = icon_map['gst'] if icon_map else None
for user_cat in sorted(user_categories.keys(), key=sort_key):
items = []
names_seen = {}
for (name,label,ign) in user_categories[user_cat]:
n = icu_lower(name)
if label in taglist and n in taglist[label]:
if user_cat in gst:
# for gst items, make copy and consolidate the tags by name.
if n in names_seen:
t = names_seen[n]
t.id_set |= taglist[label][n].id_set
t.count += taglist[label][n].count
t.tooltip = t.tooltip.replace(')', ', ' + label + ')')
else:
t = copy.copy(taglist[label][n])
t.icon = gst_icon
names_seen[t.name] = t
items.append(t)
else:
items.append(taglist[label][n])
                # else: do nothing, to not include nodes with zero counts
cat_name = '@' + user_cat # add the '@' to avoid name collision
# Not a problem if we accumulate entries in the icon map
if icon_map is not None:
icon_map[cat_name] = icon_map['user:']
if sort == 'popularity':
categories[cat_name] = \
sorted(items, key=lambda x: x.count, reverse=True)
elif sort == 'name':
categories[cat_name] = \
sorted(items, key=lambda x: sort_key(x.sort))
else:
categories[cat_name] = \
sorted(items, key=lambda x:x.avg_rating, reverse=True)
#### Finally, the saved searches category ####
items = []
icon = None
if icon_map and 'search' in icon_map:
icon = icon_map['search']
for srch in saved_searches().names():
items.append(Tag(srch, tooltip=saved_searches().lookup(srch),
sort=srch, icon=icon, category='search',
is_editable=False))
if len(items):
if icon_map is not None:
icon_map['search'] = icon_map['search']
categories['search'] = items
#print 'last phase ran in:', time.clock() - last, 'seconds'
#print 'get_categories ran in:', time.clock() - start, 'seconds'
return categories
############# End get_categories
def tags_older_than(self, tag, delta, must_have_tag=None,
must_have_authors=None):
'''
        Return the ids of all books having the tag ``tag`` that are older
        than the specified time. Tag comparison is case insensitive.
:param delta: A timedelta object or None. If None, then all ids with
the tag are returned.
:param must_have_tag: If not None the list of matches will be
restricted to books that have this tag
:param must_have_authors: A list of authors. If not None the list of
matches will be restricted to books that have these authors (case
insensitive).
'''
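        # Hedged usage sketch (not part of the original file; `db`, the tag
        # names and the timedelta are illustrative assumptions only):
        #
        #   from datetime import timedelta
        #   old_news_ids = list(db.tags_older_than('news', timedelta(days=60),
        #                                          must_have_tag='downloaded'))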
tag = tag.lower().strip()
mht = must_have_tag.lower().strip() if must_have_tag else None
now = nowf()
tindex = self.FIELD_MAP['timestamp']
gindex = self.FIELD_MAP['tags']
iindex = self.FIELD_MAP['id']
aindex = self.FIELD_MAP['authors']
mah = must_have_authors
if mah is not None:
mah = [x.replace(',', '|').lower() for x in mah]
mah = ','.join(mah)
for r in self.data._data:
if r is not None:
if delta is None or (now - r[tindex]) > delta:
if mah:
authors = r[aindex] or ''
if authors.lower() != mah:
continue
tags = r[gindex]
if tags:
tags = [x.strip() for x in tags.lower().split(',')]
if tag in tags and (mht is None or mht in tags):
yield r[iindex]
def get_next_series_num_for(self, series):
series_id = None
if series:
series_id = self.conn.get('SELECT id from series WHERE name=?',
(series,), all=False)
if series_id is None:
if isinstance(tweaks['series_index_auto_increment'], (int, float)):
return float(tweaks['series_index_auto_increment'])
return 1.0
series_indices = self.conn.get(
('SELECT series_index FROM books WHERE id IN '
'(SELECT book FROM books_series_link where series=?) '
'ORDER BY series_index'),
(series_id,))
return self._get_next_series_num_for_list(series_indices)
def _get_next_series_num_for_list(self, series_indices):
return _get_next_series_num_for_list(series_indices)
def set(self, row, column, val, allow_case_change=False):
'''
Convenience method for setting the title, authors, publisher, tags or
rating
'''
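        # Illustrative call (the row index, column and value are assumptions,
        # not values taken from this file):
        #
        #   books_to_refresh = db.set(0, 'tags', u'Fiction, Science Fiction',
        #                             allow_case_change=True)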
id = self.data[row][0]
col = self.FIELD_MAP[column]
books_to_refresh = {id}
set_args = (row, col, val)
if column == 'authors':
val = string_to_authors(val)
books_to_refresh |= self.set_authors(id, val, notify=False,
allow_case_change=allow_case_change)
elif column == 'title':
self.set_title(id, val, notify=False)
elif column == 'publisher':
books_to_refresh |= self.set_publisher(id, val, notify=False,
allow_case_change=allow_case_change)
elif column == 'rating':
self.set_rating(id, val, notify=False)
elif column == 'tags':
books_to_refresh |= \
self.set_tags(id, [x.strip() for x in val.split(',') if x.strip()],
append=False, notify=False, allow_case_change=allow_case_change)
self.data.set(*set_args)
self.data.refresh_ids(self, [id])
self.set_path(id, True)
self.notify('metadata', [id])
return books_to_refresh
def set_metadata(self, id, mi, ignore_errors=False, set_title=True,
set_authors=True, commit=True, force_changes=False,
notify=True):
'''
Set metadata for the book `id` from the `Metadata` object `mi`
Setting force_changes=True will force set_metadata to update fields even
if mi contains empty values. In this case, 'None' is distinguished from
'empty'. If mi.XXX is None, the XXX is not replaced, otherwise it is.
The tags, identifiers, and cover attributes are special cases. Tags and
        identifiers cannot be set to None, so they will always be replaced if
force_changes is true. You must ensure that mi contains the values you
want the book to have. Covers are always changed if a new cover is
provided, but are never deleted. Also note that force_changes has no
effect on setting title or authors.
'''
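        # Hedged example of the force_changes semantics described above
        # (`db`, `book_id` and the Metadata values are assumptions):
        #
        #   mi = Metadata(u'Some Title')
        #   mi.publisher = ''    # empty, but not None
        #   db.set_metadata(book_id, mi, force_changes=True)  # clears publisher
        #   mi.publisher = None
        #   db.set_metadata(book_id, mi, force_changes=True)  # publisher untouched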
if callable(getattr(mi, 'to_book_metadata', None)):
            # Handle code passing in an OPF object instead of a Metadata object
mi = mi.to_book_metadata()
def doit(func, *args, **kwargs):
try:
func(*args, **kwargs)
except:
if ignore_errors:
traceback.print_exc()
else:
raise
def should_replace_field(attr):
return (force_changes and (mi.get(attr, None) is not None)) or \
not mi.is_null(attr)
path_changed = False
if set_title and mi.title:
self._set_title(id, mi.title)
path_changed = True
if set_authors:
if not mi.authors:
mi.authors = [_('Unknown')]
authors = []
for a in mi.authors:
authors += string_to_authors(a)
self._set_authors(id, authors)
path_changed = True
if path_changed:
self.set_path(id, index_is_id=True)
if should_replace_field('title_sort'):
self.set_title_sort(id, mi.title_sort, notify=False, commit=False)
if should_replace_field('author_sort'):
doit(self.set_author_sort, id, mi.author_sort, notify=False,
commit=False)
if should_replace_field('publisher'):
doit(self.set_publisher, id, mi.publisher, notify=False,
commit=False)
# Setting rating to zero is acceptable.
if mi.rating is not None:
doit(self.set_rating, id, mi.rating, notify=False, commit=False)
if should_replace_field('series'):
doit(self.set_series, id, mi.series, notify=False, commit=False)
# force_changes has no effect on cover manipulation
if mi.cover_data[1] is not None:
doit(self.set_cover, id, mi.cover_data[1], commit=False)
elif isinstance(mi.cover, basestring) and mi.cover:
if os.access(mi.cover, os.R_OK):
with lopen(mi.cover, 'rb') as f:
raw = f.read()
if raw:
doit(self.set_cover, id, raw, commit=False)
# if force_changes is true, tags are always replaced because the
# attribute cannot be set to None.
if should_replace_field('tags'):
doit(self.set_tags, id, mi.tags, notify=False, commit=False)
if should_replace_field('comments'):
doit(self.set_comment, id, mi.comments, notify=False, commit=False)
if should_replace_field('languages'):
doit(self.set_languages, id, mi.languages, notify=False, commit=False)
# Setting series_index to zero is acceptable
if mi.series_index is not None:
doit(self.set_series_index, id, mi.series_index, notify=False,
commit=False)
if should_replace_field('pubdate'):
doit(self.set_pubdate, id, mi.pubdate, notify=False, commit=False)
if getattr(mi, 'timestamp', None) is not None:
doit(self.set_timestamp, id, mi.timestamp, notify=False,
commit=False)
# identifiers will always be replaced if force_changes is True
mi_idents = mi.get_identifiers()
if force_changes:
self.set_identifiers(id, mi_idents, notify=False, commit=False)
elif mi_idents:
identifiers = self.get_identifiers(id, index_is_id=True)
for key, val in mi_idents.iteritems():
if val and val.strip(): # Don't delete an existing identifier
identifiers[icu_lower(key)] = val
self.set_identifiers(id, identifiers, notify=False, commit=False)
user_mi = mi.get_all_user_metadata(make_copy=False)
for key in user_mi.iterkeys():
if key in self.field_metadata and \
user_mi[key]['datatype'] == self.field_metadata[key]['datatype'] and \
(user_mi[key]['datatype'] != 'text' or
user_mi[key]['is_multiple'] == self.field_metadata[key]['is_multiple']):
val = mi.get(key, None)
if force_changes or val is not None:
doit(self.set_custom, id, val=val, extra=mi.get_extra(key),
label=user_mi[key]['label'], commit=False, notify=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def authors_sort_strings(self, id, index_is_id=False):
'''
Given a book, return the list of author sort strings
for the book's authors
'''
id = id if index_is_id else self.id(id)
aut_strings = self.conn.get('''
SELECT sort
FROM authors, books_authors_link as bl
WHERE bl.book=? and authors.id=bl.author
ORDER BY bl.id''', (id,))
result = []
for (sort,) in aut_strings:
result.append(sort)
return result
    # Given a book, return a list of (id, name, sort, link) tuples for the
    # book's authors
def authors_with_sort_strings(self, id, index_is_id=False):
id = id if index_is_id else self.id(id)
aut_strings = self.conn.get('''
SELECT authors.id, authors.name, authors.sort, authors.link
FROM authors, books_authors_link as bl
WHERE bl.book=? and authors.id=bl.author
ORDER BY bl.id''', (id,))
result = []
for (id_, author, sort, link) in aut_strings:
result.append((id_, author.replace('|', ','), sort, link))
return result
# Given a book, return the author_sort string for authors of the book
def author_sort_from_book(self, id, index_is_id=False):
auts = self.authors_sort_strings(id, index_is_id)
return ' & '.join(auts).replace('|', ',')
# Given an author, return a list of books with that author
def books_for_author(self, id_, index_is_id=False):
id_ = id_ if index_is_id else self.id(id_)
books = self.conn.get('''
SELECT bl.book
FROM books_authors_link as bl
WHERE bl.author=?''', (id_,))
return [b[0] for b in books]
# Given a list of authors, return the author_sort string for the authors,
# preferring the author sort associated with the author over the computed
# string
def author_sort_from_authors(self, authors):
result = []
for aut in authors:
r = self.conn.get('SELECT sort FROM authors WHERE name=?',
(aut.replace(',', '|'),), all=False)
if r is None:
result.append(author_to_author_sort(aut))
else:
result.append(r)
return ' & '.join(result).replace('|', ',')
def _update_author_in_cache(self, id_, ss, final_authors):
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (ss, id_))
self.data.set(id_, self.FIELD_MAP['authors'],
','.join([a.replace(',', '|') for a in final_authors]),
row_is_id=True)
self.data.set(id_, self.FIELD_MAP['author_sort'], ss, row_is_id=True)
aum = self.authors_with_sort_strings(id_, index_is_id=True)
self.data.set(id_, self.FIELD_MAP['au_map'],
':#:'.join([':::'.join((au.replace(',', '|'), aus, aul))
for (_, au, aus, aul) in aum]),
row_is_id=True)
def _set_authors(self, id, authors, allow_case_change=False):
if not authors:
authors = [_('Unknown')]
self.conn.execute('DELETE FROM books_authors_link WHERE book=?',(id,))
books_to_refresh = {id}
final_authors = []
for a in authors:
case_change = False
if not a:
continue
a = a.strip().replace(',', '|')
if not isinstance(a, unicode):
a = a.decode(preferred_encoding, 'replace')
aus = self.conn.get('SELECT id, name, sort FROM authors WHERE name=?', (a,))
if aus:
aid, name, sort = aus[0]
# Handle change of case
if name != a:
if allow_case_change:
ns = author_to_author_sort(a.replace('|', ','))
if strcmp(sort, ns) == 0:
sort = ns
self.conn.execute('''UPDATE authors SET name=?, sort=?
WHERE id=?''', (a, sort, aid))
case_change = True
else:
a = name
else:
aid = self.conn.execute('''INSERT INTO authors(name)
VALUES (?)''', (a,)).lastrowid
final_authors.append(a.replace('|', ','))
try:
self.conn.execute('''INSERT INTO books_authors_link(book, author)
VALUES (?,?)''', (id, aid))
except IntegrityError: # Sometimes books specify the same author twice in their metadata
pass
if case_change:
bks = self.conn.get('''SELECT book FROM books_authors_link
WHERE author=?''', (aid,))
books_to_refresh |= set([bk[0] for bk in bks])
for bk in books_to_refresh:
ss = self.author_sort_from_book(id, index_is_id=True)
aus = self.author_sort(bk, index_is_id=True)
if strcmp(aus, ss) == 0:
self._update_author_in_cache(bk, ss, final_authors)
# This can repeat what was done above in rare cases. Let it.
ss = self.author_sort_from_book(id, index_is_id=True)
self._update_author_in_cache(id, ss, final_authors)
self.clean_standard_field('authors', commit=True)
return books_to_refresh
def windows_check_if_files_in_use(self, book_id):
'''
Raises an EACCES IOError if any of the files in the folder of book_id
are opened in another program on windows.
'''
if iswindows:
path = self.path(book_id, index_is_id=True)
if path:
spath = os.path.join(self.library_path, *path.split('/'))
wam = None
if os.path.exists(spath):
try:
wam = WindowsAtomicFolderMove(spath)
finally:
if wam is not None:
wam.close_handles()
def set_authors(self, id, authors, notify=True, commit=True,
allow_case_change=False):
'''
        Note that even if commit is False, the db will still be committed,
        because this operation changes the location of the book's files on disk.
:param authors: A list of authors.
'''
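        # Illustrative call (the book id and author names are assumptions):
        #
        #   db.set_authors(book_id, [u'Jane Doe', u'John Roe'],
        #                  allow_case_change=True)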
self.windows_check_if_files_in_use(id)
books_to_refresh = self._set_authors(id, authors,
allow_case_change=allow_case_change)
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.set_path(id, index_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_title_sort(self, id, title_sort_, notify=True, commit=True):
if not title_sort_:
return False
if isbytestring(title_sort_):
title_sort_ = title_sort_.decode(preferred_encoding, 'replace')
self.conn.execute('UPDATE books SET sort=? WHERE id=?', (title_sort_, id))
self.data.set(id, self.FIELD_MAP['sort'], title_sort_, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
return True
def _set_title(self, id, title):
if not title:
return False
if isbytestring(title):
title = title.decode(preferred_encoding, 'replace')
old_title = self.title(id, index_is_id=True)
# We cannot check if old_title == title as previous code might have
# already updated the cache
only_case_change = icu_lower(old_title) == icu_lower(title)
self.conn.execute('UPDATE books SET title=? WHERE id=?', (title, id))
self.data.set(id, self.FIELD_MAP['title'], title, row_is_id=True)
if only_case_change:
# SQLite update trigger will not update sort on a case change
self.conn.execute('UPDATE books SET sort=? WHERE id=?',
(title_sort(title), id))
ts = self.conn.get('SELECT sort FROM books WHERE id=?', (id,),
all=False)
if ts:
self.data.set(id, self.FIELD_MAP['sort'], ts, row_is_id=True)
return True
def set_title(self, id, title, notify=True, commit=True):
'''
        Note that even if commit is False, the db will still be committed,
        because this operation changes the location of the book's files on disk.
'''
self.windows_check_if_files_in_use(id)
if not self._set_title(id, title):
return
self.set_path(id, index_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_languages(self, book_id, languages, notify=True, commit=True):
self.conn.execute(
'DELETE FROM books_languages_link WHERE book=?', (book_id,))
self.conn.execute('''DELETE FROM languages WHERE (SELECT COUNT(id)
FROM books_languages_link WHERE
books_languages_link.lang_code=languages.id) < 1''')
books_to_refresh = set([book_id])
final_languages = []
for l in languages:
lc = canonicalize_lang(l)
if not lc or lc in final_languages or lc in ('und', 'zxx', 'mis',
'mul'):
continue
final_languages.append(lc)
lc_id = self.conn.get('SELECT id FROM languages WHERE lang_code=?',
(lc,), all=False)
if lc_id is None:
lc_id = self.conn.execute('''INSERT INTO languages(lang_code)
VALUES (?)''', (lc,)).lastrowid
self.conn.execute('''INSERT INTO books_languages_link(book, lang_code)
VALUES (?,?)''', (book_id, lc_id))
self.dirtied(books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.data.set(book_id, self.FIELD_MAP['languages'],
u','.join(final_languages), row_is_id=True)
if notify:
self.notify('metadata', [book_id])
return books_to_refresh
def set_timestamp(self, id, dt, notify=True, commit=True):
if dt:
if isinstance(dt, (unicode, bytes)):
dt = parse_date(dt, as_utc=True, assume_utc=False)
self.conn.execute('UPDATE books SET timestamp=? WHERE id=?', (dt, id))
self.data.set(id, self.FIELD_MAP['timestamp'], dt, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_pubdate(self, id, dt, notify=True, commit=True):
if not dt:
dt = UNDEFINED_DATE
if isinstance(dt, basestring):
dt = parse_only_date(dt)
self.conn.execute('UPDATE books SET pubdate=? WHERE id=?', (dt, id))
self.data.set(id, self.FIELD_MAP['pubdate'], dt, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_publisher(self, id, publisher, notify=True, commit=True,
allow_case_change=False):
self.conn.execute('DELETE FROM books_publishers_link WHERE book=?',(id,))
books_to_refresh = {id}
if publisher:
case_change = False
if not isinstance(publisher, unicode):
publisher = publisher.decode(preferred_encoding, 'replace')
pubx = self.conn.get('''SELECT id,name from publishers
WHERE name=?''', (publisher,))
if pubx:
aid, cur_name = pubx[0]
if publisher != cur_name:
if allow_case_change:
self.conn.execute('''UPDATE publishers SET name=?
WHERE id=?''', (publisher, aid))
case_change = True
else:
publisher = cur_name
books_to_refresh = set()
else:
aid = self.conn.execute('''INSERT INTO publishers(name)
VALUES (?)''', (publisher,)).lastrowid
self.conn.execute('''INSERT INTO books_publishers_link(book, publisher)
VALUES (?,?)''', (id, aid))
if case_change:
bks = self.conn.get('''SELECT book FROM books_publishers_link
WHERE publisher=?''', (aid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.conn.execute('''DELETE FROM publishers WHERE (SELECT COUNT(id)
FROM books_publishers_link
WHERE publisher=publishers.id) < 1''')
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['publisher'], publisher, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_uuid(self, id, uuid, notify=True, commit=True):
if uuid:
self.conn.execute('UPDATE books SET uuid=? WHERE id=?', (uuid, id))
self.data.set(id, self.FIELD_MAP['uuid'], uuid, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def get_id_from_uuid(self, uuid):
if uuid:
return (self.data._uuid_map.get(uuid, None) or
self.conn.get('SELECT id FROM books WHERE uuid=?', (uuid,),
all=False))
# Convenience methods for tags_list_editor
# Note: we generally do not need to refresh_ids because library_view will
# refresh everything.
def get_ratings_with_ids(self):
result = self.conn.get('SELECT id,rating FROM ratings')
if not result:
return []
return result
def dirty_books_referencing(self, field, id, commit=True):
# Get the list of books to dirty -- all books that reference the item
table = self.field_metadata[field]['table']
link = self.field_metadata[field]['link_column']
bks = self.conn.get(
'SELECT book from books_{0}_link WHERE {1}=?'.format(table, link),
(id,))
books = []
for (book_id,) in bks:
books.append(book_id)
self.dirtied(books, commit=commit)
def get_tags_with_ids(self):
result = self.conn.get('SELECT id,name FROM tags')
if not result:
return []
return result
def get_languages_with_ids(self):
result = self.conn.get('SELECT id,lang_code FROM languages')
if not result:
return []
return result
def rename_tag(self, old_id, new_name):
# It is possible that new_name is in fact a set of names. Split it on
# comma to find out. If it is, then rename the first one and append the
# rest
new_names = [t.strip() for t in new_name.strip().split(',') if t.strip()]
new_name = new_names[0]
new_names = new_names[1:]
# get the list of books that reference the tag being changed
books = self.conn.get('''SELECT book from books_tags_link
WHERE tag=?''', (old_id,))
books = [b[0] for b in books]
new_id = self.conn.get(
'''SELECT id from tags
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
# easy cases. Simply rename the tag. Do it even if equal, in case
# there is a change of case
self.conn.execute('''UPDATE tags SET name=?
WHERE id=?''', (new_name, old_id))
new_id = old_id
else:
# It is possible that by renaming a tag, the tag will appear
# twice on a book. This will throw an integrity error, aborting
# all the changes. To get around this, we first delete any links
# to the new_id from books referencing the old_id, so that
# renaming old_id to new_id will be unique on the book
for book_id in books:
self.conn.execute('''DELETE FROM books_tags_link
WHERE book=? and tag=?''', (book_id, new_id))
# Change the link table to point at the new tag
self.conn.execute('''UPDATE books_tags_link SET tag=?
WHERE tag=?''',(new_id, old_id,))
            # Get rid of the no-longer used tag
self.conn.execute('DELETE FROM tags WHERE id=?', (old_id,))
if new_names:
# have some left-over names to process. Add them to the book.
for book_id in books:
self.set_tags(book_id, new_names, append=True, notify=False,
commit=False)
self.dirtied(books, commit=False)
self.clean_standard_field('tags', commit=False)
self.conn.commit()
def delete_tag_using_id(self, id):
self.dirty_books_referencing('tags', id, commit=False)
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
self.conn.commit()
def get_series_with_ids(self):
result = self.conn.get('SELECT id,name FROM series')
if not result:
return []
return result
def rename_series(self, old_id, new_name, change_index=True):
new_name = new_name.strip()
new_id = self.conn.get(
'''SELECT id from series
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
new_id = old_id
self.conn.execute('UPDATE series SET name=? WHERE id=?',
(new_name, old_id))
else:
# New series exists. Must update the link, then assign a
# new series index to each of the books.
if change_index:
# Get the list of books where we must update the series index
books = self.conn.get('''SELECT books.id
FROM books, books_series_link as lt
WHERE books.id = lt.book AND lt.series=?
ORDER BY books.series_index''', (old_id,))
# Now update the link table
self.conn.execute('''UPDATE books_series_link
SET series=?
WHERE series=?''',(new_id, old_id,))
if change_index and tweaks['series_index_auto_increment'] != 'no_change':
# Now set the indices
for (book_id,) in books:
# Get the next series index
index = self.get_next_series_num_for(new_name)
self.conn.execute('''UPDATE books
SET series_index=?
WHERE id=?''',(index, book_id,))
self.dirty_books_referencing('series', new_id, commit=False)
self.clean_standard_field('series', commit=False)
self.conn.commit()
def delete_series_using_id(self, id):
self.dirty_books_referencing('series', id, commit=False)
books = self.conn.get('SELECT book from books_series_link WHERE series=?', (id,))
self.conn.execute('DELETE FROM books_series_link WHERE series=?', (id,))
self.conn.execute('DELETE FROM series WHERE id=?', (id,))
for (book_id,) in books:
self.conn.execute('UPDATE books SET series_index=1.0 WHERE id=?', (book_id,))
self.conn.commit()
def get_publishers_with_ids(self):
result = self.conn.get('SELECT id,name FROM publishers')
if not result:
return []
return result
def rename_publisher(self, old_id, new_name):
new_name = new_name.strip()
new_id = self.conn.get(
'''SELECT id from publishers
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
new_id = old_id
# New name doesn't exist. Simply change the old name
self.conn.execute('UPDATE publishers SET name=? WHERE id=?',
(new_name, old_id))
else:
# Change the link table to point at the new one
self.conn.execute('''UPDATE books_publishers_link
SET publisher=?
WHERE publisher=?''',(new_id, old_id,))
# Get rid of the no-longer used publisher
self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
self.dirty_books_referencing('publisher', new_id, commit=False)
self.clean_standard_field('publisher', commit=False)
self.conn.commit()
def delete_publisher_using_id(self, old_id):
self.dirty_books_referencing('publisher', old_id, commit=False)
self.conn.execute('''DELETE FROM books_publishers_link
WHERE publisher=?''', (old_id,))
self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
self.conn.commit()
def get_authors_with_ids(self):
result = self.conn.get('SELECT id,name,sort,link FROM authors')
if not result:
return []
return result
def get_author_id(self, author):
author = author.replace(',', '|')
result = self.conn.get('SELECT id FROM authors WHERE name=?',
(author,), all=False)
return result
def set_link_field_for_author(self, aid, link, commit=True, notify=False):
if not link:
link = ''
self.conn.execute('UPDATE authors SET link=? WHERE id=?', (link.strip(), aid))
if commit:
self.conn.commit()
def set_sort_field_for_author(self, old_id, new_sort, commit=True, notify=False):
self.conn.execute('UPDATE authors SET sort=? WHERE id=?',
(new_sort.strip(), old_id))
if commit:
self.conn.commit()
# Now change all the author_sort fields in books by this author
bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
for (book_id,) in bks:
ss = self.author_sort_from_book(book_id, index_is_id=True)
self.set_author_sort(book_id, ss, notify=notify, commit=commit)
def rename_author(self, old_id, new_name):
# Make sure that any commas in new_name are changed to '|'!
new_name = new_name.replace(',', '|').strip()
if not new_name:
new_name = _('Unknown')
# Get the list of books we must fix up, one way or the other
# Save the list so we can use it twice
bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
books = []
for (book_id,) in bks:
books.append(book_id)
# check if the new author already exists
new_id = self.conn.get('SELECT id from authors WHERE name=?',
(new_name,), all=False)
if new_id is None or old_id == new_id:
# No name clash. Go ahead and update the author's name
self.conn.execute('UPDATE authors SET name=? WHERE id=?',
(new_name, old_id))
else:
# First check for the degenerate case -- changing a value to itself.
# Update it in case there is a change of case, but do nothing else
if old_id == new_id:
self.conn.execute('UPDATE authors SET name=? WHERE id=?',
(new_name, old_id))
self.conn.commit()
return new_id
# Author exists. To fix this, we must replace all the authors
# instead of replacing the one. Reason: db integrity checks can stop
# the rename process, which would leave everything half-done. We
# can't do it the same way as tags (delete and add) because author
# order is important.
for book_id in books:
# Get the existing list of authors
authors = self.conn.get('''
SELECT author from books_authors_link
WHERE book=?
ORDER BY id''',(book_id,))
# unpack the double-list structure, replacing the old author
# with the new one while we are at it
for i,aut in enumerate(authors):
authors[i] = aut[0] if aut[0] != old_id else new_id
# Delete the existing authors list
self.conn.execute('''DELETE FROM books_authors_link
WHERE book=?''',(book_id,))
# Change the authors to the new list
for aid in authors:
try:
self.conn.execute('''
INSERT INTO books_authors_link(book, author)
VALUES (?,?)''', (book_id, aid))
except IntegrityError:
# Sometimes books specify the same author twice in their
# metadata. Ignore it.
pass
# Now delete the old author from the DB
self.conn.execute('DELETE FROM authors WHERE id=?', (old_id,))
self.dirtied(books, commit=False)
self.conn.commit()
# the authors are now changed, either by changing the author's name
# or replacing the author in the list. Now must fix up the books.
for book_id in books:
# First, must refresh the cache to see the new authors
self.data.refresh_ids(self, [book_id])
# now fix the filesystem paths
self.set_path(book_id, index_is_id=True)
# Next fix the author sort. Reset it to the default
ss = self.author_sort_from_book(book_id, index_is_id=True)
self.set_author_sort(book_id, ss)
# the caller will do a general refresh, so we don't need to
# do one here
return new_id
# end convenience methods
def get_tags(self, id):
result = self.conn.get(
'SELECT name FROM tags WHERE id IN (SELECT tag FROM books_tags_link WHERE book=?)',
(id,), all=True)
if not result:
return set([])
return set([r[0] for r in result])
@classmethod
def cleanup_tags(cls, tags):
tags = [x.strip().replace(',', ';') for x in tags if x.strip()]
tags = [x.decode(preferred_encoding, 'replace')
if isbytestring(x) else x for x in tags]
tags = [u' '.join(x.split()) for x in tags]
ans, seen = [], set([])
for tag in tags:
if tag.lower() not in seen:
seen.add(tag.lower())
ans.append(tag)
return ans
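    # Sketch of the normalisation performed by cleanup_tags (illustrative
    # only): embedded commas become semicolons, whitespace is collapsed and
    # case-insensitive duplicates are dropped, e.g.
    #   cleanup_tags(['  Science, Fiction ', 'science; fiction'])
    #       -> ['Science; Fiction']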
def remove_all_tags(self, ids, notify=False, commit=True):
self.conn.executemany(
'DELETE FROM books_tags_link WHERE book=?', [(x,) for x in ids])
self.dirtied(ids, commit=False)
if commit:
self.conn.commit()
for x in ids:
self.data.set(x, self.FIELD_MAP['tags'], '', row_is_id=True)
if notify:
self.notify('metadata', ids)
def bulk_modify_tags(self, ids, add=[], remove=[], notify=False):
add = self.cleanup_tags(add)
remove = self.cleanup_tags(remove)
remove = set(remove) - set(add)
if not ids or (not add and not remove):
return
# Add tags that do not already exist into the tag table
all_tags = self.all_tags()
lt = [t.lower() for t in all_tags]
new_tags = [t for t in add if t.lower() not in lt]
if new_tags:
self.conn.executemany('INSERT INTO tags(name) VALUES (?)', [(x,) for x in
new_tags])
# Create the temporary tables to store the ids for books and tags
# to be operated on
tables = ('temp_bulk_tag_edit_books', 'temp_bulk_tag_edit_add',
'temp_bulk_tag_edit_remove')
drops = '\n'.join(['DROP TABLE IF EXISTS %s;'%t for t in tables])
creates = '\n'.join(['CREATE TEMP TABLE %s(id INTEGER PRIMARY KEY);'%t
for t in tables])
self.conn.executescript(drops + creates)
# Populate the books temp table
self.conn.executemany(
'INSERT INTO temp_bulk_tag_edit_books VALUES (?)',
[(x,) for x in ids])
# Populate the add/remove tags temp tables
for table, tags in enumerate([add, remove]):
if not tags:
continue
table = tables[table+1]
insert = ('INSERT INTO %s(id) SELECT tags.id FROM tags WHERE name=?'
' COLLATE PYNOCASE LIMIT 1')
self.conn.executemany(insert%table, [(x,) for x in tags])
if remove:
self.conn.execute(
'''DELETE FROM books_tags_link WHERE
book IN (SELECT id FROM %s) AND
tag IN (SELECT id FROM %s)'''
% (tables[0], tables[2]))
if add:
self.conn.execute(
'''
INSERT OR REPLACE INTO books_tags_link(book, tag) SELECT {0}.id, {1}.id FROM
{0}, {1}
'''.format(tables[0], tables[1])
)
self.conn.executescript(drops)
self.dirtied(ids, commit=False)
self.conn.commit()
for x in ids:
tags = u','.join(self.get_tags(x))
self.data.set(x, self.FIELD_MAP['tags'], tags, row_is_id=True)
if notify:
self.notify('metadata', ids)
def commit(self):
self.conn.commit()
def set_tags(self, id, tags, append=False, notify=True, commit=True,
allow_case_change=False):
'''
@param tags: list of strings
@param append: If True existing tags are not removed
'''
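        # Illustrative call (the book id and tag values are assumptions):
        #
        #   db.set_tags(book_id, [u'Fantasy', u'Award winner'], append=True,
        #               allow_case_change=True)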
if not tags:
tags = []
if not append:
self.conn.execute('DELETE FROM books_tags_link WHERE book=?', (id,))
otags = self.get_tags(id)
tags = self.cleanup_tags(tags)
books_to_refresh = {id}
for tag in (set(tags)-otags):
case_changed = False
tag = tag.strip()
if not tag:
continue
if not isinstance(tag, unicode):
tag = tag.decode(preferred_encoding, 'replace')
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
idx = lt.index(tag.lower())
except ValueError:
idx = -1
if idx > -1:
etag = existing_tags[idx]
tid = self.conn.get('SELECT id FROM tags WHERE name=?', (etag,), all=False)
if allow_case_change and etag != tag:
self.conn.execute('UPDATE tags SET name=? WHERE id=?', (tag, tid))
case_changed = True
else:
tid = self.conn.execute('INSERT INTO tags(name) VALUES(?)', (tag,)).lastrowid
if not self.conn.get('''SELECT book FROM books_tags_link
WHERE book=? AND tag=?''', (id, tid), all=False):
self.conn.execute('''INSERT INTO books_tags_link(book, tag)
VALUES (?,?)''', (id, tid))
if case_changed:
bks = self.conn.get('SELECT book FROM books_tags_link WHERE tag=?',
(tid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.conn.execute('''DELETE FROM tags WHERE (SELECT COUNT(id)
FROM books_tags_link WHERE tag=tags.id) < 1''')
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
tags = u','.join(self.get_tags(id))
self.data.set(id, self.FIELD_MAP['tags'], tags, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def unapply_tags(self, book_id, tags, notify=True):
for tag in tags:
id = self.conn.get('SELECT id FROM tags WHERE name=?', (tag,), all=False)
if id:
self.conn.execute('''DELETE FROM books_tags_link
WHERE tag=? AND book=?''', (id, book_id))
self.conn.commit()
self.data.refresh_ids(self, [book_id])
if notify:
self.notify('metadata', [id])
def is_tag_used(self, tag):
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
lt.index(tag.lower())
return True
except ValueError:
return False
def delete_tag(self, tag):
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
idx = lt.index(tag.lower())
except ValueError:
idx = -1
if idx > -1:
id = self.conn.get('SELECT id FROM tags WHERE name=?', (existing_tags[idx],), all=False)
if id:
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
self.conn.commit()
series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
def _get_series_values(self, val):
return _get_series_values(val)
def set_series(self, id, series, notify=True, commit=True, allow_case_change=True):
self.conn.execute('DELETE FROM books_series_link WHERE book=?',(id,))
(series, idx) = self._get_series_values(series)
books_to_refresh = {id}
if series:
case_change = False
if not isinstance(series, unicode):
series = series.decode(preferred_encoding, 'replace')
series = series.strip()
series = u' '.join(series.split())
sx = self.conn.get('SELECT id,name from series WHERE name=?', (series,))
if sx:
aid, cur_name = sx[0]
if cur_name != series:
if allow_case_change:
self.conn.execute('UPDATE series SET name=? WHERE id=?', (series, aid))
case_change = True
else:
series = cur_name
books_to_refresh = set()
else:
aid = self.conn.execute('INSERT INTO series(name) VALUES (?)', (series,)).lastrowid
self.conn.execute('INSERT INTO books_series_link(book, series) VALUES (?,?)', (id, aid))
if idx:
self.set_series_index(id, idx, notify=notify, commit=commit)
if case_change:
bks = self.conn.get('SELECT book FROM books_series_link WHERE series=?',
(aid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.conn.execute('''DELETE FROM series
WHERE (SELECT COUNT(id) FROM books_series_link
WHERE series=series.id) < 1''')
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['series'], series, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_series_index(self, id, idx, notify=True, commit=True):
if idx is None:
idx = 1.0
try:
idx = float(idx)
except:
idx = 1.0
self.conn.execute('UPDATE books SET series_index=? WHERE id=?', (idx, id))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['series_index'], idx, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_rating(self, id, rating, notify=True, commit=True):
if not rating:
rating = 0
rating = int(rating)
self.conn.execute('DELETE FROM books_ratings_link WHERE book=?',(id,))
rat = self.conn.get('SELECT id FROM ratings WHERE rating=?', (rating,), all=False)
rat = rat if rat is not None else self.conn.execute('INSERT INTO ratings(rating) VALUES (?)', (rating,)).lastrowid
self.conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rat))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['rating'], rating, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_comment(self, id, text, notify=True, commit=True):
self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
if text:
self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
else:
text = ''
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['comments'], text, row_is_id=True)
self.dirtied([id], commit=False)
if notify:
self.notify('metadata', [id])
def set_author_sort(self, id, sort, notify=True, commit=True):
if not sort:
sort = ''
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['author_sort'], sort, row_is_id=True)
if notify:
self.notify('metadata', [id])
def isbn(self, idx, index_is_id=False):
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
raw = row[self.FIELD_MAP['identifiers']]
if raw:
for x in raw.split(','):
if x.startswith('isbn:'):
return x[5:].strip()
def get_identifiers(self, idx, index_is_id=False):
ans = {}
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
raw = row[self.FIELD_MAP['identifiers']]
if raw:
for x in raw.split(','):
key, _, val = x.partition(':')
key, val = key.strip(), val.strip()
if key and val:
ans[key] = val
return ans
def get_all_identifier_types(self):
idents = self.conn.get('SELECT DISTINCT type FROM identifiers')
return [ident[0] for ident in idents]
def _clean_identifier(self, typ, val):
typ = icu_lower(typ).strip().replace(':', '').replace(',', '')
val = val.strip().replace(',', '|').replace(':', '|')
return typ, val
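    # Example of the normalisation performed by _clean_identifier
    # (illustrative only): ('ISBN:', ' 978-0-00-000000-0,x ') becomes
    # ('isbn', '978-0-00-000000-0|x'); colons and commas are reserved as
    # separators in the stored identifier string.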
def set_identifier(self, id_, typ, val, notify=True, commit=True):
'If val is empty, deletes identifier of type typ'
typ, val = self._clean_identifier(typ, val)
identifiers = self.get_identifiers(id_, index_is_id=True)
if not typ:
return
changed = False
if not val and typ in identifiers:
identifiers.pop(typ)
changed = True
self.conn.execute(
'DELETE from identifiers WHERE book=? AND type=?',
(id_, typ))
if val and identifiers.get(typ, None) != val:
changed = True
identifiers[typ] = val
self.conn.execute(
'INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)',
(id_, typ, val))
if changed:
raw = ','.join(['%s:%s'%(k, v) for k, v in
identifiers.iteritems()])
self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
row_is_id=True)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id_])
def set_identifiers(self, id_, identifiers, notify=True, commit=True):
cleaned = {}
if not identifiers:
identifiers = {}
for typ, val in identifiers.iteritems():
typ, val = self._clean_identifier(typ, val)
if val:
cleaned[typ] = val
self.conn.execute('DELETE FROM identifiers WHERE book=?', (id_,))
self.conn.executemany(
'INSERT INTO identifiers (book, type, val) VALUES (?, ?, ?)',
[(id_, k, v) for k, v in cleaned.iteritems()])
raw = ','.join(['%s:%s'%(k, v) for k, v in
cleaned.iteritems()])
self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
row_is_id=True)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id_])
def set_isbn(self, id_, isbn, notify=True, commit=True):
self.set_identifier(id_, 'isbn', isbn, notify=notify, commit=commit)
def add_catalog(self, path, title):
from calibre.ebooks.metadata.meta import get_metadata
format = os.path.splitext(path)[1][1:].lower()
with lopen(path, 'rb') as stream:
matches = self.data.get_matches('title', '='+title)
if matches:
tag_matches = self.data.get_matches('tags', '='+_('Catalog'))
matches = matches.intersection(tag_matches)
db_id = None
if matches:
db_id = list(matches)[0]
if db_id is None:
obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
(title, 'calibre'))
db_id = obj.lastrowid
self.data.books_added([db_id], self)
self.set_path(db_id, index_is_id=True)
self.conn.commit()
try:
mi = get_metadata(stream, format)
except:
mi = Metadata(title, ['calibre'])
stream.seek(0)
mi.title, mi.authors = title, ['calibre']
mi.tags = [_('Catalog')]
mi.pubdate = mi.timestamp = utcnow()
if format == 'mobi':
mi.cover, mi.cover_data = None, (None, None)
self.set_metadata(db_id, mi)
self.add_format(db_id, format, stream, index_is_id=True)
self.conn.commit()
self.data.refresh_ids(self, [db_id]) # Needed to update format list and size
return db_id
def add_news(self, path, arg):
from calibre.ebooks.metadata.meta import get_metadata
format = os.path.splitext(path)[1][1:].lower()
stream = path if hasattr(path, 'read') else lopen(path, 'rb')
stream.seek(0)
mi = get_metadata(stream, format, use_libprs_metadata=False,
force_read_metadata=True)
        # Force the author to 'calibre', as the automatic deletion of old news
        # checks for both author == calibre and the tag News
mi.authors = ['calibre']
stream.seek(0)
if mi.series_index is None:
mi.series_index = self.get_next_series_num_for(mi.series)
mi.tags = [_('News')]
if arg['add_title_tag']:
mi.tags += [arg['title']]
if arg['custom_tags']:
mi.tags += arg['custom_tags']
obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
(mi.title, mi.authors[0]))
id = obj.lastrowid
self.data.books_added([id], self)
self.set_path(id, index_is_id=True)
self.conn.commit()
if mi.pubdate is None:
mi.pubdate = utcnow()
if mi.timestamp is None:
mi.timestamp = utcnow()
self.set_metadata(id, mi)
self.add_format(id, format, stream, index_is_id=True)
if not hasattr(path, 'read'):
stream.close()
self.conn.commit()
self.data.refresh_ids(self, [id]) # Needed to update format list and size
return id
def run_import_plugins(self, path_or_stream, format):
format = format.lower()
if hasattr(path_or_stream, 'seek'):
path_or_stream.seek(0)
pt = PersistentTemporaryFile('_import_plugin.'+format)
shutil.copyfileobj(path_or_stream, pt, 1024**2)
pt.close()
path = pt.name
else:
path = path_or_stream
return run_plugins_on_import(path, format)
def _add_newbook_tag(self, mi):
tags = prefs['new_book_tags']
if tags:
for tag in [t.strip() for t in tags]:
if tag:
if mi.tags is None:
mi.tags = [tag]
else:
mi.tags.append(tag)
def create_book_entry(self, mi, cover=None, add_duplicates=True,
force_id=None):
if mi.tags:
mi.tags = list(mi.tags)
self._add_newbook_tag(mi)
if not add_duplicates and self.has_book(mi):
return None
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
title = mi.title
if isbytestring(aus):
aus = aus.decode(preferred_encoding, 'replace')
if isbytestring(title):
title = title.decode(preferred_encoding, 'replace')
if force_id is None:
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
else:
id = force_id
obj = self.conn.execute(
'INSERT INTO books(id, title, series_index, '
'author_sort) VALUES (?, ?, ?, ?)',
(id, title, series_index, aus))
self.data.books_added([id], self)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, ignore_errors=True, commit=True)
if cover is not None:
try:
self.set_cover(id, cover)
except:
traceback.print_exc()
return id
def add_books(self, paths, formats, metadata, add_duplicates=True,
return_ids=False):
'''
        Add books to the database. The result cache is not updated.
:param:`paths` List of paths to book files or file-like objects
'''
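        # Hedged usage sketch (the path, format and metadata object are
        # assumptions; real callers live elsewhere in calibre):
        #
        #   duplicates, ids = db.add_books(['/tmp/a.epub'], ['epub'], [mi],
        #                                  add_duplicates=False,
        #                                  return_ids=True)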
formats, metadata = iter(formats), iter(metadata)
duplicates = []
ids = []
postimport = []
for path in paths:
mi = metadata.next()
self._add_newbook_tag(mi)
format = formats.next()
if not add_duplicates and self.has_book(mi):
duplicates.append((path, format, mi))
continue
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
title = mi.title
if isinstance(aus, str):
aus = aus.decode(preferred_encoding, 'replace')
if isinstance(title, str):
title = title.decode(preferred_encoding)
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
self.data.books_added([id], self)
ids.append(id)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, commit=True, ignore_errors=True)
npath = self.run_import_plugins(path, format)
format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
stream = lopen(npath, 'rb')
format = check_ebook_format(stream, format)
self.add_format(id, format, stream, index_is_id=True)
stream.close()
postimport.append((id, format))
self.conn.commit()
self.data.refresh_ids(self, ids) # Needed to update format list and size
for book_id, fmt in postimport:
run_plugins_on_postimport(self, book_id, fmt)
if duplicates:
paths = list(duplicate[0] for duplicate in duplicates)
formats = list(duplicate[1] for duplicate in duplicates)
metadata = list(duplicate[2] for duplicate in duplicates)
return (paths, formats, metadata), (ids if return_ids else
len(ids))
return None, (ids if return_ids else len(ids))
def import_book(self, mi, formats, notify=True, import_hooks=True,
apply_import_tags=True, preserve_uuid=False):
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
if apply_import_tags:
self._add_newbook_tag(mi)
if not mi.title:
mi.title = _('Unknown')
if not mi.authors:
mi.authors = [_('Unknown')]
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
if isinstance(aus, str):
aus = aus.decode(preferred_encoding, 'replace')
title = mi.title if isinstance(mi.title, unicode) else \
mi.title.decode(preferred_encoding, 'replace')
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
self.data.books_added([id], self)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, ignore_errors=True, commit=True)
if preserve_uuid and mi.uuid:
self.set_uuid(id, mi.uuid, commit=False)
for path in formats:
ext = os.path.splitext(path)[1][1:].lower()
if ext == 'opf':
continue
if import_hooks:
self.add_format_with_hooks(id, ext, path, index_is_id=True)
else:
with lopen(path, 'rb') as f:
self.add_format(id, ext, f, index_is_id=True)
        # Mark the book dirty. set_metadata() has probably already done this,
        # but that is not guaranteed, so do it explicitly.
self.dirtied([id], commit=False)
self.conn.commit()
self.data.refresh_ids(self, [id]) # Needed to update format list and size
if notify:
self.notify('add', [id])
return id
def get_top_level_move_items(self):
items = set(os.listdir(self.library_path))
paths = set([])
for x in self.data.universal_set():
path = self.path(x, index_is_id=True)
path = path.split(os.sep)[0]
paths.add(path)
paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
path_map = {}
for x in paths:
path_map[x] = x
if not self.is_case_sensitive:
for x in items:
path_map[x.lower()] = x
items = set(path_map)
paths = set([x.lower() for x in paths])
items = items.intersection(paths)
return items, path_map
def move_library_to(self, newloc, progress=None):
if progress is None:
progress = lambda x:x
if not os.path.exists(newloc):
os.makedirs(newloc)
old_dirs = set([])
items, path_map = self.get_top_level_move_items()
for x in items:
src = os.path.join(self.library_path, x)
dest = os.path.join(newloc, path_map[x])
if os.path.isdir(src):
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
old_dirs.add(src)
else:
if os.path.exists(dest):
os.remove(dest)
shutil.copyfile(src, dest)
x = path_map[x]
if not isinstance(x, unicode):
x = x.decode(filesystem_encoding, 'replace')
progress(x)
dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
opath = self.dbpath
self.conn.close()
self.library_path, self.dbpath = newloc, dbpath
self.connect()
try:
os.unlink(opath)
except:
pass
for dir in old_dirs:
try:
shutil.rmtree(dir)
except:
pass
def __iter__(self):
for record in self.data._data:
if record is not None:
yield record
def all_ids(self):
x = self.FIELD_MAP['id']
for i in iter(self):
yield i[x]
def migrate_old(self, db, progress):
from PyQt4.QtCore import QCoreApplication
header = _(u'<p>Migrating old database to ebook library in %s<br><center>')%self.library_path
progress.setValue(0)
progress.setLabelText(header)
QCoreApplication.processEvents()
db.conn.row_factory = lambda cursor, row: tuple(row)
db.conn.text_factory = lambda x: unicode(x, 'utf-8', 'replace')
books = db.conn.get('SELECT id, title, sort, timestamp, series_index, author_sort, isbn FROM books ORDER BY id ASC')
progress.setAutoReset(False)
progress.setRange(0, len(books))
for book in books:
            self.conn.execute('INSERT INTO books(id, title, sort, timestamp, series_index, author_sort, isbn) VALUES(?, ?, ?, ?, ?, ?, ?);', book)
tables = '''
authors ratings tags series books_tags_link
comments publishers
books_authors_link conversion_options
books_publishers_link
books_ratings_link
books_series_link feeds
'''.split()
for table in tables:
rows = db.conn.get('SELECT * FROM %s ORDER BY id ASC'%table)
for row in rows:
self.conn.execute('INSERT INTO %s VALUES(%s)'%(table, ','.join(repeat('?', len(row)))), row)
self.conn.commit()
self.refresh('timestamp', True)
for i, book in enumerate(books):
progress.setLabelText(header+_(u'Copying <b>%s</b>')%book[1])
id = book[0]
self.set_path(id, True)
formats = db.formats(id, index_is_id=True)
if not formats:
formats = []
else:
formats = formats.split(',')
for format in formats:
data = db.format(id, format, index_is_id=True)
if data:
self.add_format(id, format, cStringIO.StringIO(data), index_is_id=True)
cover = db.cover(id, index_is_id=True)
if cover:
self.set_cover(id, cover)
progress.setValue(i+1)
self.conn.commit()
progress.setLabelText(_('Compacting database'))
self.vacuum()
progress.reset()
return len(books)
def find_books_in_directory(self, dirpath, single_book_per_directory):
return find_books_in_directory(dirpath, single_book_per_directory)
def import_book_directory_multiple(self, dirpath, callback=None,
added_ids=None):
return import_book_directory_multiple(self, dirpath, callback=callback, added_ids=added_ids)
def import_book_directory(self, dirpath, callback=None, added_ids=None):
return import_book_directory(self, dirpath, callback=callback, added_ids=added_ids)
def recursive_import(self, root, single_book_per_directory=True,
callback=None, added_ids=None):
return recursive_import(self, root, single_book_per_directory=single_book_per_directory, callback=callback, added_ids=added_ids)
def add_custom_book_data(self, book_id, name, val):
x = self.conn.get('SELECT id FROM books WHERE ID=?', (book_id,), all=False)
if x is None:
raise ValueError('add_custom_book_data: no such book_id %d'%book_id)
# Do the json encode first, in case it throws an exception
s = json.dumps(val, default=to_json)
self.conn.execute('''INSERT OR REPLACE INTO books_plugin_data(book, name, val)
VALUES(?, ?, ?)''', (book_id, name, s))
self.commit()
def add_multiple_custom_book_data(self, name, vals, delete_first=False):
if delete_first:
self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.conn.executemany(
'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
[(book_id, name, json.dumps(val, default=to_json))
for book_id, val in vals.iteritems()])
self.commit()
def get_custom_book_data(self, book_id, name, default=None):
try:
s = self.conn.get('''select val FROM books_plugin_data
WHERE book=? AND name=?''', (book_id, name), all=False)
if s is None:
return default
return json.loads(s, object_hook=from_json)
except:
pass
return default
def get_all_custom_book_data(self, name, default=None):
try:
s = self.conn.get('''select book, val FROM books_plugin_data
WHERE name=?''', (name,))
if s is None:
return default
res = {}
for r in s:
res[r[0]] = json.loads(r[1], object_hook=from_json)
return res
except:
pass
return default
def delete_custom_book_data(self, book_id, name):
self.conn.execute('DELETE FROM books_plugin_data WHERE book=? AND name=?',
(book_id, name))
self.commit()
def delete_all_custom_book_data(self, name):
self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.commit()
def get_ids_for_custom_book_data(self, name):
s = self.conn.get('''SELECT book FROM books_plugin_data WHERE name=?''', (name,))
return [x[0] for x in s]
def get_usage_count_by_id(self, field):
fm = self.field_metadata[field]
if not fm.get('link_column', None):
            raise ValueError('%s is not an is_multiple field' % field)
return self.conn.get(
'SELECT {0}, count(*) FROM books_{1}_link GROUP BY {0}'.format(
fm['link_column'], fm['table']))
def all_author_names(self):
ai = self.FIELD_MAP['authors']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
for x in auts.split(','):
ans.add(x.replace('|', ','))
return ans
def all_tag_names(self):
ai = self.FIELD_MAP['tags']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
for x in auts.split(','):
ans.add(x)
return ans
def all_publisher_names(self):
ai = self.FIELD_MAP['publisher']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
ans.add(auts)
return ans
def all_series_names(self):
ai = self.FIELD_MAP['series']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
ans.add(auts)
return ans
| gpl-3.0 |
arnondora/wordpress-paper-theme | PaperTheme/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 1812 | 9537 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
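# Example sketch (hypothetical target name): a target whose dict carries
# 'msvs_shard': 2 is replaced by two targets, e.g.
#   'base/base.gyp:base' -> 'base/base.gyp:base_0' and 'base/base.gyp:base_1',
# its 'sources' list is distributed round-robin between the shards, and any
# dependency on the original target is rewritten to depend on every shard.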
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
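# Illustrative result (hypothetical target): for an executable target named 'foo'
# with no explicit ProgramDatabaseFile or msvs_large_pdb_path setting, the
# fallback branch above yields vars['PRODUCT_DIR'] + '/foo.exe.pdb'.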
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYP's tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
| mit |
robovm/robovm-studio | python/helpers/profiler/thriftpy3/transport/TSSLSocket.py | 44 | 8264 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import socket
import ssl
from thriftpy3.transport import TSocket
from thriftpy3.transport.TTransport import TTransportException
class TSSLSocket(TSocket.TSocket):
"""
SSL implementation of client-side TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
The protocol used is set using the class variable
SSL_VERSION, which must be one of ssl.PROTOCOL_* and
defaults to ssl.PROTOCOL_TLSv1 for greatest security.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host='localhost',
port=9090,
validate=True,
ca_certs=None,
keyfile=None,
certfile=None,
unix_socket=None,
ciphers=None):
"""Create SSL TSocket
@param validate: Set to False to disable SSL certificate validation
@type validate: bool
@param ca_certs: Filename to the Certificate Authority pem file, possibly a
file downloaded from: http://curl.haxx.se/ca/cacert.pem This is passed to
the ssl_wrap function as the 'ca_certs' parameter.
@type ca_certs: str
@param keyfile: The private key
@type keyfile: str
@param certfile: The cert file
@type certfile: str
@param ciphers: The cipher suites to allow. This is passed to
the ssl_wrap function as the 'ciphers' parameter.
@type ciphers: str
Raises an IOError exception if validate is True and the ca_certs file is
None, not present or unreadable.
"""
self.validate = validate
self.is_valid = False
self.peercert = None
if not validate:
self.cert_reqs = ssl.CERT_NONE
else:
self.cert_reqs = ssl.CERT_REQUIRED
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
self.ciphers = ciphers
if validate:
if ca_certs is None or not os.access(ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (ca_certs))
TSocket.TSocket.__init__(self, host, port, unix_socket)
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
sock_family, sock_type = res[0:2]
ip_port = res[4]
plain_sock = socket.socket(sock_family, sock_type)
self.handle = ssl.wrap_socket(plain_sock,
ssl_version=self.SSL_VERSION,
do_handshake_on_connect=True,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile,
cert_reqs=self.cert_reqs,
ciphers=self.ciphers)
self.handle.settimeout(self._timeout)
try:
self.handle.connect(ip_port)
except socket.error as e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error as e:
if self._unix_socket:
message = 'Could not connect to secure socket %s: %s' \
% (self._unix_socket, e)
else:
message = 'Could not connect to %s:%d: %s' % (self.host, self.port, e)
raise TTransportException(type=TTransportException.NOT_OPEN,
message=message)
if self.validate:
self._validate_cert()
def _validate_cert(self):
"""internal method to validate the peer's SSL certificate, and to check the
commonName of the certificate to ensure it matches the hostname we
used to make this connection. Does not support subjectAltName records
in certificates.
raises TTransportException if the certificate fails validation.
"""
cert = self.handle.getpeercert()
self.peercert = cert
if 'subject' not in cert:
raise TTransportException(
type=TTransportException.NOT_OPEN,
message='No SSL certificate found from %s:%s' % (self.host, self.port))
fields = cert['subject']
for field in fields:
# ensure structure we get back is what we expect
if not isinstance(field, tuple):
continue
cert_pair = field[0]
if len(cert_pair) < 2:
continue
cert_key, cert_value = cert_pair[0:2]
if cert_key != 'commonName':
continue
certhost = cert_value
# this check should be performed by some sort of Access Manager
if certhost == self.host:
# success, cert commonName matches desired hostname
self.is_valid = True
return
else:
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Hostname we connected to "%s" doesn\'t match certificate '
'provided commonName "%s"' % (self.host, certhost))
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Could not validate SSL certificate from '
'host "%s". Cert=%s' % (self.host, cert))
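# Hypothetical client usage sketch (host and certificate path are placeholders,
# not part of this module), illustrating the constructor parameters documented above:
#   transport = TSSLSocket(host='example.org', port=9090,
#                          validate=True, ca_certs='/etc/ssl/certs/cacert.pem')
#   transport.open()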
class TSSLServerSocket(TSocket.TServerSocket):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host=None,
port=9090,
certfile='cert.pem',
unix_socket=None,
ciphers=None):
"""Initialize a TSSLServerSocket
@param certfile: filename of the server certificate, defaults to cert.pem
@type certfile: str
@param host: The hostname or IP to bind the listen socket to,
i.e. 'localhost' for only allowing local network connections.
Pass None to bind to all interfaces.
@type host: str
@param port: The port to listen on for inbound connections.
@type port: int
@param ciphers: The cipher suites to allow. This is passed to
the ssl_wrap function as the 'ciphers' parameter.
@type ciphers: str
"""
self.setCertfile(certfile)
TSocket.TServerSocket.__init__(self, host, port)
self.ciphers = ciphers
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new connections.
@param certfile: The filename of the server certificate,
i.e. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
if not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = ssl.wrap_socket(plain_client, certfile=self.certfile,
server_side=True, ssl_version=self.SSL_VERSION,
ciphers=self.ciphers)
except ssl.SSLError as ssl_exc:
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise ssl_exc
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
result = TSocket.TSocket()
result.setHandle(client)
return result
| apache-2.0 |
Deepak345/al-go-rithms | cryptography/steganography/python/steganography.py | 3 | 4403 | import getopt
import math
import os
import struct
import sys
import wave
def hide(sound_path, file_path, output_path, num_lsb):
sound = wave.open(sound_path, "r")
params = sound.getparams()
num_channels = sound.getnchannels()
sample_width = sound.getsampwidth()
num_frames = sound.getnframes()
num_samples = num_frames * num_channels
max_bytes_to_hide = (num_samples * num_lsb) // 8
filesize = os.stat(file_path).st_size
if filesize > max_bytes_to_hide:
required_LSBs = math.ceil(filesize * 8 / num_samples)
raise ValueError("Input file too large to hide, "
"requires {} LSBs, using {}"
.format(required_LSBs, num_lsb))
print("Using {} B out of {} B".format(filesize, max_bytes_to_hide))
print(sample_width)
if sample_width == 1: # samples are unsigned 8-bit integers
fmt = "{}B".format(num_samples)
mask = (1 << 8) - (1 << num_lsb)
min_sample = -(1 << 8)
elif sample_width == 2: # samples are signed 16-bit integers
fmt = "{}h".format(num_samples)
mask = (1 << 15) - (1 << num_lsb)
min_sample = -(1 << 15)
else:
raise ValueError("File has an unsupported bit-depth")
raw_data = list(struct.unpack(fmt, sound.readframes(num_frames)))
sound.close()
input_data = memoryview(open(file_path, "rb").read())
data_index = 0
sound_index = 0
values = []
buffer = 0
buffer_length = 0
done = False
print(input_data[1])
while not done:
while buffer_length < num_lsb and data_index // 8 < len(input_data):
buffer += (input_data[data_index // 8] >> (data_index % 8)
) << buffer_length
bits_added = 8 - (data_index % 8)
buffer_length += bits_added
data_index += bits_added
current_data = buffer % (1 << num_lsb)
buffer >>= num_lsb
buffer_length -= num_lsb
while (sound_index < len(raw_data) and
raw_data[sound_index] == min_sample):
values.append(struct.pack(fmt[-1], raw_data[sound_index]))
sound_index += 1
if sound_index < len(raw_data):
current_sample = raw_data[sound_index]
sound_index += 1
sign = 1
if current_sample < 0:
current_sample = -current_sample
sign = -1
altered_sample = sign * ((current_sample & mask) | current_data)
values.append(struct.pack(fmt[-1], altered_sample))
if data_index // 8 >= len(input_data) and buffer_length <= 0:
done = True
while sound_index < len(raw_data):
values.append(struct.pack(fmt[-1], raw_data[sound_index]))
sound_index += 1
sound_steg = wave.open(output_path, "w")
sound_steg.setparams(params)
sound_steg.writeframes(b"".join(values))
sound_steg.close()
def recover(sound_path, output_path, num_lsb, bytes_to_recover):
sound = wave.open(sound_path, "r")
num_channels = sound.getnchannels()
sample_width = sound.getsampwidth()
num_frames = sound.getnframes()
num_samples = num_frames * num_channels
if (sample_width == 1): # samples 8 bits
fmt = "{}B".format(num_samples)
min_sample = -(1 << 8)
elif (sample_width == 2): # samples 16 bits
fmt = "{}h".format(num_samples)
min_sample = -(1 << 15)
else:
raise ValueError("File has an unsupported bit-depth")
raw_data = list(struct.unpack(fmt, sound.readframes(num_frames)))
mask = (1 << num_lsb) - 1
output_file = open(output_path, "wb+")
data = bytearray()
sound_index = 0
buffer = 0
buffer_length = 0
while (bytes_to_recover > 0):
next_sample = raw_data[sound_index]
if (next_sample != min_sample):
buffer += (abs(next_sample) & mask) << buffer_length
buffer_length += num_lsb
sound_index += 1
while (buffer_length >= 8 and bytes_to_recover > 0):
current_data = buffer % (1 << 8)
buffer >>= 8
buffer_length -= 8
data += struct.pack('1B', current_data)
bytes_to_recover -= 1
output_file.write(bytes(data))
output_file.close()
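# Illustrative usage sketch (file names are placeholders, not part of the original
# script; any command-line front end that would parse them is omitted here):
#   hide("carrier.wav", "secret.bin", "stego.wav", num_lsb=2)
#   recover("stego.wav", "recovered.bin", num_lsb=2,
#           bytes_to_recover=os.stat("secret.bin").st_size)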
| mit |
tashaxe/Red-DiscordBot | lib/youtube_dl/extractor/brightcove.py | 2 | 30054 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_parse_qs,
compat_str,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_xml_parse_error,
compat_HTTPError,
)
from ..utils import (
determine_ext,
ExtractorError,
extract_attributes,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
js_to_json,
int_or_none,
parse_iso8601,
unescapeHTML,
unsmuggle_url,
update_url_query,
clean_html,
mimetype2ext,
)
class BrightcoveLegacyIE(InfoExtractor):
IE_NAME = 'brightcove:legacy'
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL = 'http://c.brightcove.com/services/viewer/htmlFederated'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'id': '2371591881001',
'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
'timestamp': 1368213670,
'upload_date': '20130510',
'uploader_id': '1589608506001',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'info_dict': {
'id': '1785452137001',
'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
'timestamp': 1344975024,
'upload_date': '20120814',
'uploader_id': '1460825906',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
'timestamp': 1382041798,
'upload_date': '20131017',
'uploader_id': '1130468786001',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
'skip': 'Video gone',
},
{
# test flv videos served by akamaihd.net
# From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3Aevent-stream-356&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
# The md5 checksum changes on each download
'info_dict': {
'id': '3750436379001',
'ext': 'flv',
'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'uploader': 'RBTV Old (do not use)',
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'timestamp': 1409122195,
'upload_date': '20140827',
'uploader_id': '710858724001',
},
'skip': 'Video gone',
},
{
# playlist with 'videoList'
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
'id': '3550319591001',
},
'playlist_mincount': 7,
},
{
# playlist with 'playlistTab' (https://github.com/rg3/youtube-dl/issues/9965)
'url': 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=AQ%7E%7E,AAABXlLMdok%7E,NJ4EoMlZ4rZdx9eU1rkMVd8EaYPBBUlg',
'info_dict': {
'id': '1522758701001',
'title': 'Lesson 08',
},
'playlist_mincount': 10,
},
]
FLV_VCODECS = {
1: 'SORENSON',
2: 'ON2',
3: 'H264',
4: 'VP8',
}
@classmethod
def _build_brighcove_url(cls, object_str):
"""
        Build a Brightcove URL from an XML string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
try:
object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
data_url = object_doc.attrib.get('data', '')
data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query)
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return data_url_params.get(name)
params = {}
playerID = find_param('playerID') or find_param('playerId')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# These fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList')
if videoPlayer is not None:
if isinstance(videoPlayer, list):
videoPlayer = videoPlayer[0]
videoPlayer = videoPlayer.strip()
# UUID is also possible for videoPlayer (e.g.
# http://www.popcornflix.com/hoodies-vs-hooligans/7f2d2b87-bbf2-4623-acfb-ea942b4f01dd
# or http://www8.hp.com/cn/zh/home.html)
if not (re.match(
r'^(?:\d+|[\da-fA-F]{8}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{12})$',
videoPlayer) or videoPlayer.startswith('ref:')):
return None
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)
@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
m = re.search(
r'''(?x)customBC\.createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
# in length, however it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())
@classmethod
def _make_brightcove_url(cls, params):
return update_url_query(cls._FEDERATED_URL, params)
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(
r'''(?x)
<meta\s+
(?:property|itemprop)=([\'"])(?:og:video|embedURL)\1[^>]+
content=([\'"])(?P<url>https?://(?:secure|c)\.brightcove.com/(?:(?!\2).)+)\2
''', webpage)
if url_m:
url = unescapeHTML(url_m.group('url'))
# Some sites don't add it, we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url or 'videoId' in url or 'idVideo' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
# Change the 'videoId' and others field to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|idVideo|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query, referer=referer)
elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
else:
raise ExtractorError(
'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
expected=True)
def _get_video_info(self, video_id, query, referer=None):
headers = {}
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
headers['Referer'] = referer
webpage = self._download_webpage(self._FEDERATED_URL, video_id, headers=headers, query=query)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
'brightcove said: %s' % error_msg, expected=True)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' in json_data:
playlist_info = json_data['videoList']
playlist_dto = playlist_info['mediaCollectionDTO']
elif 'playlistTabs' in json_data:
playlist_info = json_data['playlistTabs']
playlist_dto = playlist_info['lineupListDTO']['playlistDTOs'][0]
else:
raise ExtractorError('Empty playlist')
videos = [self._extract_video_info(video_info) for video_info in playlist_dto['videoDTOs']]
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_dto['displayName'])
def _extract_video_info(self, video_info):
video_id = compat_str(video_info['id'])
publisher_id = video_info.get('publisherId')
info = {
'id': video_id,
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
'uploader_id': compat_str(publisher_id) if publisher_id else None,
'duration': float_or_none(video_info.get('length'), 1000),
'timestamp': int_or_none(video_info.get('creationDate'), 1000),
}
renditions = video_info.get('renditions', []) + video_info.get('IOSRenditions', [])
if renditions:
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
ext = None
if rend['remote']:
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(
url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
continue
elif 'akamaihd.net' in url_comp.netloc:
# This type of renditions are served through
# akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
if ext is None:
ext = determine_ext(url)
tbr = int_or_none(rend.get('encodingRate'), 1000)
a_format = {
'format_id': 'http%s' % ('-%s' % tbr if tbr else ''),
'url': url,
'ext': ext,
'filesize': int_or_none(rend.get('size')) or None,
'tbr': tbr,
}
if rend.get('audioOnly'):
a_format.update({
'vcodec': 'none',
})
else:
a_format.update({
'height': int_or_none(rend.get('frameHeight')),
'width': int_or_none(rend.get('frameWidth')),
'vcodec': rend.get('videoCodec'),
})
# m3u8 manifests with remote == false are media playlists
# Not calling _extract_m3u8_formats here to save network traffic
if ext == 'm3u8':
a_format.update({
'format_id': 'hls%s' % ('-%s' % tbr if tbr else ''),
'ext': 'mp4',
'protocol': 'm3u8_native',
})
formats.append(a_format)
self._sort_formats(formats)
info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
'vcodec': self.FLV_VCODECS.get(video_info.get('FLVFullCodec')),
'filesize': int_or_none(video_info.get('FLVFullSize')),
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % video_id)
return info
class BrightcoveNewIE(InfoExtractor):
IE_NAME = 'brightcove:new'
_VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>\d+|ref:[^&]+)'
_TESTS = [{
'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
'md5': 'c8100925723840d4b0d243f7025703be',
'info_dict': {
'id': '4463358922001',
'ext': 'mp4',
'title': 'Meet the man behind Popcorn Time',
'description': 'md5:eac376a4fe366edc70279bfb681aea16',
'duration': 165.768,
'timestamp': 1441391203,
'upload_date': '20150904',
'uploader_id': '929656772001',
'formats': 'mincount:22',
},
}, {
# with rtmp streams
'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001',
'info_dict': {
'id': '4279049078001',
'ext': 'mp4',
'title': 'Titansgrave: Chapter 0',
'description': 'Titansgrave: Chapter 0',
'duration': 1242.058,
'timestamp': 1433556729,
'upload_date': '20150606',
'uploader_id': '4036320279001',
'formats': 'mincount:41',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# ref: prefixed video id
'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
'only_matching': True,
}, {
# non numeric ref: prefixed video id
'url': 'http://players.brightcove.net/710858724001/default_default/index.html?videoId=ref:event-stream-356',
'only_matching': True,
}, {
# unavailable video without message but with error_code
'url': 'http://players.brightcove.net/1305187701/c832abfb-641b-44eb-9da0-2fe76786505f_default/index.html?videoId=4377407326001',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
urls = BrightcoveNewIE._extract_urls(webpage)
return urls[0] if urls else None
@staticmethod
def _extract_urls(ie, webpage):
# Reference:
# 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
# 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#tag
# 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript
# 4. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/in-page-embed-player-implementation.html
# 5. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player
entries = []
# Look for iframe embeds [1]
for _, url in re.findall(
r'<iframe[^>]+src=(["\'])((?:https?:)?//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
entries.append(url if url.startswith('http') else 'http:' + url)
# Look for <video> tags [2] and embed_in_page embeds [3]
# [2] looks like:
for video, script_tag, account_id, player_id, embed in re.findall(
r'''(?isx)
(<video\s+[^>]+>)
(?:.*?
(<script[^>]+
src=["\'](?:https?:)?//players\.brightcove\.net/
(\d+)/([^/]+)_([^/]+)/index(?:\.min)?\.js
)
)?
''', webpage):
attrs = extract_attributes(video)
# According to examples from [4] it's unclear whether video id
# may be optional and what to do when it is
video_id = attrs.get('data-video-id')
if not video_id:
continue
account_id = account_id or attrs.get('data-account')
if not account_id:
continue
player_id = player_id or attrs.get('data-player') or 'default'
embed = embed or attrs.get('data-embed') or 'default'
bc_url = 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' % (
account_id, player_id, embed, video_id)
# Some brightcove videos may be embedded with video tag only and
# without script tag or any mentioning of brightcove at all. Such
# embeds are considered ambiguous since they are matched based only
# on data-video-id and data-account attributes and in the wild may
# not be brightcove embeds at all. Let's check reconstructed
# brightcove URLs in case of such embeds and only process valid
# ones. By this we ensure there is indeed a brightcove embed.
if not script_tag and not ie._is_valid_url(
bc_url, video_id, 'possible brightcove video'):
continue
entries.append(bc_url)
return entries
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(
'http://players.brightcove.net/%s/%s_%s/index.min.js'
% (account_id, player_id, embed), video_id)
policy_key = None
catalog = self._search_regex(
r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
if catalog:
catalog = self._parse_json(
js_to_json(catalog), video_id, fatal=False)
if catalog:
policy_key = catalog.get('policyKey')
if not policy_key:
policy_key = self._search_regex(
r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
webpage, 'policy key', group='pk')
api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
try:
json_data = self._download_json(api_url, video_id, headers={
'Accept': 'application/json;pk=%s' % policy_key
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
message = json_data.get('message') or json_data['error_code']
if json_data.get('error_subcode') == 'CLIENT_GEO':
self.raise_geo_restricted(msg=message)
raise ExtractorError(message, expected=True)
raise
title = json_data['name'].strip()
formats = []
for source in json_data.get('sources', []):
container = source.get('container')
ext = mimetype2ext(source.get('type'))
src = source.get('src')
if ext == 'ism' or container == 'WVM':
continue
elif ext == 'm3u8' or container == 'M2TS':
if not src:
continue
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
elif ext == 'mpd':
if not src:
continue
formats.extend(self._extract_mpd_formats(src, video_id, 'dash', fatal=False))
else:
streaming_src = source.get('streaming_src')
stream_name, app_name = source.get('stream_name'), source.get('app_name')
if not src and not streaming_src and (not stream_name or not app_name):
continue
tbr = float_or_none(source.get('avg_bitrate'), 1000)
height = int_or_none(source.get('height'))
width = int_or_none(source.get('width'))
f = {
'tbr': tbr,
'filesize': int_or_none(source.get('size')),
'container': container,
'ext': ext or container.lower(),
}
if width == 0 and height == 0:
f.update({
'vcodec': 'none',
})
else:
f.update({
'width': width,
'height': height,
'vcodec': source.get('codec'),
})
def build_format_id(kind):
format_id = kind
if tbr:
format_id += '-%dk' % int(tbr)
if height:
format_id += '-%dp' % height
return format_id
if src or streaming_src:
f.update({
'url': src or streaming_src,
'format_id': build_format_id('http' if src else 'http-streaming'),
'source_preference': 0 if src else -1,
})
else:
f.update({
'url': app_name,
'play_path': stream_name,
'format_id': build_format_id('rtmp'),
})
formats.append(f)
errors = json_data.get('errors')
if not formats and errors:
error = errors[0]
raise ExtractorError(
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
self._sort_formats(formats)
subtitles = {}
for text_track in json_data.get('text_tracks', []):
if text_track.get('src'):
subtitles.setdefault(text_track.get('srclang'), []).append({
'url': text_track['src'],
})
is_live = False
duration = float_or_none(json_data.get('duration'), 1000)
if duration and duration < 0:
is_live = True
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'description': clean_html(json_data.get('description')),
'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
'duration': duration,
'timestamp': parse_iso8601(json_data.get('published_at')),
'uploader_id': account_id,
'formats': formats,
'subtitles': subtitles,
'tags': json_data.get('tags', []),
'is_live': is_live,
}
| gpl-3.0 |
horazont/aioxmpp | aioxmpp/ibr/service.py | 1 | 5326 | ########################################################################
# File name: service.py
# This file is part of: aioxmpp
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import asyncio
import aioxmpp
import logging
from aioxmpp.service import Service
from . import xso
logger = logging.getLogger(__name__)
async def get_registration_fields(xmlstream, timeout=60):
"""
A query is sent to the server to obtain the fields that need to be
filled to register with the server.
:param xmlstream: Specifies the stream connected to the server where
the account will be created.
:type xmlstream: :class:`aioxmpp.protocol.XMLStream`
:param timeout: Maximum time in seconds to wait for an IQ response, or
:data:`None` to disable the timeout.
:type timeout: :class:`~numbers.Real` or :data:`None`
:return: :attr:`list`
"""
iq = aioxmpp.IQ(
to=aioxmpp.JID.fromstr(xmlstream._to),
type_=aioxmpp.IQType.GET,
payload=xso.Query()
)
iq.autoset_id()
reply = await aioxmpp.protocol.send_and_wait_for(
xmlstream,
[iq],
[aioxmpp.IQ],
timeout=timeout
)
return reply.payload
async def register(xmlstream, query_xso, timeout=60):
"""
Create a new account on the server.
:param query_xso: XSO with the information needed for the registration.
:type query_xso: :class:`~aioxmpp.ibr.Query`
:param xmlstream: Specifies the stream connected to the server where
the account will be created.
:type xmlstream: :class:`aioxmpp.protocol.XMLStream`
:param timeout: Maximum time in seconds to wait for an IQ response, or
:data:`None` to disable the timeout.
:type timeout: :class:`~numbers.Real` or :data:`None`
"""
iq = aioxmpp.IQ(
to=aioxmpp.JID.fromstr(xmlstream._to),
type_=aioxmpp.IQType.SET,
payload=query_xso
)
iq.autoset_id()
await aioxmpp.protocol.send_and_wait_for(
xmlstream,
[iq],
[aioxmpp.IQ],
timeout=timeout
)
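# Hypothetical registration flow (sketch only; establishing the XML stream is
# outside the scope of this module, and the field values are placeholders):
#   fields = await get_registration_fields(xmlstream)
#   await register(xmlstream, xso.Query("juliet", "s3cret"))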
def get_used_fields(payload):
"""
Get a list containing the names of the fields that are used in the
xso.Query.
    :param payload: Query object to be inspected.
:type payload: :class:`~aioxmpp.ibr.Query`
:return: :attr:`list`
"""
return [
tag
for tag, descriptor in payload.CHILD_MAP.items()
if descriptor.__get__(payload, type(payload)) is not None
]
class RegistrationService(Service):
"""
Service implementing the XMPP In-Band Registration(:xep:`0077`)
use cases for registered entities.
This service allows an already registered and authenticated entity
to request information about the registration, cancel an existing
registration, or change a password.
.. automethod:: get_client_info
.. automethod:: change_pass
.. automethod:: cancel_registration
"""
async def get_client_info(self):
"""
A query is sent to the server to obtain the client's data stored at the
server.
:return: :class:`~aioxmpp.ibr.Query`
"""
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.GET,
payload=xso.Query()
)
reply = await self.client.send(iq)
return reply
async def change_pass(self, new_pass):
"""
        Change the client's password to 'new_pass'.
        :param new_pass: New password of the client.
        :type new_pass: :class:`str`
"""
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.SET,
payload=xso.Query(self.client.local_jid.localpart, new_pass)
)
await self.client.send(iq)
async def cancel_registration(self):
"""
        Cancel the current client's account with the server.
        Even if the cancellation is successful, this method will raise an
        exception because the account no longer exists on the server, so the
        client will fail.
        To continue with the execution, this method should be wrapped in a
        try/except statement.
"""
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.SET,
payload=xso.Query()
)
iq.payload.remove = True
await self.client.send(iq)
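# Hypothetical usage from an already connected client (sketch only; summoning the
# service follows the usual aioxmpp service pattern and is not part of this module):
#   ibr = client.summon(RegistrationService)
#   info = await ibr.get_client_info()
#   await ibr.change_pass("new password")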
| lgpl-3.0 |
nakagami/reportlab | tests/test_pdfgen_links.py | 1 | 6833 | #Copyright ReportLab Europe Ltd. 2000-2012
#this test and associated functionality kindly donated by Ian Sparks.
#see license.txt for license details
"""
Tests for internal links and destinations
"""
__version__='''$Id: test_pdfgen_links.py 3959 2012-09-27 14:39:39Z robin $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
#
# Fit tests
#
# Modification History
# ====================
#
# 11-Mar-2003 Ian Sparks
# * Initial version.
#
#
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
from reportlab.lib import colors
import unittest
def markPage(c,height=letter[1],width=letter[0]):
height = height / inch
width = width / inch
for y in range(int(height)):
for x in range(int(width)):
c.drawString(x*inch,y*inch,"x=%d y=%d" % (x,y) )
c.line(x*inch,0,x*inch,height*inch)
c.line(0,y*inch,width*inch,y*inch)
fn = outputfile("test_pdfgen_links.pdf")
class LinkTestCase(unittest.TestCase):
"Test classes."
def test1(self):
c = canvas.Canvas(fn,pagesize=letter)
#Page 1
c.setFont("Courier", 10)
markPage(c)
c.bookmarkPage("P1")
c.addOutlineEntry("Page 1","P1")
#Note : XYZ Left is ignored because at this zoom the whole page fits the screen
c.bookmarkPage("P1_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0.5)
c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=1)
c.bookmarkPage("P1_XYZ2",fit="XYZ",top=7*inch,left=3*inch,zoom=5)
c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=1)
c.bookmarkPage("P1_FIT",fit="Fit")
c.addOutlineEntry("Page 1 Fit","P1_FIT",level=1)
c.bookmarkPage("P1_FITH",fit="FitH",top=2*inch)
c.addOutlineEntry("Page 1 FitH (top = 2 inch)","P1_FITH",level=1)
c.bookmarkPage("P1_FITV",fit="FitV",left=3*inch)
c.addOutlineEntry("Page 1 FitV (left = 3 inch)","P1_FITV",level=1)
c.bookmarkPage("P1_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=1)
c.bookmarkPage("P1_FORWARD")
c.addOutlineEntry("Forward References","P1_FORWARD",level=2)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
#Create link to FitR on page 3
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.blue)
c.drawString(inch+20,inch+20,"Click to jump to the meaning of life")
c.linkAbsolute("","MOL",(inch+10,inch+10,6*inch,2*inch))
c.restoreState()
#Create linkAbsolute to page 2
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.green)
c.drawString(4*inch,4*inch,"Jump to 2.5 inch position on page 2")
c.linkAbsolute("","HYPER_1",(3.75*inch,3.75*inch,8.25*inch,4.25*inch))
c.restoreState()
c.showPage()
#Page 2
c.setFont("Helvetica", 10)
markPage(c)
c.bookmarkPage("P2")
c.addOutlineEntry("Page 2","P2")
        #Note : This time left will be at 3*inch because the zoom makes the page too big to fit
c.bookmarkPage("P2_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=2)
c.addOutlineEntry("Page 2 XYZ (top=7,left=3,zoom=2.0)","P2_XYZ",level=1)
c.bookmarkPage("P2_FIT",fit="Fit")
c.addOutlineEntry("Page 2 Fit","P2_FIT",level=1)
c.bookmarkPage("P2_FITH",fit="FitH",top=2*inch)
c.addOutlineEntry("Page 2 FitH (top = 2 inch)","P2_FITH",level=1)
c.bookmarkPage("P2_FITV",fit="FitV",left=10*inch)
c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=1)
c.bookmarkPage("P2_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
c.addOutlineEntry("Page 2 FitR (left=1,bottom=2,right=5,top=6)","P2_FITR",level=1)
c.bookmarkPage("P2_FORWARD")
c.addOutlineEntry("Forward References","P2_FORWARD",level=2)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
c.bookmarkPage("P2_BACKWARD")
c.addOutlineEntry("Backward References","P2_BACKWARD",level=2)
c.addOutlineEntry("Page 1 Fit","P1_FIT",level=3)
c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=3)
#Horizontal absolute test from page 1. Note that because of the page size used on page 3 all this will do
#is put the view centered on the bookmark. If you want to see it "up close and personal" change page3 to be
#the same page size as the other pages.
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.green)
c.drawString(2.5*inch,2.5*inch,"This line is hyperlinked from page 1")
        # c.bookmarkHorizontalAbsolute("HYPER_1",3*inch) #slightly higher than the text otherwise text is off screen above.
c.bookmarkPage("HYPER_1",fit="XYZ",top=2.5*inch,bottom=2*inch)
c.restoreState()
#
c.showPage()
#Page 3
c.setFont("Times-Roman", 10)
        #Turn the page on its side and make it 2* the normal "width" in order to have something to test FitV against.
c.setPageSize((2*letter[1],letter[0]))
markPage(c,height=letter[0],width=2*letter[1])
c.bookmarkPage("P3")
c.addOutlineEntry("Page 3 (Double-wide landscape page)","P3")
        #Note : XYZ with no zoom (set it to something first)
c.bookmarkPage("P3_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=1)
        #FitV works here because the page is so wide it can't all fit on the page
c.bookmarkPage("P3_FITV",fit="FitV",left=10*inch)
c.addOutlineEntry("Page 3 FitV (left = 10 inch)","P3_FITV",level=1)
c.bookmarkPage("P3_BACKWARD")
c.addOutlineEntry("Backward References","P3_BACKWARD",level=2)
c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=3)
c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=3)
c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=3)
#Add link from page 1
c.saveState()
c.setFont("Courier", 40)
c.setFillColor(colors.green)
c.drawString(5*inch,6*inch,"42")
c.bookmarkPage("MOL",fit="FitR",left=4*inch,top=7*inch,bottom=4*inch,right=6*inch)
c.showOutline()
c.save()
def makeSuite():
return makeSuiteForClasses(LinkTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
print("wrote", fn)
printLocation()
| bsd-3-clause |
anarchivist/pyflag | src/plugins/PreCanned/Basic.py | 2 | 5709 | """ These are PreCanned Reports.
PreCanned Reports are the PyFlag equivalent of the google 'Im Feeling
Lucky' feature - we basically just dump out some simple queries which
are used to get you started.
"""
import pyflag.Reports as Reports
import pyflag.conf
config=pyflag.conf.ConfObject()
import pyflag.Registry as Registry
class ImFeelingLucky(Reports.report):
"""
'Im Feeling Lucky' is a report which does basic analysis to get
you started on the case. Select which kind of analysis you want to
do.
"""
name = "Im Feeling Lucky"
family = "Disk Forensics"
def get_names(self, cls):
if type(cls.name)==str:
names = [cls.name,]
else:
names = cls.name
return names
def display(self, query, result):
query.clear('filter')
def left_pane_cb(path):
## We expect a directory here:
if not path.endswith('/'): path=path+'/'
seen = []
result = []
for cls in Registry.PRECANNED.classes:
if not cls.name: continue
for name in self.get_names(cls):
if name.startswith(path):
branches = name[len(path):].split('/')
branch = branches[0]
if branch not in seen:
seen.append(branch)
if len(branches)>1:
result.append((branch, branch, "branch"))
else:
result.append((branch, branch, "leaf"))
return result
def right_pane_cb(path, result):
for cls in Registry.PRECANNED.classes:
for name in self.get_names(cls):
if name == path:
query.set("open_tree",path)
cls().display(query, result)
return
result.heading("Precanned Analysis")
        result.para("Select the type of automated analysis required. You can use this to get started, and then drive the analysis further.")
result.tree(tree_cb = left_pane_cb, pane_cb = right_pane_cb)
class Images(Reports.PreCannedCaseTableReports):
""" Display a preview of images """
args = {'filter':' "Thumbnail" has_magic image and "Size" > 20000 ',
'order': 1, 'direction':0}
family = "Disk Forensics"
description = "View all images bigger than 20kb "
name = "/Disk Forensics/Multimedia/Graphics"
default_table = "TypeCaseTable"
columns = ['Thumbnail', 'InodeTable.Size','FileTable.Filename']
class Videos(Reports.PreCannedCaseTableReports):
""" Display a preview of Videos """
args = {'filter':' "Thumbnail" has_magic video',
'order': 1, 'direction':0}
family = "Disk Forensics"
description = "View all Videos "
name = "/Disk Forensics/Multimedia/Videos"
default_table = "TypeCaseTable"
columns = ['Thumbnail', 'InodeTable.Size','FileTable.Filename']
class OfficeFiles(Reports.PreCannedCaseTableReports):
""" Display a preview of Office files """
args = {'filter':' "Thumbnail" has_magic office ',
'order': 1, 'direction':0}
family = "Disk Forensics"
description = "View all Office files "
name = "/Disk Forensics/Multimedia/Office"
default_table = "TypeCaseTable"
columns = ['Thumbnail', 'InodeTable.Size','FileTable.Filename']
class HTMLPages(Registry.PreCanned):
args = {'filter':' "Thumbnail" has_magic HTML ',
'order': 4, 'direction':1}
family = "Disk Forensics"
report = "Browse Types"
description = "View all HTML Pages "
name = "/Disk Forensics/Multimedia/HTML Pages"
class HTMLURLs(Reports.PreCannedCaseTableReports):
args = {'filter': '"Content Type" contains html and Status = 200 ',
'_hidden': [ 4, 5, 6] }
report='Browse HTTP Requests'
family='Network Forensics'
description = 'View all HTML URLs'
name = [ "/Network Forensics/Web Applications/HTML URLs" ]
default_table = 'HTTPCaseTable'
columns = ['Timestamp', 'Inode', 'Method', 'URL', 'Content Type', 'InodeTable.Size', 'Status']
class ImageURLs(Reports.PreCannedCaseTableReports):
description = "Show larger images transferred over HTTP"
name = [ "/Network Forensics/Communications/Web/Images"]
family = 'Network Forensics'
args = {'filter':'Thumbnail has_magic image and Size > 20000',
'order': 0, 'direction': 1 }
default_table = 'HTTPCaseTable'
columns = ['Timestamp','TypeCaseTable.Thumbnail','InodeTable.Size', 'URL']
class VideoURLs(Reports.PreCannedCaseTableReports):
description = "Show videos downloaded over HTTP"
name = [ "/Network Forensics/Communications/Web/Videos"]
family = 'Network Forensics'
args = {'filter':'Thumbnail has_magic video',
'order': 0, 'direction': 1 }
default_table = 'HTTPCaseTable'
columns = ['Timestamp','TypeCaseTable.Thumbnail','InodeTable.Size', 'URL']
class GoogleSearches(Reports.PreCannedCaseTableReports):
description = "Shows possible Google searches."
name = [ "/Network Forensics/Web Applications/Google Searches" ]
family = 'Network Forensics'
args = {'filter': 'Parameter = q and "Content Type" contains html', '_hidden': 5}
default_table = 'HTTPCaseTable'
columns = ['HTTPCaseTable.Timestamp',
'HTTPCaseTable.Inode',
'HTTPParameterCaseTable.Parameter',
'HTTPParameterCaseTable.Value',
'HTTPCaseTable.URL',
'HTTPCaseTable.Content Type']
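# Sketch of how a further canned report could be declared (hypothetical filter,
# name and columns, following the same pattern as the classes above):
#   class PDFURLs(Reports.PreCannedCaseTableReports):
#       description = "Show PDF documents downloaded over HTTP"
#       name = ["/Network Forensics/Communications/Web/PDFs"]
#       family = 'Network Forensics'
#       args = {'filter': '"Content Type" contains pdf'}
#       default_table = 'HTTPCaseTable'
#       columns = ['Timestamp', 'URL', 'Content Type']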
| gpl-2.0 |
mozilla-services/data-pipeline | reports/stability-summary/utils.py | 4 | 1274 | import boto3
from gzip import GzipFile
from cStringIO import StringIO
import sys
import csv
class S3CompressedWriter(object):
def __init__(self, bucket, path, mimetype='text/plain'):
self.bucket = bucket
self.path = path
self.mimetype = mimetype
self._buffer = None
def __enter__(self):
        self._buffer = StringIO()
self._writer = GzipFile(mode="wb", fileobj=self._buffer)
return self._writer
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is None:
self._writer.close()
self._buffer.seek(0)
s3 = boto3.resource('s3')
s3.Object(self.bucket, self.path).put(Body=self._buffer, ContentEncoding='gzip', ContentType=self.mimetype)
self._buffer = None
def __del__(self):
assert self._buffer is None
def S3CompressedReader(bucket, path):
s3 = boto3.resource('s3')
r = s3.Object(bucket, path).get()
body = StringIO(r['Body'].read())
return GzipFile(mode="rb", fileobj=body)
def HeaderCSVReader(fd, *args, **kwargs):
"""
Read CSV data from `fd`, separating the header list from the data.
"""
reader = csv.reader(fd, *args, **kwargs)
header = reader.next()
return header, reader
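# Usage sketch (bucket and key names are placeholders):
#   with S3CompressedWriter("telemetry-bucket", "reports/summary.csv.gz", "text/csv") as out:
#       out.write("channel,crashes\nrelease,42\n")
#   header, rows = HeaderCSVReader(
#       S3CompressedReader("telemetry-bucket", "reports/summary.csv.gz"))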
| mpl-2.0 |
bringsvor/account-financial-tools | __unported__/async_move_line_importer/model/move_line_importer.py | 36 | 14197 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import traceback
import logging
import base64
import threading
import csv
import tempfile
import psycopg2
import openerp.pooler as pooler
from openerp.osv import orm, fields
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class move_line_importer(orm.Model):
"""Asynchrone move / move line importer.
It will parse the saved CSV file using orm.BaseModel.load
in a thread. If you set bypass_orm to True then the load function
will use a totally overridden create function that is a lot faster
    but that totally bypasses the ORM.
"""
_name = "move.line.importer"
_inherit = ['mail.thread']
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update(state='draft', report=False)
return super(move_line_importer, self).copy(cr, uid, id,
default=default,
context=context)
    def track_success(self, cr, uid, obj, context=None):
"""Used by mail subtype"""
return obj['state'] == 'done'
    def track_error(self, cr, uid, obj, context=None):
"""Used by mail subtype"""
return obj['state'] == 'error'
_track = {
'state': {
'async_move_line_importer.mvl_imported': track_success,
'async_move_line_importer.mvl_error': track_error,
},
}
_columns = {
'name': fields.datetime(
'Name',
required=True,
readonly=True
),
'state': fields.selection(
[('draft', 'New'),
('running', 'Running'),
('done', 'Success'),
('error', 'Error')],
readonly=True,
string='Status'
),
'report': fields.text(
'Report',
readonly=True
),
'file': fields.binary(
'File',
required=True
),
'delimiter': fields.selection(
[(',', ','), (';', ';'), ('|', '|')],
string="CSV delimiter",
required=True
),
'company_id': fields.many2one(
'res.company',
'Company'
),
'bypass_orm': fields.boolean(
'Fast import (use with caution)',
help="When enabled import will be faster but"
" it will not use orm and may"
" not support all CSV canvas. \n"
"Entry posted option will be skipped. \n"
"AA lines will only be created when"
" moves are posted. \n"
"Tax lines computation will be skipped. \n"
"This option should be used with caution"
" and in conjonction with provided canvas."
),
}
def _get_current_company(self, cr, uid, context=None,
model="move.line.importer"):
return self.pool.get('res.company')._company_default_get(
cr, uid,
model,
context=context
)
_defaults = {'state': 'draft',
'name': fields.datetime.now(),
'company_id': _get_current_company,
'delimiter': ',',
'bypass_orm': False}
def _parse_csv(self, cr, uid, imp_id):
"""Parse stored CSV file in order to be usable by BaseModel.load method.
Manage base 64 decoding.
:param imp_id: current importer id
:returns: (head [list of first row], data [list of list])
"""
# We use tempfile in order to avoid memory error with large files
with tempfile.TemporaryFile() as src:
imp = self.read(cr, uid, imp_id, ['file', 'delimiter'])
content = imp['file']
delimiter = imp['delimiter']
src.write(content)
with tempfile.TemporaryFile() as decoded:
src.seek(0)
base64.decode(src, decoded)
decoded.seek(0)
return self._prepare_csv_data(decoded, delimiter)
def _prepare_csv_data(self, csv_file, delimiter=","):
"""Parse a decoded CSV file and return head list and data list
:param csv_file: decoded CSV file
:param delimiter: CSV file delimiter char
:returns: (head [list of first row], data [list of list])
"""
try:
data = csv.reader(csv_file, delimiter=str(delimiter))
except csv.Error as error:
raise orm.except_orm(
_('CSV file is malformed'),
_("Maybe you have not choose correct separator \n"
"the error detail is : \n %s") % repr(error)
)
head = data.next()
head = [x.replace(' ', '') for x in head]
# Generator does not work with orm.BaseModel.load
values = [tuple(x) for x in data if x]
return (head, values)
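    # Illustrative sketch (not part of the original module): for a two-line CSV
    # "ref,debit\nMV1,100.0", _prepare_csv_data returns
    #   head   -> ['ref', 'debit']
    #   values -> [('MV1', '100.0')]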
def format_messages(self, messages):
"""Format error messages generated by the BaseModel.load method
:param messages: return of BaseModel.load messages key
:returns: formatted string
"""
res = []
for msg in messages:
rows = msg.get('rows', {})
res.append(_("%s. -- Field: %s -- rows %s to %s") % (
msg.get('message', 'N/A'),
msg.get('field', 'N/A'),
rows.get('from', 'N/A'),
rows.get('to', 'N/A'))
)
return "\n \n".join(res)
def _manage_load_results(self, cr, uid, imp_id, result, _do_commit=True,
context=None):
"""Manage the BaseModel.load function output and store exception.
Will generate success/failure report and store it into report field.
Manage commit and rollback even if load method uses PostgreSQL
Savepoints.
:param imp_id: current importer id
:param result: BaseModel.load returns
{ids: list(int)|False, messages: [Message]}
:param _do_commit: toggle commit management only used
for testing purpose only
:returns: current importer id
"""
        # Import successful
state = msg = None
if not result['messages']:
msg = _("%s lines imported" % len(result['ids'] or []))
state = 'done'
else:
if _do_commit:
cr.rollback()
msg = self.format_messages(result['messages'])
state = 'error'
return (imp_id, state, msg)
def _write_report(self, cr, uid, imp_id, state, msg, _do_commit=True,
max_tries=5, context=None):
"""Commit report in a separated transaction.
It will avoid concurrent update error due to mail.message.
        If a transaction problem happens we retry writing the report up to 5 times
:param imp_id: current importer id
:param state: import state
:param msg: report summary
:returns: current importer id
"""
if _do_commit:
db_name = cr.dbname
local_cr = pooler.get_db(db_name).cursor()
try:
self.write(local_cr, uid, [imp_id],
{'state': state, 'report': msg},
context=context)
local_cr.commit()
# We handle concurrent error troubles
except psycopg2.OperationalError as pg_exc:
_logger.error(
"Can not write report. "
"System will retry %s time(s)" % max_tries
)
if (pg_exc.pg_code in orm.PG_CONCURRENCY_ERRORS_TO_RETRY and
max_tries >= 0):
local_cr.rollback()
local_cr.close()
remaining_try = max_tries - 1
                    self._write_report(cr, uid, imp_id, state, msg,
_do_commit=_do_commit,
max_tries=remaining_try,
context=context)
else:
_logger.exception(
'Can not log report - Operational update error'
)
raise
except Exception:
_logger.exception('Can not log report')
local_cr.rollback()
raise
finally:
if not local_cr.closed:
local_cr.close()
else:
self.write(cr, uid, [imp_id],
{'state': state, 'report': msg},
context=context)
return imp_id
def _load_data(self, cr, uid, imp_id, head, data, _do_commit=True,
context=None):
"""Function that does the load of parsed CSV file.
        It will log exceptions and successes into the report field.
:param imp_id: current importer id
:param head: CSV file head (list of header)
:param data: CSV file content (list of data list)
:param _do_commit: toggle commit management
only used for testing purpose only
:returns: current importer id
"""
state = msg = None
try:
res = self.pool['account.move'].load(cr, uid, head, data,
context=context)
r_id, state, msg = self._manage_load_results(cr, uid, imp_id, res,
_do_commit=_do_commit,
context=context)
except Exception as exc:
if _do_commit:
cr.rollback()
ex_type, sys_exc, tb = sys.exc_info()
tb_msg = ''.join(traceback.format_tb(tb, 30))
_logger.error(tb_msg)
_logger.error(repr(exc))
msg = _("Unexpected exception.\n %s \n %s" % (repr(exc), tb_msg))
state = 'error'
finally:
self._write_report(cr, uid, imp_id, state, msg,
_do_commit=_do_commit, context=context)
if _do_commit:
try:
cr.commit()
except psycopg2.Error:
_logger.exception('Can not do final commit')
cr.close()
return imp_id
def _allows_thread(self, imp_id):
"""Check if there is a async import of this file running
:param imp_id: current importer id
:returns: void
:raise: orm.except in case on failure
"""
for th in threading.enumerate():
if th.getName() == 'async_move_line_import_%s' % imp_id:
raise orm.except_orm(
_('An import of this file is already running'),
                    _('Please try later')
)
def _check_permissions(self, cr, uid, context=None):
"""Ensure that user is allowed to create move / move line"""
move_obj = self.pool['account.move']
move_line_obj = self.pool['account.move.line']
move_obj.check_access_rule(cr, uid, [], 'create')
move_obj.check_access_rights(cr, uid, 'create', raise_exception=True)
move_line_obj.check_access_rule(cr, uid, [], 'create')
move_line_obj.check_access_rights(cr, uid, 'create',
raise_exception=True)
def import_file(self, cr, uid, imp_id, context=None):
""" Will do an asynchronous load of a CSV file.
        Will generate a success/failure report and generate some
        mail threads. It uses BaseModel.load to load the CSV.
If you set bypass_orm to True then the load function
will use a totally overridden create function that is a lot faster
        but that totally bypasses the ORM.
"""
if isinstance(imp_id, list):
imp_id = imp_id[0]
if context is None:
context = {}
current = self.read(cr, uid, imp_id, ['bypass_orm', 'company_id'],
load='_classic_write')
context['company_id'] = current['company_id']
bypass_orm = current['bypass_orm']
if bypass_orm:
            # Tells the create function to bypass the orm
            # As we bypass the orm we ensure that
            # the user is allowed to create move / move lines
self._check_permissions(cr, uid, context=context)
context['async_bypass_create'] = True
head, data = self._parse_csv(cr, uid, imp_id)
self.write(cr, uid, [imp_id], {'state': 'running',
'report': _('Import is running')})
self._allows_thread(imp_id)
db_name = cr.dbname
local_cr = pooler.get_db(db_name).cursor()
thread = threading.Thread(target=self._load_data,
name='async_move_line_import_%s' % imp_id,
args=(local_cr, uid, imp_id, head, data),
kwargs={'context': context.copy()})
thread.start()
return {}
| agpl-3.0 |
BrighterCommand/Brightside | brightside/dispatch.py | 1 | 14030 | """
File : dispatch.py
Author : ian
Created : 04-21-2017
Last Modified By : ian
Last Modified On : 04-21-2017
***********************************************************************
The MIT License (MIT)
Copyright © 2017 Ian Cooper <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
***********************************************************************
"""
import logging
import time
from enum import Enum
from multiprocessing import Event, Process
from threading import Thread
from typing import Callable, Dict
from brightside.channels import Channel
from brightside.command_processor import CommandProcessor, Request
from brightside.connection import Connection
from brightside.exceptions import ConfigurationException, MessagingException
from brightside.message_factory import create_quit_message
from brightside.message_pump import MessagePump
from brightside.messaging import BrightsideConsumerConfiguration, BrightsideConsumer, BrightsideMessage
class Performer:
def __init__(self,
channel_name: str,
connection: Connection,
consumer_configuration: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request],
logger: logging.Logger=None
) -> None:
"""
Each Performer abstracts a process running a message pump.
        That process is forked from the parent, as we cannot guarantee a message pump is only I/O bound, and thus it may
        not scale under threads because of the GIL.
The Performer is how the supervisor (the dispatcher) tracks the workers it has created
The Performer needs:
:param channel_name: The name of the channel we want to create a sub-process for
:param connection: The connection to the broker
        :param consumer_factory: We need a user supplied callback to provide us an instance of the consumer for
the broker we are using. Arame? Something else?
        :param command_processor_factory: We need a user supplied callback to create a command processor with
subscribers, policies, outgoing tasks queues etc.
:param mapper_func: We need a user supplied callback to map on the wire messages to requests
"""
        # TODO: The parameter needs to be a connection, not an AramaConnection as we can't decide to create an Arame Consumer
# here. Where do we make that choice?
self._channel_name = channel_name
self._connection = connection
self._consumer_configuration = consumer_configuration
self._consumer_factory = consumer_factory
self._command_processor_factory = command_processor_factory
self._mapper_func = mapper_func
self._logger = logger or logging.getLogger(__name__)
def stop(self) -> None:
self._consumer_configuration.pipeline.put(create_quit_message())
def run(self, started_event: Event) -> Process:
p = Process(target=_sub_process_main, args=(
started_event,
self._channel_name,
self._connection,
self._consumer_configuration,
self._consumer_factory,
self._command_processor_factory,
self._mapper_func))
self._logger.debug("Starting worker process for channel: %s on exchange %s on server %s",
self._channel_name, self._connection.exchange, self._connection.amqp_uri)
p.start()
started_event.wait(timeout=1)
return p
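# Illustrative sketch (not part of the original module) of driving a Performer
# directly; the channel name, connection, factories and mapper are placeholders
# the caller supplies:
#
#     performer = Performer("orders", connection, consumer_configuration,
#                           consumer_factory, command_processor_factory,
#                           map_order_message)
#     started = Event()
#     process = performer.run(started)   # forks the message pump sub-process
#     ...
#     performer.stop()                   # posts a quit message to the pipeline
#     process.join()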
def _sub_process_main(started_event: Event,
channel_name: str,
connection: Connection,
consumer_configuration: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request]) -> None:
"""
    This is the main method for the sub-process. Everything we need to create the message pump and
    channel must be passed in as parameters that can be pickled, because they will be serialized
    into this process when it runs. The data should be value types, not reference types, as we will receive a copy of the original.
Inter-process communication is signalled by the event - to indicate startup - and the pipeline to facilitate a
sentinel or stop message
:param started_event: Used by the sub-process to signal that it is ready
:param channel_name: The name we want to give the channel to the broker for identification
:param connection: The 'broker' connection
:param consumer_configuration: How to configure our consumer of messages from the channel
:param consumer_factory: Callback to create the consumer. User code as we don't know what consumer library they
want to use. Arame? Something else?
:param command_processor_factory: Callback to register subscribers, policies, and task queues then build command
processor. User code that provides us with their requests and handlers
:param mapper_func: We need to map between messages on the wire and our handlers
:return:
"""
logger = logging.getLogger(__name__)
consumer = consumer_factory(connection, consumer_configuration, logger)
channel = Channel(name=channel_name, consumer=consumer, pipeline=consumer_configuration.pipeline)
# TODO: Fix defaults that need passed in config values
command_processor = command_processor_factory(channel_name)
message_pump = MessagePump(command_processor=command_processor, channel=channel, mapper_func=mapper_func,
timeout=500, unacceptable_message_limit=None, requeue_count=None)
logger.debug("Starting the message pump for %s", channel_name)
message_pump.run(started_event)
class ConsumerConfiguration:
def __init__(self,
connection: Connection,
consumer: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request]) -> None:
"""
The configuration parameters for one consumer - can create one or more performers from this, each of which is
a message pump reading from a queue
:param connection: The connection to the broker
:param consumer: The consumer we want to create (routing key, queue etc)
:param consumer_factory: A factory to create a consumer to read from a broker, a given implementation i.e. arame
        :param command_processor_factory: Creates a command processor configured for a pipeline
        :param mapper_func: Maps between messages on the queue and requests (commands/events)
"""
self._connection = connection
self._consumer = consumer
self._consumer_factory = consumer_factory
self._command_processor_factory = command_processor_factory
self._mapper_func = mapper_func
@property
def connection(self) -> Connection:
return self._connection
@property
def brightside_configuration(self) -> BrightsideConsumerConfiguration:
return self._consumer
@property
def consumer_factory(self) -> Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer]:
return self._consumer_factory
@property
def command_processor_factory(self):
return self._command_processor_factory
@property
def mapper_func(self) -> Callable[[BrightsideMessage], Request]:
return self._mapper_func
class DispatcherState(Enum):
ds_awaiting = 0,
ds_notready = 1,
ds_running = 2,
ds_stopped = 3,
ds_stopping = 4
class Dispatcher:
"""
The dispatcher orchestrates the creation of consumers, where a consumer is the sub-process that runs a message pump
    to consume messages from a given channel and dispatch to handlers. The dispatcher can start more than one performer
for a given channel.
The dispatcher also orchestrates the shutdown of consumers. It does this by posting a stop message into each running
consumers queue, thus allowing the current handler to run to completion but killing the consumer before it can
consume another work item from the queue.
As such the dispatcher tracks consumer instances.
In addition, as we must pass a factory method to the sub-process that creates the command processor for that channel
    i.e. handler and policy registration, outgoing queues, the Dispatcher also acts as a registry of those factory methods
for individual channels.
    The dispatcher uses a thread to 'stay running' until end is called. This means that receive is non-blocking. The
supervisor thread yields regularly to avoid spinning the CPU. This means there can be a delay between signalling to
end and the shutdown beginning.
    Shutdown will finish work in progress, as it inserts a quit message in the queue that gets consumed 'next'
"""
def __init__(self, consumers: Dict[str, ConsumerConfiguration]) -> None:
self._state = DispatcherState.ds_notready
self._consumers = consumers
self._performers = {k: Performer(
k,
v.connection,
v.brightside_configuration,
v.consumer_factory,
v.command_processor_factory,
v.mapper_func)
for k, v in self._consumers.items()}
self._running_performers = {}
self._supervisor = None
self._state = DispatcherState.ds_awaiting
@property
def state(self):
return self._state
def receive(self):
def _receive(dispatcher: Dispatcher, initialized: Event) -> None:
for k, v in self._performers.items():
event = Event()
dispatcher._running_performers[k] = v.run(event)
event.wait(3) # TODO: Do we want to configure this polling interval?
initialized.set()
while self._state == DispatcherState.ds_running:
time.sleep(5) # yield to avoid spinning, between checking for changes to state
if self._state == DispatcherState.ds_awaiting:
initialized = Event()
self._supervisor = Thread(target=_receive, args=(self, initialized))
            initialized.wait(5)  # TODO: Should this be the number of performers, and should it be configurable?
self._state = DispatcherState.ds_running
self._supervisor.start()
def end(self):
if self._state == DispatcherState.ds_running:
for channel, process in list(self._running_performers.items()):
self._performers[channel].stop()
process.join(10) # TODO: We really want to make this configurable
self._state = DispatcherState.ds_stopping
self._supervisor.join(5)
self._running_performers.clear()
self._supervisor = None
self._state = DispatcherState.ds_stopped
        # Do we want to determine if any processes have failed to complete within the time frame?
def open(self, consumer_name: str) -> None:
# TODO: Build then refactor with receive
# Find the consumer
if consumer_name not in self._consumers:
raise ConfigurationException("The consumer {} could not be found, did you register it?".format(consumer_name))
consumer = self._consumers[consumer_name]
performer = Performer(consumer_name,
consumer.connection,
consumer.brightside_configuration,
consumer.consumer_factory,
consumer.command_processor_factory,
consumer.mapper_func)
self._performers[consumer_name] = performer
# if we have a supervisor thread
if self._state == DispatcherState.ds_running:
# start and add to items monitored by supervisor (running performers)
pass
# else
elif self._state == DispatcherState.ds_stopped:
# start the supervisor with the single consumer
self._state = DispatcherState.ds_awaiting
self.receive()
else:
            raise MessagingException("Dispatcher in an unrecognised state to open new connection; state was {}", self._state)
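# Illustrative end-to-end sketch (not part of the original module); every name
# below is a placeholder the caller would supply:
#
#     consumers = {"orders": ConsumerConfiguration(connection, consumer_config,
#                                                  consumer_factory,
#                                                  command_processor_factory,
#                                                  map_order_message)}
#     dispatcher = Dispatcher(consumers)
#     dispatcher.receive()   # non-blocking; a supervisor thread starts the performers
#     ...
#     dispatcher.end()       # lets in-flight work finish, then stops the performers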
| mit |
silly-wacky-3-town-toon/SOURCE-COD | Panda3D-1.10.0/direct/directscripts/extract_docs.py | 8 | 10595 | """ This script generates a pandadoc.hpp file representing the Python
wrappers that can be parsed by doxygen to generate the Python documentation.
You need to run this before invoking Doxyfile.python.
It requires a valid makepanda installation with interrogatedb .in
files in the lib/pandac/input directory. """
__all__ = []
import os
import panda3d, pandac
from panda3d.dtoolconfig import *
LICENSE = """PANDA 3D SOFTWARE
Copyright (c) Carnegie Mellon University. All rights reserved.
All use of this software is subject to the terms of the revised BSD
license. You should have received a copy of this license along
with this source code in a file named \"LICENSE.\"""".split("\n")
def comment(code):
if not code:
return ""
comment = ''
empty_line = False
for line in code.splitlines(False):
line = line.strip('\t\n /')
if line:
if empty_line:
# New paragraph.
comment += '\n\n'
empty_line = False
elif comment:
comment += '\n'
comment += '/// ' + line
else:
empty_line = True
if comment:
return comment
else:
return ''
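# Illustrative sketch (not part of the original script): comment() rewrites a
# C++-style comment into Doxygen lines, e.g.
#   comment("// Returns the node name.") -> "/// Returns the node name."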
def block_comment(code):
if not code:
return ""
lines = code.split("\n")
newlines = []
indent = 0
reading_desc = False
for line in lines:
if line.startswith("////"):
continue
line = line.rstrip()
strline = line.lstrip('/ \t')
if ':' in strline:
pre, post = strline.split(':', 1)
pre = pre.rstrip()
if pre == "Description":
strline = post.lstrip()
elif pre in ("Class", "Access", "Function", "Created by", "Enum"):
continue
if strline or len(newlines) > 0:
newlines.append('/// ' + strline)
#if reading_desc:
# newlines.append('/// ' + line[min(indent, len(line) - len(strline)):])
#else:
# # A "Description:" text starts the description.
# if strline.startswith("Description"):
# strline = strline[11:].lstrip(': \t')
# indent = len(line) - len(strline)
# reading_desc = True
# newlines.append('/// ' + strline)
# else:
# print line
newcode = '\n'.join(newlines)
if len(newcode) > 0:
return newcode
else:
return ""
def translateFunctionName(name):
if name.startswith("__"):
return name
new = ""
for i in name.split("_"):
if new == "":
new += i
elif i == "":
pass
elif len(i) == 1:
new += i[0].upper()
else:
new += i[0].upper() + i[1:]
return new
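# Illustrative sketch (not part of the original script):
#   translateFunctionName("get_num_children") -> "getNumChildren"
#   translateFunctionName("__init__")         -> "__init__"  (dunder names pass through)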
def translateTypeName(name, mangle=True):
# Equivalent to C++ classNameFromCppName
class_name = ""
bad_chars = "!@#$%^&*()<>,.-=+~{}? "
next_cap = False
first_char = mangle
for chr in name:
if (chr == '_' or chr == ' ') and mangle:
next_cap = True
elif chr in bad_chars:
if not mangle:
class_name += '_'
elif next_cap or first_char:
class_name += chr.upper()
next_cap = False
first_char = False
else:
class_name += chr
return class_name
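# Illustrative sketch (not part of the original script):
#   translateTypeName("panda_node")               -> "PandaNode"
#   translateTypeName("panda_node", mangle=False) -> "panda_node"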
def translated_type_name(type, scoped=True):
while interrogate_type_is_wrapped(type):
if interrogate_type_is_const(type):
return 'const ' + translated_type_name(interrogate_type_wrapped_type(type))
else:
type = interrogate_type_wrapped_type(type)
typename = interrogate_type_name(type)
if typename in ("PyObject", "_object"):
return "object"
elif typename == "PN_stdfloat":
return "float"
if interrogate_type_is_atomic(type):
token = interrogate_type_atomic_token(type)
if token == 7:
return 'str'
else:
return typename
if not typename.endswith('_t'):
# Hack: don't mangle size_t etc.
typename = translateTypeName(typename)
if scoped and interrogate_type_is_nested(type):
return translated_type_name(interrogate_type_outer_class(type)) + '::' + typename
else:
return typename
def processElement(handle, element):
if interrogate_element_has_comment(element):
print >>handle, comment(interrogate_element_comment(element))
print >>handle, translated_type_name(interrogate_element_type(element)),
print >>handle, interrogate_element_name(element) + ';'
def processFunction(handle, function, isConstructor = False):
for i_wrapper in xrange(interrogate_function_number_of_python_wrappers(function)):
wrapper = interrogate_function_python_wrapper(function, i_wrapper)
if interrogate_wrapper_has_comment(wrapper):
print >>handle, block_comment(interrogate_wrapper_comment(wrapper))
if not isConstructor:
if interrogate_function_is_method(function):
if not interrogate_wrapper_number_of_parameters(wrapper) > 0 or not interrogate_wrapper_parameter_is_this(wrapper, 0):
print >>handle, "static",
if interrogate_wrapper_has_return_value(wrapper):
print >>handle, translated_type_name(interrogate_wrapper_return_type(wrapper)),
else:
pass#print >>handle, "void",
print >>handle, translateFunctionName(interrogate_function_name(function)) + "(",
else:
print >>handle, "__init__(",
first = True
for i_param in range(interrogate_wrapper_number_of_parameters(wrapper)):
if not interrogate_wrapper_parameter_is_this(wrapper, i_param):
if not first:
print >>handle, ",",
print >>handle, translated_type_name(interrogate_wrapper_parameter_type(wrapper, i_param)),
if interrogate_wrapper_parameter_has_name(wrapper, i_param):
print >>handle, interrogate_wrapper_parameter_name(wrapper, i_param),
first = False
print >>handle, ");"
def processType(handle, type):
typename = translated_type_name(type, scoped=False)
derivations = [ translated_type_name(interrogate_type_get_derivation(type, n)) for n in range(interrogate_type_number_of_derivations(type)) ]
if interrogate_type_has_comment(type):
print >>handle, block_comment(interrogate_type_comment(type))
if interrogate_type_is_enum(type):
print >>handle, "enum %s {" % typename
for i_value in range(interrogate_type_number_of_enum_values(type)):
docstring = comment(interrogate_type_enum_value_comment(type, i_value))
if docstring:
print >>handle, docstring
print >>handle, interrogate_type_enum_value_name(type, i_value), "=", interrogate_type_enum_value(type, i_value), ","
elif interrogate_type_is_typedef(type):
wrapped_type = translated_type_name(interrogate_type_wrapped_type(type))
print >>handle, "typedef %s %s;" % (wrapped_type, typename)
return
else:
if interrogate_type_is_struct(type):
classtype = "struct"
elif interrogate_type_is_class(type):
classtype = "class"
elif interrogate_type_is_union(type):
classtype = "union"
else:
print "I don't know what type %s is" % interrogate_type_true_name(type)
return
if len(derivations) > 0:
print >>handle, "%s %s : public %s {" % (classtype, typename, ", public ".join(derivations))
else:
print >>handle, "%s %s {" % (classtype, typename)
print >>handle, "public:"
for i_ntype in xrange(interrogate_type_number_of_nested_types(type)):
processType(handle, interrogate_type_get_nested_type(type, i_ntype))
for i_method in xrange(interrogate_type_number_of_constructors(type)):
processFunction(handle, interrogate_type_get_constructor(type, i_method), True)
for i_method in xrange(interrogate_type_number_of_methods(type)):
processFunction(handle, interrogate_type_get_method(type, i_method))
for i_method in xrange(interrogate_type_number_of_make_seqs(type)):
print >>handle, "list", translateFunctionName(interrogate_make_seq_seq_name(interrogate_type_get_make_seq(type, i_method))), "();"
for i_element in xrange(interrogate_type_number_of_elements(type)):
processElement(handle, interrogate_type_get_element(type, i_element))
print >>handle, "};"
def processModule(handle, package):
print >>handle, "namespace %s {" % package
if package != "core":
print >>handle, "using namespace core;"
for i_type in xrange(interrogate_number_of_global_types()):
type = interrogate_get_global_type(i_type)
if interrogate_type_has_module_name(type):
module_name = interrogate_type_module_name(type)
if "panda3d." + package == module_name:
processType(handle, type)
else:
print "Type %s has no module name" % typename
for i_func in xrange(interrogate_number_of_global_functions()):
func = interrogate_get_global_function(i_func)
if interrogate_function_has_module_name(func):
module_name = interrogate_function_module_name(func)
if "panda3d." + package == module_name:
processFunction(handle, func)
else:
print "Type %s has no module name" % typename
print >>handle, "}"
if __name__ == "__main__":
handle = open("pandadoc.hpp", "w")
print >>handle, comment("Panda3D modules that are implemented in C++.")
print >>handle, "namespace panda3d {"
# Determine the path to the interrogatedb files
interrogate_add_search_directory(os.path.join(os.path.dirname(pandac.__file__), "..", "..", "etc"))
interrogate_add_search_directory(os.path.join(os.path.dirname(pandac.__file__), "input"))
import panda3d.core
processModule(handle, "core")
for lib in os.listdir(os.path.dirname(panda3d.__file__)):
if lib.endswith(('.pyd', '.so')) and not lib.startswith('core.'):
module_name = os.path.splitext(lib)[0]
__import__("panda3d." + module_name)
processModule(handle, module_name)
print >>handle, "}"
handle.close()
| apache-2.0 |
wkoathp/glance | glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py | 17 | 3573 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint
from oslo_db import exception as db_exception
from sqlalchemy import and_, func, orm
from sqlalchemy import MetaData, Table
from sqlalchemy.exc import OperationalError, ProgrammingError
NEW_KEYNAME = 'image_members_image_id_member_deleted_at_key'
ORIGINAL_KEYNAME_RE = re.compile('image_members_image_id.*_key')
def upgrade(migrate_engine):
image_members = _get_image_members_table(migrate_engine)
if migrate_engine.name in ('mysql', 'postgresql'):
try:
UniqueConstraint('image_id',
name=_get_original_keyname(migrate_engine.name),
table=image_members).drop()
except (OperationalError, ProgrammingError, db_exception.DBError):
UniqueConstraint('image_id',
name=_infer_original_keyname(image_members),
table=image_members).drop()
UniqueConstraint('image_id',
'member',
'deleted_at',
name=NEW_KEYNAME,
table=image_members).create()
def downgrade(migrate_engine):
image_members = _get_image_members_table(migrate_engine)
if migrate_engine.name in ('mysql', 'postgresql'):
_sanitize(migrate_engine, image_members)
UniqueConstraint('image_id',
name=NEW_KEYNAME,
table=image_members).drop()
UniqueConstraint('image_id',
'member',
name=_get_original_keyname(migrate_engine.name),
table=image_members).create()
def _get_image_members_table(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
return Table('image_members', meta, autoload=True)
def _get_original_keyname(db):
return {'mysql': 'image_id',
'postgresql': 'image_members_image_id_member_key'}[db]
def _infer_original_keyname(table):
for i in table.indexes:
if ORIGINAL_KEYNAME_RE.match(i.name):
return i.name
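# Illustrative note (not part of the original migration): on PostgreSQL the
# pre-existing constraint is typically named image_members_image_id_member_key,
# which ORIGINAL_KEYNAME_RE matches and _infer_original_keyname() recovers from
# the table's indexes when the expected name is absent.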
def _sanitize(migrate_engine, table):
"""
Avoid possible integrity error by removing deleted rows
to accommodate less restrictive uniqueness constraint
"""
session = orm.sessionmaker(bind=migrate_engine)()
# find the image_member rows containing duplicate combinations
# of image_id and member
qry = (session.query(table.c.image_id, table.c.member)
.group_by(table.c.image_id, table.c.member)
.having(func.count() > 1))
for image_id, member in qry:
# only remove duplicate rows already marked deleted
d = table.delete().where(and_(table.c.deleted == True,
table.c.image_id == image_id,
table.c.member == member))
d.execute()
session.close()
| apache-2.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/docutils/parsers/rst/languages/gl.py | 130 | 3711 | # -*- coding: utf-8 -*-
# Author: David Goodger
# Contact: [email protected]
# Revision: $Revision: 4229 $
# Date: $Date: 2005-12-23 00:46:16 +0100 (Fri, 23 Dec 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Galician-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'atenci\u00f3n': 'attention',
u'advertencia': 'caution',
u'code (translation required)': 'code',
u'perigo': 'danger',
u'erro': 'error',
u'pista': 'hint',
u'importante': 'important',
u'nota': 'note',
u'consello': 'tip',
u'aviso': 'warning',
u'admonici\u00f3n': 'admonition',
u'barra lateral': 'sidebar',
u't\u00f3pico': 'topic',
u'bloque-li\u00f1a': 'line-block',
u'literal-analizado': 'parsed-literal',
u'r\u00fabrica': 'rubric',
u'ep\u00edgrafe': 'epigraph',
u'realzados': 'highlights',
u'coller-citaci\u00f3n': 'pull-quote',
u'compor': 'compound',
u'recipiente': 'container',
#'questions': 'questions',
u't\u00e1boa': 'table',
u't\u00e1boa-csv': 'csv-table',
u't\u00e1boa-listaxe': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
u'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
u'imaxe': 'image',
u'figura': 'figure',
u'inclu\u00edr': 'include',
u'cru': 'raw',
u'substitu\u00edr': 'replace',
u'unicode': 'unicode',
u'data': 'date',
u'clase': 'class',
u'regra': 'role',
u'regra-predeterminada': 'default-role',
u't\u00edtulo': 'title',
u'contido': 'contents',
u'seccnum': 'sectnum',
u'secci\u00f3n-numerar': 'sectnum',
u'cabeceira': 'header',
u'p\u00e9 de p\u00e1xina': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'notas-destino': 'target-notes',
u'texto restruturado-proba-directiva': 'restructuredtext-test-directive'}
"""Galician name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'abreviatura': 'abbreviation',
u'ab': 'abbreviation',
u'acr\u00f3nimo': 'acronym',
u'ac': 'acronym',
u'code (translation required)': 'code',
u'\u00edndice': 'index',
u'i': 'index',
u'sub\u00edndice': 'subscript',
u'sub': 'subscript',
u'super\u00edndice': 'superscript',
u'sup': 'superscript',
u'referencia t\u00edtulo': 'title-reference',
u't\u00edtulo': 'title-reference',
u't': 'title-reference',
u'referencia-pep': 'pep-reference',
u'pep': 'pep-reference',
u'referencia-rfc': 'rfc-reference',
u'rfc': 'rfc-reference',
u'\u00e9nfase': 'emphasis',
u'forte': 'strong',
u'literal': 'literal',
'math (translation required)': 'math',
u'referencia-nome': 'named-reference',
u'referencia-an\u00f3nimo': 'anonymous-reference',
u'referencia-nota ao p\u00e9': 'footnote-reference',
u'referencia-citaci\u00f3n': 'citation-reference',
u'referencia-substituci\u00f3n': 'substitution-reference',
u'destino': 'target',
u'referencia-uri': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'cru': 'raw',}
"""Mapping of Galician role names to canonical role names for interpreted text.
"""
| gpl-2.0 |
bepitulaz/huntingdimana | env/Lib/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | 1786 | 2504 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
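# Illustrative sketch (not part of the original script), assuming pure-ASCII
# input bytes:
#
#     >>> description_of([b'hello world\n'], name='example.txt')
#     'example.txt: ascii with confidence 1.0'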
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
| gpl-3.0 |
citrix-openstack/build-ryu | ryu/tests/integrated/test_add_flow_v12_actions.py | 5 | 15350 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.tests.integrated import tester
LOG = logging.getLogger(__name__)
class RunTest(tester.TestFlowBase):
""" Test case for add flows of Actions
"""
OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(RunTest, self).__init__(*args, **kwargs)
self._verify = []
def add_apply_actions(self, dp, actions, match=None):
inst = [dp.ofproto_parser.OFPInstructionActions(
dp.ofproto.OFPIT_APPLY_ACTIONS, actions)]
if match is None:
match = dp.ofproto_parser.OFPMatch()
m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, 0,
dp.ofproto.OFPFC_ADD,
0, 0, 0xff, 0xffffffff,
dp.ofproto.OFPP_ANY,
dp.ofproto.OFPG_ANY,
0, match, inst)
dp.send_msg(m)
def add_set_field_action(self, dp, field, value, match=None):
self._verify = [dp.ofproto.OFPAT_SET_FIELD,
'field', field, value]
f = dp.ofproto_parser.OFPMatchField.make(field, value)
actions = [dp.ofproto_parser.OFPActionSetField(f), ]
self.add_apply_actions(dp, actions, match=match)
def verify_default(self, dp, stats):
verify = self._verify
self._verify = []
type_ = name = field = value = None
if len(verify) == 1:
(type_, ) = verify
elif len(verify) == 3:
(type_, name, value) = verify
elif len(verify) == 4:
(type_, name, field, value) = verify
else:
return "self._verify is invalid."
try:
action = stats[0].instructions[0].actions[0]
if action.cls_action_type != type_:
return "Action type error. send:%s, val:%s" \
% (type_, action.cls_action_type)
except IndexError:
return "Action is not setting."
s_val = None
if name:
try:
s_val = getattr(action, name)
except AttributeError:
pass
if name == 'field':
if s_val.header != field:
return "Field error. send:%s val:%s" \
% (field, s_val.field)
s_val = s_val.value
if name and s_val != value:
return "Value error. send:%s=%s val:%s" \
% (name, value, s_val)
return True
def verify_action_drop(self, dp, stats):
for s in stats:
for i in s.instructions:
if len(i.actions):
return "has actions. %s" % (i.actions)
return True
# Test of General Actions
def test_action_output(self, dp):
out_port = 255
self._verify = [dp.ofproto.OFPAT_OUTPUT,
'port', out_port]
actions = [dp.ofproto_parser.OFPActionOutput(out_port, 0), ]
self.add_apply_actions(dp, actions)
def test_action_drop(self, dp):
self.add_apply_actions(dp, [])
# Test of Push-Tag/Pop-Tag Actions
def test_action_push_vlan(self, dp):
ethertype = ether.ETH_TYPE_8021Q
self._verify = [dp.ofproto.OFPAT_PUSH_VLAN,
'ethertype', ethertype]
actions = [dp.ofproto_parser.OFPActionPushVlan(ethertype)]
self.add_apply_actions(dp, actions)
def test_action_pop_vlan(self, dp):
self._verify = [dp.ofproto.OFPAT_POP_VLAN, ]
actions = [dp.ofproto_parser.OFPActionPopVlan(), ]
self.add_apply_actions(dp, actions)
def test_action_push_mpls(self, dp):
ethertype = ether.ETH_TYPE_MPLS
self._verify = [dp.ofproto.OFPAT_PUSH_MPLS,
'ethertype', ethertype]
actions = [dp.ofproto_parser.OFPActionPushMpls(ethertype), ]
self.add_apply_actions(dp, actions)
def test_action_pop_mpls(self, dp):
ethertype = ether.ETH_TYPE_8021Q
self._verify = [dp.ofproto.OFPAT_POP_MPLS,
'ethertype', ethertype]
actions = [dp.ofproto_parser.OFPActionPopMpls(ethertype), ]
self.add_apply_actions(dp, actions)
# Test of Set-Filed Actions
def test_action_set_field_dl_dst(self, dp):
field = dp.ofproto.OXM_OF_ETH_DST
dl_dst = 'e2:7a:09:79:0b:0f'
value = self.haddr_to_bin(dl_dst)
self.add_set_field_action(dp, field, value)
def test_action_set_field_dl_src(self, dp):
field = dp.ofproto.OXM_OF_ETH_SRC
dl_src = '08:82:63:b6:62:05'
value = self.haddr_to_bin(dl_src)
self.add_set_field_action(dp, field, value)
def test_action_set_field_dl_type(self, dp):
field = dp.ofproto.OXM_OF_ETH_TYPE
value = ether.ETH_TYPE_IPV6
self.add_set_field_action(dp, field, value)
def test_action_set_field_vlan_vid(self, dp):
field = dp.ofproto.OXM_OF_VLAN_VID
value = 0x1e4
self.add_set_field_action(dp, field, value)
def test_action_set_field_vlan_pcp(self, dp):
field = dp.ofproto.OXM_OF_VLAN_PCP
value = 3
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid(1)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_nw_dscp(self, dp):
field = dp.ofproto.OXM_OF_IP_DSCP
value = 32
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_nw_ecn(self, dp):
field = dp.ofproto.OXM_OF_IP_ECN
value = 1
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_ip_proto(self, dp):
field = dp.ofproto.OXM_OF_IP_PROTO
value = inet.IPPROTO_TCP
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_ipv4_src(self, dp):
field = dp.ofproto.OXM_OF_IPV4_SRC
ipv4_src = '192.168.3.92'
value = self.ipv4_to_int(ipv4_src)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_ipv4_dst(self, dp):
field = dp.ofproto.OXM_OF_IPV4_DST
ipv4_dst = '192.168.74.122'
value = self.ipv4_to_int(ipv4_dst)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_tcp_src(self, dp):
field = dp.ofproto.OXM_OF_TCP_SRC
value = 105
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
match.set_ip_proto(inet.IPPROTO_TCP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_tcp_dst(self, dp):
field = dp.ofproto.OXM_OF_TCP_DST
value = 75
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
match.set_ip_proto(inet.IPPROTO_TCP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_udp_src(self, dp):
field = dp.ofproto.OXM_OF_UDP_SRC
value = 197
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
match.set_ip_proto(inet.IPPROTO_UDP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_udp_dst(self, dp):
field = dp.ofproto.OXM_OF_UDP_DST
value = 17
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
match.set_ip_proto(inet.IPPROTO_UDP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_icmpv4_type(self, dp):
field = dp.ofproto.OXM_OF_ICMPV4_TYPE
value = 8
match = dp.ofproto_parser.OFPMatch()
match.set_ip_proto(inet.IPPROTO_ICMP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_icmpv4_code(self, dp):
field = dp.ofproto.OXM_OF_ICMPV4_CODE
value = 2
match = dp.ofproto_parser.OFPMatch()
match.set_ip_proto(inet.IPPROTO_ICMP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_op(self, dp):
field = dp.ofproto.OXM_OF_ARP_OP
value = 2
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_spa(self, dp):
field = dp.ofproto.OXM_OF_ARP_SPA
nw_src = '192.168.132.179'
value = self.ipv4_to_int(nw_src)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_tpa(self, dp):
field = dp.ofproto.OXM_OF_ARP_TPA
nw_dst = '192.168.118.85'
value = self.ipv4_to_int(nw_dst)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_sha(self, dp):
field = dp.ofproto.OXM_OF_ARP_SHA
arp_sha = '50:29:e7:7f:6c:7f'
value = self.haddr_to_bin(arp_sha)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_tha(self, dp):
field = dp.ofproto.OXM_OF_ARP_THA
arp_tha = '71:c8:72:2f:47:fd'
value = self.haddr_to_bin(arp_tha)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_ipv6_src(self, dp):
field = dp.ofproto.OXM_OF_IPV6_SRC
ipv6_src = '7527:c798:c772:4a18:117a:14ff:c1b6:e4ef'
value = self.ipv6_to_int(ipv6_src)
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_dst(self, dp):
field = dp.ofproto.OXM_OF_IPV6_DST
ipv6_dst = '8893:65b3:6b49:3bdb:3d2:9401:866c:c96'
value = self.ipv6_to_int(ipv6_dst)
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_flabel(self, dp):
field = dp.ofproto.OXM_OF_IPV6_FLABEL
value = 0x2c12
self.add_set_field_action(dp, field, value)
def test_action_set_field_icmpv6_type(self, dp):
field = dp.ofproto.OXM_OF_ICMPV6_TYPE
value = 129
self.add_set_field_action(dp, field, value)
def test_action_set_field_icmpv6_code(self, dp):
field = dp.ofproto.OXM_OF_ICMPV6_CODE
value = 2
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_nd_target(self, dp):
field = dp.ofproto.OXM_OF_IPV6_ND_TARGET
target = "5420:db3f:921b:3e33:2791:98f:dd7f:2e19"
value = self.ipv6_to_int(target)
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_nd_sll(self, dp):
field = dp.ofproto.OXM_OF_IPV6_ND_SLL
sll = "54:db:3f:3e:27:19"
value = self.haddr_to_bin(sll)
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_nd_tll(self, dp):
field = dp.ofproto.OXM_OF_IPV6_ND_TLL
tll = "83:13:48:1e:d0:b0"
value = self.haddr_to_bin(tll)
self.add_set_field_action(dp, field, value)
def test_action_set_field_mpls_label(self, dp):
field = dp.ofproto.OXM_OF_MPLS_LABEL
value = 0x4c
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_MPLS)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_mpls_tc(self, dp):
field = dp.ofproto.OXM_OF_MPLS_TC
value = 0b101
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_MPLS)
self.add_set_field_action(dp, field, value, match)
# Test of Change-TTL Actions
def test_action_set_mpls_ttl(self, dp):
mpls_ttl = 8
self._verify = [dp.ofproto.OFPAT_SET_MPLS_TTL,
'mpls_ttl', mpls_ttl]
actions = [dp.ofproto_parser.OFPActionSetMplsTtl(mpls_ttl), ]
self.add_apply_actions(dp, actions)
def test_action_dec_mpls_ttl(self, dp):
self._verify = [dp.ofproto.OFPAT_DEC_MPLS_TTL]
actions = [dp.ofproto_parser.OFPActionDecMplsTtl(), ]
self.add_apply_actions(dp, actions)
def test_action_set_nw_ttl(self, dp):
nw_ttl = 64
self._verify = [dp.ofproto.OFPAT_SET_NW_TTL,
'nw_ttl', nw_ttl]
actions = [dp.ofproto_parser.OFPActionSetNwTtl(nw_ttl), ]
self.add_apply_actions(dp, actions)
def test_action_dec_nw_ttl(self, dp):
self._verify = [dp.ofproto.OFPAT_DEC_NW_TTL]
actions = [dp.ofproto_parser.OFPActionDecNwTtl(), ]
self.add_apply_actions(dp, actions)
def test_action_copy_ttl_out(self, dp):
self._verify = [dp.ofproto.OFPAT_COPY_TTL_OUT]
actions = [dp.ofproto_parser.OFPActionCopyTtlOut(), ]
self.add_apply_actions(dp, actions)
def test_action_copy_ttl_in(self, dp):
self._verify = [dp.ofproto.OFPAT_COPY_TTL_IN]
actions = [dp.ofproto_parser.OFPActionCopyTtlIn(), ]
self.add_apply_actions(dp, actions)
def is_supported(self, t):
# Open vSwitch 1.10 does not support MPLS yet.
unsupported = [
'test_action_set_field_ip_proto',
'test_action_set_field_dl_type',
'test_action_set_field_arp',
'test_action_set_field_ipv6',
'test_action_set_field_icmp',
'test_action_set_nw_ttl',
'test_action_copy_ttl_in',
'test_action_copy_ttl_out',
'test_action_dec_mpls_ttl',
'test_action_pop_mpls',
'test_action_push_mpls',
'test_action_set_field_mpls_label',
'test_action_set_field_mpls_tc',
'test_action_set_mpls_ttl'
]
for u in unsupported:
if t.find(u) != -1:
return False
return True
| apache-2.0 |
snnn/tensorflow | tensorflow/python/debug/cli/tensor_format.py | 44 | 20447 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Format tensors (ndarrays) for screen display and navigation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.lib import debug_data
_NUMPY_OMISSION = "...,"
_NUMPY_DEFAULT_EDGE_ITEMS = 3
_NUMBER_REGEX = re.compile(r"[-+]?([0-9][-+0-9eE\.]+|nan|inf)(\s|,|\])")
BEGIN_INDICES_KEY = "i0"
OMITTED_INDICES_KEY = "omitted"
DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR = "bold"
class HighlightOptions(object):
"""Options for highlighting elements of a tensor."""
def __init__(self,
criterion,
description=None,
font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):
"""Constructor of HighlightOptions.
Args:
criterion: (callable) A callable of the following signature:
def to_highlight(X):
# Args:
# X: The tensor to highlight elements in.
#
# Returns:
# (boolean ndarray) A boolean ndarray of the same shape as X
# indicating which elements are to be highlighted (iff True).
This callable will be used as the argument of np.argwhere() to
determine which elements of the tensor are to be highlighted.
description: (str) Description of the highlight criterion embodied by
criterion.
font_attr: (str) Font attribute to be applied to the
highlighted elements.
"""
self.criterion = criterion
self.description = description
self.font_attr = font_attr
def format_tensor(tensor,
tensor_label,
include_metadata=False,
auxiliary_message=None,
include_numeric_summary=False,
np_printoptions=None,
highlight_options=None):
"""Generate a RichTextLines object showing a tensor in formatted style.
Args:
tensor: The tensor to be displayed, as a numpy ndarray or other
appropriate format (e.g., None representing uninitialized tensors).
tensor_label: A label for the tensor, as a string. If set to None, will
suppress the tensor name line in the return value.
include_metadata: Whether metadata such as dtype and shape are to be
included in the formatted text.
auxiliary_message: An auxiliary message to display under the tensor label,
dtype and shape information lines.
include_numeric_summary: Whether a text summary of the numeric values (if
applicable) will be included.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions() to set the text format for display numpy
ndarrays.
highlight_options: (HighlightOptions) options for highlighting elements
of the tensor.
Returns:
A RichTextLines object. Its annotation field has line-by-line markups to
indicate which indices in the array the first element of each line
corresponds to.
"""
lines = []
font_attr_segs = {}
if tensor_label is not None:
lines.append("Tensor \"%s\":" % tensor_label)
suffix = tensor_label.split(":")[-1]
if suffix.isdigit():
# Suffix is a number. Assume it is the output slot index.
font_attr_segs[0] = [(8, 8 + len(tensor_label), "bold")]
else:
# Suffix is not a number. It is auxiliary information such as the debug
# op type. In this case, highlight the suffix with a different color.
debug_op_len = len(suffix)
proper_len = len(tensor_label) - debug_op_len - 1
font_attr_segs[0] = [
(8, 8 + proper_len, "bold"),
(8 + proper_len + 1, 8 + proper_len + 1 + debug_op_len, "yellow")
]
if isinstance(tensor, debug_data.InconvertibleTensorProto):
if lines:
lines.append("")
lines.extend(str(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
elif not isinstance(tensor, np.ndarray):
# If tensor is not a np.ndarray, return simple text-line representation of
# the object without annotations.
if lines:
lines.append("")
lines.extend(repr(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
if include_metadata:
lines.append(" dtype: %s" % str(tensor.dtype))
lines.append(" shape: %s" % str(tensor.shape).replace("L", ""))
if lines:
lines.append("")
formatted = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
if auxiliary_message:
formatted.extend(auxiliary_message)
if include_numeric_summary:
formatted.append("Numeric summary:")
formatted.extend(numeric_summary(tensor))
formatted.append("")
# Apply custom string formatting options for numpy ndarray.
if np_printoptions is not None:
np.set_printoptions(**np_printoptions)
array_lines = repr(tensor).split("\n")
if tensor.dtype.type is not np.string_:
# Parse array lines to get beginning indices for each line.
# TODO(cais): Currently, we do not annotate string-type tensors due to
# difficulty in escaping sequences. Address this issue.
annotations = _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=np_printoptions)
else:
annotations = None
formatted_array = debugger_cli_common.RichTextLines(
array_lines, annotations=annotations)
formatted.extend(formatted_array)
# Perform optional highlighting.
if highlight_options is not None:
indices_list = list(np.argwhere(highlight_options.criterion(tensor)))
total_elements = np.size(tensor)
highlight_summary = "Highlighted%s: %d of %d element(s) (%.2f%%)" % (
"(%s)" % highlight_options.description if highlight_options.description
else "", len(indices_list), total_elements,
len(indices_list) / float(total_elements) * 100.0)
formatted.lines[0] += " " + highlight_summary
if indices_list:
indices_list = [list(indices) for indices in indices_list]
are_omitted, rows, start_cols, end_cols = locate_tensor_element(
formatted, indices_list)
for is_omitted, row, start_col, end_col in zip(are_omitted, rows,
start_cols, end_cols):
if is_omitted or start_col is None or end_col is None:
continue
if row in formatted.font_attr_segs:
formatted.font_attr_segs[row].append(
(start_col, end_col, highlight_options.font_attr))
else:
formatted.font_attr_segs[row] = [(start_col, end_col,
highlight_options.font_attr)]
return formatted
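# Hedged usage sketch (not part of the original module): a minimal example of
# calling format_tensor defined above. The tensor name and print options are
# made up for illustration, and the helper is never invoked at import time.
def _example_format_tensor_usage():
  """Format a small example tensor (illustrative only)."""
  example = np.linspace(0.0, 0.9375, 16).reshape(4, 4)
  screen_output = format_tensor(
      example,
      "example_tensor:0",
      include_metadata=True,
      np_printoptions={"linewidth": 40})
  # screen_output.lines holds the text lines; screen_output.annotations maps
  # line numbers to the begin indices of the first element on each line.
  return screen_output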
def _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=None, offset=0):
"""Generate annotations for line-by-line begin indices of tensor text.
Parse the numpy-generated text representation of a numpy ndarray to
determine the indices of the first element of each text line (if any
element is present in the line).
For example, given the following multi-line ndarray text representation:
["array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])"]
  the generated annotation will be:
{0: {BEGIN_INDICES_KEY: [0, 0]},
1: {BEGIN_INDICES_KEY: [1, 0]},
2: {BEGIN_INDICES_KEY: [2, 0]},
3: {BEGIN_INDICES_KEY: [3, 0]}}
Args:
array_lines: Text lines representing the tensor, as a list of str.
tensor: The tensor being formatted as string.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions().
offset: Line number offset applied to the line indices in the returned
annotation.
Returns:
An annotation as a dict.
"""
if np_printoptions and "edgeitems" in np_printoptions:
edge_items = np_printoptions["edgeitems"]
else:
edge_items = _NUMPY_DEFAULT_EDGE_ITEMS
annotations = {}
# Put metadata about the tensor in the annotations["tensor_metadata"].
annotations["tensor_metadata"] = {
"dtype": tensor.dtype, "shape": tensor.shape}
dims = np.shape(tensor)
ndims = len(dims)
if ndims == 0:
# No indices for a 0D tensor.
return annotations
curr_indices = [0] * len(dims)
curr_dim = 0
for i in xrange(len(array_lines)):
line = array_lines[i].strip()
if not line:
# Skip empty lines, which can appear for >= 3D arrays.
continue
if line == _NUMPY_OMISSION:
annotations[offset + i] = {OMITTED_INDICES_KEY: copy.copy(curr_indices)}
curr_indices[curr_dim - 1] = dims[curr_dim - 1] - edge_items
else:
num_lbrackets = line.count("[") # TODO(cais): String array escaping.
num_rbrackets = line.count("]")
curr_dim += num_lbrackets - num_rbrackets
annotations[offset + i] = {BEGIN_INDICES_KEY: copy.copy(curr_indices)}
if num_rbrackets == 0:
line_content = line[line.rfind("[") + 1:]
num_elements = line_content.count(",")
curr_indices[curr_dim - 1] += num_elements
else:
if curr_dim > 0:
curr_indices[curr_dim - 1] += 1
for k in xrange(curr_dim, ndims):
curr_indices[k] = 0
return annotations
def locate_tensor_element(formatted, indices):
"""Locate a tensor element in formatted text lines, given element indices.
Given a RichTextLines object representing a tensor and indices of the sought
  element, return the row number at which the element is located (if it
  exists).
Args:
formatted: A RichTextLines object containing formatted text lines
representing the tensor.
indices: Indices of the sought element, as a list of int or a list of list
of int. The former case is for a single set of indices to look up,
whereas the latter case is for looking up a batch of indices sets at once.
In the latter case, the indices must be in ascending order, or a
ValueError will be raised.
Returns:
1) A boolean indicating whether the element falls into an omitted line.
2) Row index.
    3) Column start index, i.e., the first column in which the representation
       of the specified tensor element starts, if it can be determined. If it
       cannot be determined (e.g., due to ellipsis), None.
    4) Column end index, i.e., the column right after the last column that
       represents the specified tensor element. If it cannot be determined,
       None.
  The return values described above are based on a single set of indices to
look up. In the case of batch mode (multiple sets of indices), the return
values will be lists of the types described above.
Raises:
AttributeError: If:
Input argument "formatted" does not have the required annotations.
ValueError: If:
1) Indices do not match the dimensions of the tensor, or
2) Indices exceed sizes of the tensor, or
3) Indices contain negative value(s).
4) If in batch mode, and if not all sets of indices are in ascending
order.
"""
if isinstance(indices[0], list):
indices_list = indices
input_batch = True
else:
indices_list = [indices]
input_batch = False
# Check that tensor_metadata is available.
if "tensor_metadata" not in formatted.annotations:
raise AttributeError("tensor_metadata is not available in annotations.")
# Sanity check on input argument.
_validate_indices_list(indices_list, formatted)
dims = formatted.annotations["tensor_metadata"]["shape"]
batch_size = len(indices_list)
lines = formatted.lines
annot = formatted.annotations
prev_r = 0
prev_line = ""
prev_indices = [0] * len(dims)
# Initialize return values
are_omitted = [None] * batch_size
row_indices = [None] * batch_size
start_columns = [None] * batch_size
end_columns = [None] * batch_size
batch_pos = 0 # Current position in the batch.
for r in xrange(len(lines)):
if r not in annot:
continue
if BEGIN_INDICES_KEY in annot[r]:
indices_key = BEGIN_INDICES_KEY
elif OMITTED_INDICES_KEY in annot[r]:
indices_key = OMITTED_INDICES_KEY
matching_indices_list = [
ind for ind in indices_list[batch_pos:]
if prev_indices <= ind < annot[r][indices_key]
]
if matching_indices_list:
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
batch_pos += num_matches
if batch_pos >= batch_size:
break
prev_r = r
prev_line = lines[r]
prev_indices = annot[r][indices_key]
if batch_pos < batch_size:
matching_indices_list = indices_list[batch_pos:]
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
if input_batch:
return are_omitted, row_indices, start_columns, end_columns
else:
return are_omitted[0], row_indices[0], start_columns[0], end_columns[0]
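# Hedged usage sketch (not part of the original module): looking up elements
# in the output of format_tensor. The tensor and indices are made up for
# illustration; batch lookups require the index sets to be in ascending order.
def _example_locate_tensor_element():
  example = np.linspace(0.0, 0.9375, 16).reshape(4, 4)
  formatted = format_tensor(example, "example_tensor:0")
  # Single lookup: returns (is_omitted, row, start_col, end_col).
  single = locate_tensor_element(formatted, [1, 2])
  # Batch lookup: returns lists of the same four quantities.
  batch = locate_tensor_element(formatted, [[0, 0], [1, 2], [3, 3]])
  return single, batch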
def _validate_indices_list(indices_list, formatted):
prev_ind = None
for ind in indices_list:
# Check indices match tensor dimensions.
dims = formatted.annotations["tensor_metadata"]["shape"]
if len(ind) != len(dims):
raise ValueError("Dimensions mismatch: requested: %d; actual: %d" %
(len(ind), len(dims)))
    # Check indices are within size limits.
for req_idx, siz in zip(ind, dims):
if req_idx >= siz:
raise ValueError("Indices exceed tensor dimensions.")
if req_idx < 0:
raise ValueError("Indices contain negative value(s).")
# Check indices are in ascending order.
if prev_ind and ind < prev_ind:
raise ValueError("Input indices sets are not in ascending order.")
prev_ind = ind
def _locate_elements_in_line(line, indices_list, ref_indices):
"""Determine the start and end indices of an element in a line.
Args:
line: (str) the line in which the element is to be sought.
indices_list: (list of list of int) list of indices of the element to
search for. Assumes that the indices in the batch are unique and sorted
in ascending order.
ref_indices: (list of int) reference indices, i.e., the indices of the
first element represented in the line.
Returns:
start_columns: (list of int) start column indices, if found. If not found,
None.
end_columns: (list of int) end column indices, if found. If not found,
None.
If found, the element is represented in the left-closed-right-open interval
[start_column, end_column].
"""
batch_size = len(indices_list)
offsets = [indices[-1] - ref_indices[-1] for indices in indices_list]
start_columns = [None] * batch_size
end_columns = [None] * batch_size
if _NUMPY_OMISSION in line:
ellipsis_index = line.find(_NUMPY_OMISSION)
else:
ellipsis_index = len(line)
matches_iter = re.finditer(_NUMBER_REGEX, line)
batch_pos = 0
offset_counter = 0
for match in matches_iter:
if match.start() > ellipsis_index:
# Do not attempt to search beyond ellipsis.
break
if offset_counter == offsets[batch_pos]:
start_columns[batch_pos] = match.start()
# Remove the final comma, right bracket, or whitespace.
end_columns[batch_pos] = match.end() - 1
batch_pos += 1
if batch_pos >= batch_size:
break
offset_counter += 1
return start_columns, end_columns
def _pad_string_to_length(string, length):
return " " * (length - len(string)) + string
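# Hedged usage sketch (not part of the original module): exercises the
# numeric_summary function defined below on a small float tensor containing
# special values, to illustrate the two-row count table and the
# min/max/mean/std row it produces.
def _example_numeric_summary():
  values = np.array([[np.nan, -np.inf, -1.0], [0.0, 2.5, np.inf]])
  return numeric_summary(values).lines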
def numeric_summary(tensor):
"""Get a text summary of a numeric tensor.
This summary is only available for numeric (int*, float*, complex*) and
Boolean tensors.
Args:
tensor: (`numpy.ndarray`) the tensor value object to be summarized.
Returns:
    The summary text as a `RichTextLines` object. If the type of `tensor` is not
    numeric or Boolean, a single-line `RichTextLines` object containing a
    warning message is returned instead.
"""
def _counts_summary(counts, skip_zeros=True, total_count=None):
"""Format values as a two-row table."""
if skip_zeros:
counts = [(count_key, count_val) for count_key, count_val in counts
if count_val]
max_common_len = 0
for count_key, count_val in counts:
count_val_str = str(count_val)
common_len = max(len(count_key) + 1, len(count_val_str) + 1)
max_common_len = max(common_len, max_common_len)
key_line = debugger_cli_common.RichLine("|")
val_line = debugger_cli_common.RichLine("|")
for count_key, count_val in counts:
count_val_str = str(count_val)
key_line += _pad_string_to_length(count_key, max_common_len)
val_line += _pad_string_to_length(count_val_str, max_common_len)
key_line += " |"
val_line += " |"
if total_count is not None:
total_key_str = "total"
total_val_str = str(total_count)
max_common_len = max(len(total_key_str) + 1, len(total_val_str))
total_key_str = _pad_string_to_length(total_key_str, max_common_len)
total_val_str = _pad_string_to_length(total_val_str, max_common_len)
key_line += total_key_str + " |"
val_line += total_val_str + " |"
return debugger_cli_common.rich_text_lines_from_rich_line_list(
[key_line, val_line])
if not isinstance(tensor, np.ndarray) or not np.size(tensor):
return debugger_cli_common.RichTextLines([
"No numeric summary available due to empty tensor."])
elif (np.issubdtype(tensor.dtype, np.floating) or
np.issubdtype(tensor.dtype, np.complex) or
np.issubdtype(tensor.dtype, np.integer)):
counts = [
("nan", np.sum(np.isnan(tensor))),
("-inf", np.sum(np.isneginf(tensor))),
("-", np.sum(np.logical_and(
tensor < 0.0, np.logical_not(np.isneginf(tensor))))),
("0", np.sum(tensor == 0.0)),
("+", np.sum(np.logical_and(
tensor > 0.0, np.logical_not(np.isposinf(tensor))))),
("+inf", np.sum(np.isposinf(tensor)))]
output = _counts_summary(counts, total_count=np.size(tensor))
valid_array = tensor[
np.logical_not(np.logical_or(np.isinf(tensor), np.isnan(tensor)))]
if np.size(valid_array):
stats = [
("min", np.min(valid_array)),
("max", np.max(valid_array)),
("mean", np.mean(valid_array)),
("std", np.std(valid_array))]
output.extend(_counts_summary(stats, skip_zeros=False))
return output
elif tensor.dtype == np.bool:
counts = [
("False", np.sum(tensor == 0)),
("True", np.sum(tensor > 0)),]
return _counts_summary(counts, total_count=np.size(tensor))
else:
return debugger_cli_common.RichTextLines([
"No numeric summary available due to tensor dtype: %s." % tensor.dtype])
| apache-2.0 |
postlund/home-assistant | homeassistant/components/google_travel_time/sensor.py | 5 | 10356 | """Support for Google travel time sensors."""
from datetime import datetime, timedelta
import logging
import googlemaps
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.helpers import location
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by Google"
CONF_DESTINATION = "destination"
CONF_OPTIONS = "options"
CONF_ORIGIN = "origin"
CONF_TRAVEL_MODE = "travel_mode"
DEFAULT_NAME = "Google Travel Time"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
ALL_LANGUAGES = [
"ar",
"bg",
"bn",
"ca",
"cs",
"da",
"de",
"el",
"en",
"es",
"eu",
"fa",
"fi",
"fr",
"gl",
"gu",
"hi",
"hr",
"hu",
"id",
"it",
"iw",
"ja",
"kn",
"ko",
"lt",
"lv",
"ml",
"mr",
"nl",
"no",
"pl",
"pt",
"pt-BR",
"pt-PT",
"ro",
"ru",
"sk",
"sl",
"sr",
"sv",
"ta",
"te",
"th",
"tl",
"tr",
"uk",
"vi",
"zh-CN",
"zh-TW",
]
AVOID = ["tolls", "highways", "ferries", "indoor"]
TRANSIT_PREFS = ["less_walking", "fewer_transfers"]
TRANSPORT_TYPE = ["bus", "subway", "train", "tram", "rail"]
TRAVEL_MODE = ["driving", "walking", "bicycling", "transit"]
TRAVEL_MODEL = ["best_guess", "pessimistic", "optimistic"]
UNITS = ["metric", "imperial"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
vol.Optional(CONF_OPTIONS, default={CONF_MODE: "driving"}): vol.All(
dict,
vol.Schema(
{
vol.Optional(CONF_MODE, default="driving"): vol.In(TRAVEL_MODE),
vol.Optional("language"): vol.In(ALL_LANGUAGES),
vol.Optional("avoid"): vol.In(AVOID),
vol.Optional("units"): vol.In(UNITS),
vol.Exclusive("arrival_time", "time"): cv.string,
vol.Exclusive("departure_time", "time"): cv.string,
vol.Optional("traffic_model"): vol.In(TRAVEL_MODEL),
vol.Optional("transit_mode"): vol.In(TRANSPORT_TYPE),
vol.Optional("transit_routing_preference"): vol.In(TRANSIT_PREFS),
}
),
),
}
)
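# Hedged example (not part of the original integration): a configuration.yaml
# entry that the schema above would accept. The API key and entity IDs are
# placeholders, not real values.
_EXAMPLE_YAML_CONFIG = """
sensor:
  - platform: google_travel_time
    name: Commute
    api_key: YOUR_API_KEY
    origin: device_tracker.paulus
    destination: zone.home
    options:
      mode: driving
      units: metric
      departure_time: "08:00:00"
"""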
TRACKABLE_DOMAINS = ["device_tracker", "sensor", "zone", "person"]
DATA_KEY = "google_travel_time"
def convert_time_to_utc(timestr):
"""Take a string like 08:00:00 and convert it to a unix timestamp."""
combined = datetime.combine(
dt_util.start_of_local_day(), dt_util.parse_time(timestr)
)
if combined < datetime.now():
combined = combined + timedelta(days=1)
return dt_util.as_timestamp(combined)
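# Hedged example (illustrative only): convert_time_to_utc("08:00:00") returns
# the Unix timestamp of the next occurrence of 08:00 local time (today if that
# moment is still in the future, otherwise tomorrow).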
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the Google travel time platform."""
def run_setup(event):
"""
Delay the setup until Home Assistant is fully initialized.
This allows any entities to be created already
"""
hass.data.setdefault(DATA_KEY, [])
options = config.get(CONF_OPTIONS)
if options.get("units") is None:
options["units"] = hass.config.units.name
travel_mode = config.get(CONF_TRAVEL_MODE)
mode = options.get(CONF_MODE)
if travel_mode is not None:
wstr = (
"Google Travel Time: travel_mode is deprecated, please "
"add mode to the options dictionary instead!"
)
_LOGGER.warning(wstr)
if mode is None:
options[CONF_MODE] = travel_mode
titled_mode = options.get(CONF_MODE).title()
formatted_name = "{} - {}".format(DEFAULT_NAME, titled_mode)
name = config.get(CONF_NAME, formatted_name)
api_key = config.get(CONF_API_KEY)
origin = config.get(CONF_ORIGIN)
destination = config.get(CONF_DESTINATION)
sensor = GoogleTravelTimeSensor(
hass, name, api_key, origin, destination, options
)
hass.data[DATA_KEY].append(sensor)
if sensor.valid_api_connection:
add_entities_callback([sensor])
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class GoogleTravelTimeSensor(Entity):
"""Representation of a Google travel time sensor."""
def __init__(self, hass, name, api_key, origin, destination, options):
"""Initialize the sensor."""
self._hass = hass
self._name = name
self._options = options
self._unit_of_measurement = "min"
self._matrix = None
self.valid_api_connection = True
# Check if location is a trackable entity
if origin.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
self._client = googlemaps.Client(api_key, timeout=10)
try:
self.update()
except googlemaps.exceptions.ApiError as exp:
_LOGGER.error(exp)
self.valid_api_connection = False
return
@property
def state(self):
"""Return the state of the sensor."""
if self._matrix is None:
return None
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
return round(_data["duration_in_traffic"]["value"] / 60)
if "duration" in _data:
return round(_data["duration"]["value"] / 60)
return None
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._matrix is None:
return None
res = self._matrix.copy()
res.update(self._options)
del res["rows"]
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
res["duration_in_traffic"] = _data["duration_in_traffic"]["text"]
if "duration" in _data:
res["duration"] = _data["duration"]["text"]
if "distance" in _data:
res["distance"] = _data["distance"]["text"]
res["origin"] = self._origin
res["destination"] = self._destination
res[ATTR_ATTRIBUTION] = ATTRIBUTION
return res
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Google."""
options_copy = self._options.copy()
dtime = options_copy.get("departure_time")
atime = options_copy.get("arrival_time")
if dtime is not None and ":" in dtime:
options_copy["departure_time"] = convert_time_to_utc(dtime)
elif dtime is not None:
options_copy["departure_time"] = dtime
elif atime is None:
options_copy["departure_time"] = "now"
if atime is not None and ":" in atime:
options_copy["arrival_time"] = convert_time_to_utc(atime)
elif atime is not None:
options_copy["arrival_time"] = atime
# Convert device_trackers to google friendly location
if hasattr(self, "_origin_entity_id"):
self._origin = self._get_location_from_entity(self._origin_entity_id)
if hasattr(self, "_destination_entity_id"):
self._destination = self._get_location_from_entity(
self._destination_entity_id
)
self._destination = self._resolve_zone(self._destination)
self._origin = self._resolve_zone(self._origin)
if self._destination is not None and self._origin is not None:
self._matrix = self._client.distance_matrix(
self._origin, self._destination, **options_copy
)
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity state or attributes."""
entity = self._hass.states.get(entity_id)
if entity is None:
_LOGGER.error("Unable to find entity %s", entity_id)
self.valid_api_connection = False
return None
# Check if the entity has location attributes
if location.has_location(entity):
return self._get_location_from_attributes(entity)
# Check if device is in a zone
zone_entity = self._hass.states.get("zone.%s" % entity.state)
if location.has_location(zone_entity):
_LOGGER.debug(
"%s is in %s, getting zone location", entity_id, zone_entity.entity_id
)
return self._get_location_from_attributes(zone_entity)
# If zone was not found in state then use the state as the location
if entity_id.startswith("sensor."):
return entity.state
# When everything fails just return nothing
return None
@staticmethod
    def _get_location_from_attributes(entity):
        """Get the lat/long string from an entity's attributes."""
attr = entity.attributes
return "%s,%s" % (attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE))
def _resolve_zone(self, friendly_name):
entities = self._hass.states.all()
for entity in entities:
if entity.domain == "zone" and entity.name == friendly_name:
return self._get_location_from_attributes(entity)
return friendly_name
| apache-2.0 |
40223123/raven | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/constants.py | 603 | 15297 | #!/usr/bin/env python
'''Constants defined by SDL, and needed in pygame.
Note that many of the flags for SDL are not needed in pygame, and are not
included here. These constants are generally accessed from the
`pygame.locals` module. This module is automatically placed in the pygame
namespace, but you will usually want to place them directly into your module's
namespace with the following command::
from pygame.locals import *
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
#import SDL.constants
# SDL constants taken from https://wiki.libsdl.org/SDLKeycodeLookup
'''
YV12_OVERLAY = SDL.constants.SDL_YV12_OVERLAY
IYUV_OVERLAY = SDL.constants.SDL_IYUV_OVERLAY
YUY2_OVERLAY = SDL.constants.SDL_YUY2_OVERLAY
UYVY_OVERLAY = SDL.constants.SDL_UYVY_OVERLAY
YVYU_OVERLAY = SDL.constants.SDL_YVYU_OVERLAY
SWSURFACE = SDL.constants.SDL_SWSURFACE
HWSURFACE = SDL.constants.SDL_HWSURFACE
RESIZABLE = SDL.constants.SDL_RESIZABLE
ASYNCBLIT = SDL.constants.SDL_ASYNCBLIT
OPENGL = SDL.constants.SDL_OPENGL
OPENGLBLIT = SDL.constants.SDL_OPENGLBLIT
ANYFORMAT = SDL.constants.SDL_ANYFORMAT
HWPALETTE = SDL.constants.SDL_HWPALETTE
DOUBLEBUF = SDL.constants.SDL_DOUBLEBUF
#FULLSCREEN = SDL.constants.SDL_FULLSCREEN
'''
FULLSCREEN = 0
'''
HWACCEL = SDL.constants.SDL_HWACCEL
SRCCOLORKEY = SDL.constants.SDL_SRCCOLORKEY
'''
RLEACCELOK = 254
RLEACCEL = 255
'''
SRCALPHA = SDL.constants.SDL_SRCALPHA
PREALLOC = SDL.constants.SDL_PREALLOC
NOFRAME = SDL.constants.SDL_NOFRAME
GL_RED_SIZE = SDL.constants.SDL_GL_RED_SIZE
GL_GREEN_SIZE = SDL.constants.SDL_GL_GREEN_SIZE
GL_BLUE_SIZE = SDL.constants.SDL_GL_BLUE_SIZE
GL_ALPHA_SIZE = SDL.constants.SDL_GL_ALPHA_SIZE
GL_BUFFER_SIZE = SDL.constants.SDL_GL_BUFFER_SIZE
GL_DOUBLEBUFFER = SDL.constants.SDL_GL_DOUBLEBUFFER
GL_DEPTH_SIZE = SDL.constants.SDL_GL_DEPTH_SIZE
GL_STENCIL_SIZE = SDL.constants.SDL_GL_STENCIL_SIZE
GL_ACCUM_RED_SIZE = SDL.constants.SDL_GL_ACCUM_RED_SIZE
GL_ACCUM_GREEN_SIZE = SDL.constants.SDL_GL_ACCUM_GREEN_SIZE
GL_ACCUM_BLUE_SIZE = SDL.constants.SDL_GL_ACCUM_BLUE_SIZE
GL_ACCUM_ALPHA_SIZE = SDL.constants.SDL_GL_ACCUM_ALPHA_SIZE
GL_STEREO = SDL.constants.SDL_GL_STEREO
GL_MULTISAMPLEBUFFERS = SDL.constants.SDL_GL_MULTISAMPLEBUFFERS
GL_MULTISAMPLESAMPLES = SDL.constants.SDL_GL_MULTISAMPLESAMPLES
TIMER_RESOLUTION = SDL.constants.TIMER_RESOLUTION
AUDIO_U8 = SDL.constants.AUDIO_U8
AUDIO_S8 = SDL.constants.AUDIO_S8
AUDIO_U16LSB = SDL.constants.AUDIO_U16LSB
AUDIO_S16LSB = SDL.constants.AUDIO_S16LSB
AUDIO_U16MSB = SDL.constants.AUDIO_U16MSB
AUDIO_S16MSB = SDL.constants.AUDIO_S16MSB
AUDIO_U16 = SDL.constants.AUDIO_U16
AUDIO_S16 = SDL.constants.AUDIO_S16
AUDIO_U16SYS = SDL.constants.AUDIO_U16SYS
AUDIO_S16SYS = SDL.constants.AUDIO_S16SYS
'''
def _t(a, b, c, d):
return (ord(a) << 24) | (ord(b) << 16) | (ord(c) << 8) | ord(d)
SCRAP_TEXT = _t('T', 'E', 'X', 'T')
SCRAP_BMP = _t('B', 'M', 'P', ' ')
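# For example, _t('T', 'E', 'X', 'T') packs the four ASCII codes into the
# 32-bit integer 0x54455854, which is the value assigned to SCRAP_TEXT above.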
BLEND_ADD = 0x01
BLEND_SUB = 0x02
BLEND_MULT = 0x03
BLEND_MIN = 0x04
BLEND_MAX = 0x05
"""
NOEVENT = SDL.constants.SDL_NOEVENT
ACTIVEEVENT = SDL.constants.SDL_ACTIVEEVENT
KEYDOWN = SDL.constants.SDL_KEYDOWN
KEYUP = SDL.constants.SDL_KEYUP
MOUSEMOTION = SDL.constants.SDL_MOUSEMOTION
MOUSEBUTTONDOWN = SDL.constants.SDL_MOUSEBUTTONDOWN
MOUSEBUTTONUP = SDL.constants.SDL_MOUSEBUTTONUP
JOYAXISMOTION = SDL.constants.SDL_JOYAXISMOTION
JOYBALLMOTION = SDL.constants.SDL_JOYBALLMOTION
JOYHATMOTION = SDL.constants.SDL_JOYHATMOTION
JOYBUTTONDOWN = SDL.constants.SDL_JOYBUTTONDOWN
JOYBUTTONUP = SDL.constants.SDL_JOYBUTTONUP
VIDEORESIZE = SDL.constants.SDL_VIDEORESIZE
VIDEOEXPOSE = SDL.constants.SDL_VIDEOEXPOSE
QUIT = SDL.constants.SDL_QUIT
SYSWMEVENT = SDL.constants.SDL_SYSWMEVENT
USEREVENT = SDL.constants.SDL_USEREVENT
NUMEVENTS = SDL.constants.SDL_NUMEVENTS
HAT_CENTERED = SDL.constants.SDL_HAT_CENTERED
HAT_UP = SDL.constants.SDL_HAT_UP
HAT_RIGHTUP = SDL.constants.SDL_HAT_RIGHTUP
HAT_RIGHT = SDL.constants.SDL_HAT_RIGHT
HAT_RIGHTDOWN = SDL.constants.SDL_HAT_RIGHTDOWN
HAT_DOWN = SDL.constants.SDL_HAT_DOWN
HAT_LEFTDOWN = SDL.constants.SDL_HAT_LEFTDOWN
HAT_LEFT = SDL.constants.SDL_HAT_LEFT
HAT_LEFTUP = SDL.constants.SDL_HAT_LEFTUP
"""
#BEGIN GENERATED CONSTANTS; see support/make_pygame_keyconstants.py
K_0 = 48
K_1 = 49
K_2 = 50
K_3 = 51
K_4 = 52
K_5 = 53
K_6 = 54
K_7 = 55
K_8 = 56
K_9 = 57
K_AMPERSAND = 38
K_ASTERISK = 42
K_AT = 64
K_BACKQUOTE = 96
K_BACKSLASH = 92
K_BACKSPACE = 8
#K_BREAK = SDL.constants.SDLK_BREAK
K_CAPSLOCK = 1073741881
K_CARET = 94
K_CLEAR = 1073742040
K_COLON = 58
K_COMMA = 44
#K_COMPOSE = SDL.constants.SDLK_COMPOSE
K_DELETE = 127
K_DOLLAR = 36
K_DOWN = 1073741905
K_END = 1073741901
K_EQUALS = 1073741927
K_ESCAPE = 27
#K_EURO = SDL.constants.SDLK_EURO
K_EXCLAIM = 33
K_F1 = 1073741882
K_F10 = 1073741891
K_F11 = 1073741892
K_F12 = 1073741893
K_F13 = 1073741928
K_F14 = 1073741929
K_F15 = 1073741930
K_F2 = 1073741883
K_F3 = 1073741884
K_F4 = 1073741885
K_F5 = 1073741886
K_F6 = 1073741887
K_F7 = 1073741888
K_F8 = 1073741889
K_F9 = 1073741890
#K_FIRST = SDL.constants.SDLK_FIRST
K_GREATER = 1073742022
K_HASH = 1073742028
K_HELP = 1073741941
K_HOME = 1073741898
K_INSERT = 1073741897
K_KP0 = 1073741922
K_KP1 = 1073741913
K_KP2 = 1073741914
K_KP3 = 1073741915
K_KP4 = 1073741916
K_KP5 = 1073741917
K_KP6 = 1073741918
K_KP7 = 1073741919
K_KP8 = 1073741920
K_KP9 = 1073741921
K_KP_DIVIDE = 1073741908
K_KP_ENTER = 1073741912
K_KP_EQUALS = 1073741927
K_KP_MINUS = 1073741910
K_KP_MULTIPLY = 1073741909
K_KP_PERIOD = 1073741923
K_KP_PLUS = 1073741911
K_LALT = 1073742050
#K_LAST = SDL.constants.SDLK_LAST
K_LCTRL = 1073742048
K_LEFT = 1073741904
#K_LEFTBRACKET = SDL.constants.SDLK_LEFTBRACKET
K_LEFTPAREN = 1073742006
#K_LESS = SDL.constants.SDLK_LESS
#K_LMETA = SDL.constants.SDLK_LMETA
K_LSHIFT = 1073742049
#K_LSUPER = SDL.constants.SDLK_LSUPER
K_MENU = 1073741942
K_MINUS = 45
K_MODE = 1073742081
#K_NUMLOCK = SDL.constants.SDLK_NUMLOCK
K_PAGEDOWN = 1073741902
K_PAGEUP = 1073741899
K_PAUSE = 1073741896
#K_PERIOD = SDL.constants.SDLK_PERIOD
K_PLUS = 43
#K_POWER = SDL.constants.SDLK_POWER
#K_PRINT = SDL.constants.SDLK_PRINT
K_QUESTION = 63
K_QUOTE = 39
K_QUOTEDBL = 34
K_RALT = 1073742054
K_RCTRL = 1073742052
K_RETURN = 13
K_RIGHT = 1073741903
#K_RIGHTBRACKET = SDL.constants.SDLK_RIGHTBRACKET
K_RIGHTPAREN = 41
#K_RMETA = SDL.constants.SDLK_RMETA
K_RSHIFT = 1073742053
#K_RSUPER = SDL.constants.SDLK_RSUPER
K_SCROLLOCK = 1073741895
K_SEMICOLON = 59
K_SLASH = 47
K_SPACE = 1073742029
K_SYSREQ = 1073741978
K_TAB = 9
K_UNDERSCORE = 95
K_UNDO = 1073741946
K_UNKNOWN = 0
K_UP = 1073741906
"""
K_WORLD_0 = SDL.constants.SDLK_WORLD_0
K_WORLD_1 = SDL.constants.SDLK_WORLD_1
K_WORLD_10 = SDL.constants.SDLK_WORLD_10
K_WORLD_11 = SDL.constants.SDLK_WORLD_11
K_WORLD_12 = SDL.constants.SDLK_WORLD_12
K_WORLD_13 = SDL.constants.SDLK_WORLD_13
K_WORLD_14 = SDL.constants.SDLK_WORLD_14
K_WORLD_15 = SDL.constants.SDLK_WORLD_15
K_WORLD_16 = SDL.constants.SDLK_WORLD_16
K_WORLD_17 = SDL.constants.SDLK_WORLD_17
K_WORLD_18 = SDL.constants.SDLK_WORLD_18
K_WORLD_19 = SDL.constants.SDLK_WORLD_19
K_WORLD_2 = SDL.constants.SDLK_WORLD_2
K_WORLD_20 = SDL.constants.SDLK_WORLD_20
K_WORLD_21 = SDL.constants.SDLK_WORLD_21
K_WORLD_22 = SDL.constants.SDLK_WORLD_22
K_WORLD_23 = SDL.constants.SDLK_WORLD_23
K_WORLD_24 = SDL.constants.SDLK_WORLD_24
K_WORLD_25 = SDL.constants.SDLK_WORLD_25
K_WORLD_26 = SDL.constants.SDLK_WORLD_26
K_WORLD_27 = SDL.constants.SDLK_WORLD_27
K_WORLD_28 = SDL.constants.SDLK_WORLD_28
K_WORLD_29 = SDL.constants.SDLK_WORLD_29
K_WORLD_3 = SDL.constants.SDLK_WORLD_3
K_WORLD_30 = SDL.constants.SDLK_WORLD_30
K_WORLD_31 = SDL.constants.SDLK_WORLD_31
K_WORLD_32 = SDL.constants.SDLK_WORLD_32
K_WORLD_33 = SDL.constants.SDLK_WORLD_33
K_WORLD_34 = SDL.constants.SDLK_WORLD_34
K_WORLD_35 = SDL.constants.SDLK_WORLD_35
K_WORLD_36 = SDL.constants.SDLK_WORLD_36
K_WORLD_37 = SDL.constants.SDLK_WORLD_37
K_WORLD_38 = SDL.constants.SDLK_WORLD_38
K_WORLD_39 = SDL.constants.SDLK_WORLD_39
K_WORLD_4 = SDL.constants.SDLK_WORLD_4
K_WORLD_40 = SDL.constants.SDLK_WORLD_40
K_WORLD_41 = SDL.constants.SDLK_WORLD_41
K_WORLD_42 = SDL.constants.SDLK_WORLD_42
K_WORLD_43 = SDL.constants.SDLK_WORLD_43
K_WORLD_44 = SDL.constants.SDLK_WORLD_44
K_WORLD_45 = SDL.constants.SDLK_WORLD_45
K_WORLD_46 = SDL.constants.SDLK_WORLD_46
K_WORLD_47 = SDL.constants.SDLK_WORLD_47
K_WORLD_48 = SDL.constants.SDLK_WORLD_48
K_WORLD_49 = SDL.constants.SDLK_WORLD_49
K_WORLD_5 = SDL.constants.SDLK_WORLD_5
K_WORLD_50 = SDL.constants.SDLK_WORLD_50
K_WORLD_51 = SDL.constants.SDLK_WORLD_51
K_WORLD_52 = SDL.constants.SDLK_WORLD_52
K_WORLD_53 = SDL.constants.SDLK_WORLD_53
K_WORLD_54 = SDL.constants.SDLK_WORLD_54
K_WORLD_55 = SDL.constants.SDLK_WORLD_55
K_WORLD_56 = SDL.constants.SDLK_WORLD_56
K_WORLD_57 = SDL.constants.SDLK_WORLD_57
K_WORLD_58 = SDL.constants.SDLK_WORLD_58
K_WORLD_59 = SDL.constants.SDLK_WORLD_59
K_WORLD_6 = SDL.constants.SDLK_WORLD_6
K_WORLD_60 = SDL.constants.SDLK_WORLD_60
K_WORLD_61 = SDL.constants.SDLK_WORLD_61
K_WORLD_62 = SDL.constants.SDLK_WORLD_62
K_WORLD_63 = SDL.constants.SDLK_WORLD_63
K_WORLD_64 = SDL.constants.SDLK_WORLD_64
K_WORLD_65 = SDL.constants.SDLK_WORLD_65
K_WORLD_66 = SDL.constants.SDLK_WORLD_66
K_WORLD_67 = SDL.constants.SDLK_WORLD_67
K_WORLD_68 = SDL.constants.SDLK_WORLD_68
K_WORLD_69 = SDL.constants.SDLK_WORLD_69
K_WORLD_7 = SDL.constants.SDLK_WORLD_7
K_WORLD_70 = SDL.constants.SDLK_WORLD_70
K_WORLD_71 = SDL.constants.SDLK_WORLD_71
K_WORLD_72 = SDL.constants.SDLK_WORLD_72
K_WORLD_73 = SDL.constants.SDLK_WORLD_73
K_WORLD_74 = SDL.constants.SDLK_WORLD_74
K_WORLD_75 = SDL.constants.SDLK_WORLD_75
K_WORLD_76 = SDL.constants.SDLK_WORLD_76
K_WORLD_77 = SDL.constants.SDLK_WORLD_77
K_WORLD_78 = SDL.constants.SDLK_WORLD_78
K_WORLD_79 = SDL.constants.SDLK_WORLD_79
K_WORLD_8 = SDL.constants.SDLK_WORLD_8
K_WORLD_80 = SDL.constants.SDLK_WORLD_80
K_WORLD_81 = SDL.constants.SDLK_WORLD_81
K_WORLD_82 = SDL.constants.SDLK_WORLD_82
K_WORLD_83 = SDL.constants.SDLK_WORLD_83
K_WORLD_84 = SDL.constants.SDLK_WORLD_84
K_WORLD_85 = SDL.constants.SDLK_WORLD_85
K_WORLD_86 = SDL.constants.SDLK_WORLD_86
K_WORLD_87 = SDL.constants.SDLK_WORLD_87
K_WORLD_88 = SDL.constants.SDLK_WORLD_88
K_WORLD_89 = SDL.constants.SDLK_WORLD_89
K_WORLD_9 = SDL.constants.SDLK_WORLD_9
K_WORLD_90 = SDL.constants.SDLK_WORLD_90
K_WORLD_91 = SDL.constants.SDLK_WORLD_91
K_WORLD_92 = SDL.constants.SDLK_WORLD_92
K_WORLD_93 = SDL.constants.SDLK_WORLD_93
K_WORLD_94 = SDL.constants.SDLK_WORLD_94
K_WORLD_95 = SDL.constants.SDLK_WORLD_95
"""
K_a = 97
K_b = 98
K_c = 99
K_d = 100
K_e = 101
K_f = 102
K_g = 103
K_h = 104
K_i = 105
K_j = 106
K_k = 107
K_l = 108
K_m = 109
K_n = 110
K_o = 111
K_p = 112
K_q = 113
K_r = 114
K_s = 115
K_t = 116
K_u = 117
K_v = 118
K_w = 119
K_x = 120
K_y = 121
K_z = 122
#END GENERATED CONSTANTS
| gpl-3.0 |
wrouesnel/ansible | lib/ansible/modules/network/cloudengine/ce_bfd_session.py | 16 | 20563 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_bfd_session
version_added: "2.4"
short_description: Manages BFD session configuration on HUAWEI CloudEngine devices.
description:
- Manages BFD session configuration, creates a BFD session or deletes a specified BFD session
on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
session_name:
description:
- Specifies the name of a BFD session.
The value is a string of 1 to 15 case-sensitive characters without spaces.
required: true
default: null
create_type:
description:
            - BFD session creation mode. Currently, a BFD session can only be
              created in static or static auto-negotiation mode.
required: false
default: null
choices: ['static', 'auto']
addr_type:
description:
- Specifies the peer IP address type.
required: false
default: null
choices: ['ipv4']
out_if_name:
description:
- Specifies the type and number of the interface bound to the BFD session.
required: false
default: null
dest_addr:
description:
- Specifies the peer IP address bound to the BFD session.
required: false
default: null
src_addr:
description:
- Indicates the source IP address carried in BFD packets.
required: false
default: null
vrf_name:
description:
- Specifies the name of a Virtual Private Network (VPN) instance that is bound to a BFD session.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value _public_ is reserved and cannot be used as the VPN instance name.
required: false
default: null
use_default_ip:
description:
- Indicates the default multicast IP address that is bound to a BFD session.
By default, BFD uses the multicast IP address 224.0.0.184.
You can set the multicast IP address by running the default-ip-address command.
The value is a bool type.
required: false
default: false
state:
description:
- Determines whether the config should be present or not on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: bfd session module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring Single-hop BFD for Detecting Faults on a Layer 2 Link
ce_bfd_session:
session_name: bfd_l2link
use_default_ip: true
out_if_name: 10GE1/0/1
provider: '{{ cli }}'
- name: Configuring Single-Hop BFD on a VLANIF Interface
ce_bfd_session:
session_name: bfd_vlanif
dest_addr: 10.1.1.6
out_if_name: Vlanif100
provider: '{{ cli }}'
- name: Configuring Multi-Hop BFD
ce_bfd_session:
session_name: bfd_multi_hop
dest_addr: 10.1.1.1
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"addr_type": null,
"create_type": null,
"dest_addr": null,
"out_if_name": "10GE1/0/1",
"session_name": "bfd_l2link",
"src_addr": null,
"state": "present",
"use_default_ip": true,
"vrf_name": null
}
existing:
description: k/v pairs of existing configuration
returned: always
type: dict
sample: {
"session": {}
}
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: dict
sample: {
"session": {
"addrType": "IPV4",
"createType": "SESS_STATIC",
"destAddr": null,
"outIfName": "10GE1/0/1",
"sessName": "bfd_l2link",
"srcAddr": null,
"useDefaultIp": "true",
"vrfName": null
}
}
updates:
description: commands sent to the device
returned: always
type: list
sample: [
"bfd bfd_l2link bind peer-ip default-ip interface 10ge1/0/1"
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import sys
import socket
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
CE_NC_GET_BFD = """
<filter type="subtree">
<bfd xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
</bfd>
</filter>
"""
CE_NC_GET_BFD_GLB = """
<bfdSchGlobal>
<bfdEnable></bfdEnable>
<defaultIp></defaultIp>
</bfdSchGlobal>
"""
CE_NC_GET_BFD_SESSION = """
<bfdCfgSessions>
<bfdCfgSession>
<sessName>%s</sessName>
<createType></createType>
<addrType></addrType>
<outIfName></outIfName>
<destAddr></destAddr>
<srcAddr></srcAddr>
<vrfName></vrfName>
<useDefaultIp></useDefaultIp>
</bfdCfgSession>
</bfdCfgSessions>
"""
def is_valid_ip_vpn(vpname):
"""check ip vpn"""
if not vpname:
return False
if vpname == "_public_":
return False
if len(vpname) < 1 or len(vpname) > 31:
return False
return True
def check_default_ip(ipaddr):
"""check the default multicast IP address"""
# The value ranges from 224.0.0.107 to 224.0.0.250
if not check_ip_addr(ipaddr):
return False
if ipaddr.count(".") != 3:
return False
ips = ipaddr.split(".")
if ips[0] != "224" or ips[1] != "0" or ips[2] != "0":
return False
if not ips[3].isdigit() or int(ips[3]) < 107 or int(ips[3]) > 250:
return False
return True
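# Hedged examples (illustrative only): check_default_ip("224.0.0.184") and
# check_default_ip("224.0.0.107") return True, while "224.0.1.184" and
# "225.0.0.184" fall outside the 224.0.0.107-224.0.0.250 range and return False.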
def get_interface_type(interface):
"""get the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
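# Hedged examples (illustrative only): get_interface_type("10GE1/0/1") returns
# "10ge", get_interface_type("Vlanif100") returns "vlanif", and a name with an
# unrecognized prefix such as "Foo0/0/1" returns None.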
class BfdSession(object):
"""Manages BFD Session"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.__init_module__()
# module input info
self.session_name = self.module.params['session_name']
self.create_type = self.module.params['create_type']
self.addr_type = self.module.params['addr_type']
self.out_if_name = self.module.params['out_if_name']
self.dest_addr = self.module.params['dest_addr']
self.src_addr = self.module.params['src_addr']
self.vrf_name = self.module.params['vrf_name']
self.use_default_ip = self.module.params['use_default_ip']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.bfd_dict = dict()
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def __init_module__(self):
"""init module"""
mutually_exclusive = [('use_default_ip', 'dest_addr')]
self.module = AnsibleModule(argument_spec=self.spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
def get_bfd_dict(self):
"""bfd config dict"""
bfd_dict = dict()
bfd_dict["global"] = dict()
bfd_dict["session"] = dict()
conf_str = CE_NC_GET_BFD % (CE_NC_GET_BFD_GLB + (CE_NC_GET_BFD_SESSION % self.session_name))
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return bfd_dict
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
# get bfd global info
glb = root.find("data/bfd/bfdSchGlobal")
if glb:
for attr in glb:
bfd_dict["global"][attr.tag] = attr.text
# get bfd session info
sess = root.find("data/bfd/bfdCfgSessions/bfdCfgSession")
if sess:
for attr in sess:
bfd_dict["session"][attr.tag] = attr.text
return bfd_dict
def is_session_match(self):
"""is bfd session match"""
if not self.bfd_dict["session"] or not self.session_name:
return False
session = self.bfd_dict["session"]
if self.session_name != session.get("sessName", ""):
return False
if self.create_type and self.create_type.upper() not in session.get("createType", "").upper():
return False
if self.addr_type and self.addr_type != session.get("addrType").lower():
return False
if self.dest_addr and self.dest_addr != session.get("destAddr"):
return False
if self.src_addr and self.src_addr != session.get("srcAddr"):
return False
if self.out_if_name:
if not session.get("outIfName"):
return False
if self.out_if_name.replace(" ", "").lower() != session.get("outIfName").replace(" ", "").lower():
return False
if self.vrf_name and self.vrf_name != session.get("vrfName"):
return False
if str(self.use_default_ip).lower() != session.get("useDefaultIp"):
return False
return True
def config_session(self):
"""configures bfd session"""
xml_str = ""
cmd_list = list()
if not self.session_name:
return xml_str
if self.bfd_dict["global"].get("bfdEnable", "false") != "true":
self.module.fail_json(msg="Error: Please enable BFD globally first.")
xml_str = "<sessName>%s</sessName>" % self.session_name
cmd_session = "bfd %s" % self.session_name
if self.state == "present":
if not self.bfd_dict["session"]:
# Parameter check
if not self.dest_addr and not self.use_default_ip:
self.module.fail_json(
                        msg="Error: dest_addr or use_default_ip must be set when creating a BFD session.")
# Creates a BFD session
if self.create_type:
xml_str += "<createType>SESS_%s</createType>" % self.create_type.upper()
else:
xml_str += "<createType>SESS_STATIC</createType>"
xml_str += "<linkType>IP</linkType>"
cmd_session += " bind"
if self.addr_type:
xml_str += "<addrType>%s</addrType>" % self.addr_type.upper()
else:
xml_str += "<addrType>IPV4</addrType>"
if self.dest_addr:
xml_str += "<destAddr>%s</destAddr>" % self.dest_addr
cmd_session += " peer-%s %s" % ("ipv6" if self.addr_type == "ipv6" else "ip", self.dest_addr)
if self.use_default_ip:
xml_str += "<useDefaultIp>%s</useDefaultIp>" % str(self.use_default_ip).lower()
cmd_session += " peer-ip default-ip"
if self.vrf_name:
xml_str += "<vrfName>%s</vrfName>" % self.vrf_name
cmd_session += " vpn-instance %s" % self.vrf_name
if self.out_if_name:
xml_str += "<outIfName>%s</outIfName>" % self.out_if_name
cmd_session += " interface %s" % self.out_if_name.lower()
if self.src_addr:
xml_str += "<srcAddr>%s</srcAddr>" % self.src_addr
cmd_session += " source-%s %s" % ("ipv6" if self.addr_type == "ipv6" else "ip", self.src_addr)
if self.create_type == "auto":
cmd_session += " auto"
elif not self.is_session_match():
# Bfd session is not match
self.module.fail_json(msg="Error: The specified BFD configuration view has been created.")
else:
pass
else: # absent
if not self.bfd_dict["session"]:
                self.module.fail_json(msg="Error: BFD session does not exist.")
if not self.is_session_match():
self.module.fail_json(msg="Error: BFD session parameter is invalid.")
if self.state == "present":
if xml_str.endswith("</sessName>"):
# no config update
return ""
else:
cmd_list.insert(0, cmd_session)
self.updates_cmd.extend(cmd_list)
return '<bfdCfgSessions><bfdCfgSession operation="merge">' + xml_str\
+ '</bfdCfgSession></bfdCfgSessions>'
else: # absent
cmd_list.append("undo " + cmd_session)
self.updates_cmd.extend(cmd_list)
return '<bfdCfgSessions><bfdCfgSession operation="delete">' + xml_str\
+ '</bfdCfgSession></bfdCfgSessions>'
def netconf_load_config(self, xml_str):
"""load bfd config by netconf"""
if not xml_str:
return
xml_cfg = """
<config>
<bfd xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
</bfd>
</config>""" % xml_str
set_nc_config(self.module, xml_cfg)
self.changed = True
def check_params(self):
"""Check all input params"""
# check session_name
if not self.session_name:
self.module.fail_json(msg="Error: Missing required arguments: session_name.")
if self.session_name:
if len(self.session_name) < 1 or len(self.session_name) > 15:
self.module.fail_json(msg="Error: Session name is invalid.")
# check out_if_name
if self.out_if_name:
if not get_interface_type(self.out_if_name):
self.module.fail_json(msg="Error: Session out_if_name is invalid.")
# check dest_addr
if self.dest_addr:
if not check_ip_addr(self.dest_addr):
self.module.fail_json(msg="Error: Session dest_addr is invalid.")
# check src_addr
if self.src_addr:
if not check_ip_addr(self.src_addr):
self.module.fail_json(msg="Error: Session src_addr is invalid.")
# check vrf_name
if self.vrf_name:
if not is_valid_ip_vpn(self.vrf_name):
self.module.fail_json(msg="Error: Session vrf_name is invalid.")
if not self.dest_addr:
                self.module.fail_json(msg="Error: vrf_name and dest_addr must be set at the same time.")
# check use_default_ip
if self.use_default_ip and not self.out_if_name:
            self.module.fail_json(msg="Error: use_default_ip and out_if_name must be set at the same time.")
def get_proposed(self):
"""get proposed info"""
# base config
self.proposed["session_name"] = self.session_name
self.proposed["create_type"] = self.create_type
self.proposed["addr_type"] = self.addr_type
self.proposed["out_if_name"] = self.out_if_name
self.proposed["dest_addr"] = self.dest_addr
self.proposed["src_addr"] = self.src_addr
self.proposed["vrf_name"] = self.vrf_name
self.proposed["use_default_ip"] = self.use_default_ip
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.bfd_dict:
return
self.existing["session"] = self.bfd_dict.get("session")
def get_end_state(self):
"""get end state info"""
bfd_dict = self.get_bfd_dict()
if not bfd_dict:
return
self.end_state["session"] = bfd_dict.get("session")
def work(self):
"""worker"""
self.check_params()
self.bfd_dict = self.get_bfd_dict()
self.get_existing()
self.get_proposed()
# deal present or absent
xml_str = ''
if self.session_name:
xml_str += self.config_session()
# update to device
if xml_str:
self.netconf_load_config(xml_str)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
session_name=dict(required=True, type='str'),
create_type=dict(required=False, type='str', choices=['static', 'auto']),
addr_type=dict(required=False, type='str', choices=['ipv4']),
out_if_name=dict(required=False, type='str'),
dest_addr=dict(required=False, type='str'),
src_addr=dict(required=False, type='str'),
vrf_name=dict(required=False, type='str'),
use_default_ip=dict(required=False, type='bool', default=False),
state=dict(required=False, default='present', choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = BfdSession(argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
chrisseto/osf.io | osf/models/metaschema.py | 10 | 2428 | # -*- coding: utf-8 -*-
from django.db import models
import jsonschema
from website.util import api_v2_url
from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.exceptions import ValidationValueError
from website.project.metadata.utils import create_jsonschema_from_metaschema
class MetaSchema(ObjectIDMixin, BaseModel):
name = models.CharField(max_length=255)
schema = DateTimeAwareJSONField(default=dict)
category = models.CharField(max_length=255, null=True, blank=True)
active = models.BooleanField(default=True)
# Version of the schema to use (e.g. if questions, responses change)
schema_version = models.IntegerField()
class Meta:
unique_together = ('name', 'schema_version')
def __unicode__(self):
return '(name={}, schema_version={}, id={})'.format(self.name, self.schema_version, self.id)
@property
def _config(self):
return self.schema.get('config', {})
@property
def requires_approval(self):
return self._config.get('requiresApproval', False)
@property
def fulfills(self):
return self._config.get('fulfills', [])
@property
def messages(self):
return self._config.get('messages', {})
@property
def requires_consent(self):
return self._config.get('requiresConsent', False)
@property
def has_files(self):
return self._config.get('hasFiles', False)
@property
def absolute_api_v2_url(self):
path = '/metaschemas/{}/'.format(self._id)
return api_v2_url(path)
@classmethod
def get_prereg_schema(cls):
return cls.objects.get(
name='Prereg Challenge',
schema_version=2
)
def validate_metadata(self, metadata, reviewer=False, required_fields=False):
"""
Validates registration_metadata field.
"""
schema = create_jsonschema_from_metaschema(self.schema,
required_fields=required_fields,
is_reviewer=reviewer)
try:
jsonschema.validate(metadata, schema)
except jsonschema.ValidationError as e:
raise ValidationValueError(e.message)
except jsonschema.SchemaError as e:
raise ValidationValueError(e.message)
return
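# Hedged usage sketch (illustrative only; assumes a populated database):
#
#   schema = MetaSchema.get_prereg_schema()
#   schema.validate_metadata(registration_metadata, required_fields=True)
#
# validate_metadata raises ValidationValueError when the supplied metadata
# does not satisfy the JSON schema generated from self.schema.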
| apache-2.0 |
AdamWill/python-fedora | doc/conf.py | 5 | 5294 | # -*- coding: utf-8 -*-
#
# Python Fedora Module documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 08:12:44 2008.
#
# This file is execfile()d with the current directory set to its containing
# dir.
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import fedora.release
# If your extensions are in another directory, add it here.
#sys.path.append(os.path.dirname(__file__))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = fedora.release.NAME
copyright = fedora.release.COPYRIGHT
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = fedora.release.VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
#exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps page names to templates.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = fedora.release.DOWNLOAD_URL + 'doc/'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sphinxdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
(
'index',
'Python Fedora Module.tex',
'Python Fedora Module Documentation',
'Toshio Kuratomi',
'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
automodule_skip_lines = 4
| gpl-2.0 |
liukaijv/XlsxWriter | xlsxwriter/test/comparison/test_chart_font05.py | 8 | 1869 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_font05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [49407488, 53740288]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_title({'name': 'Title'})
chart.set_x_axis({
'name': 'XXX',
'num_font': {'name': 'Arial', 'pitch_family': 34, 'charset': 0}
})
chart.set_y_axis({
'name': 'YYY',
'num_font': {'bold': 1, 'italic': 1, 'underline': 1}
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
namecoin/namecoin-core | test/get_previous_releases.py | 25 | 9262 | #!/usr/bin/env python3
#
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Download or build previous releases.
# Needs curl and tar to download a release, or the build dependencies when
# building a release.
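#
# Illustrative invocations (a sketch based on the argparse options defined at
# the bottom of this script; the tags and paths are examples, not part of the
# original):
#
#   ./test/get_previous_releases.py -b v0.18.1 v0.19.1
#   ./test/get_previous_releases.py --depends --target-dir releases v0.17.2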
import argparse
import contextlib
from fnmatch import fnmatch
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import hashlib
SHA256_SUMS = {
"d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "bitcoin-0.15.2-aarch64-linux-gnu.tar.gz",
"54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "bitcoin-0.15.2-arm-linux-gnueabihf.tar.gz",
"2b843506c3f1af0eeca5854a920264f9a829f02d0d50328005950ddcbe88874d": "bitcoin-0.15.2-i686-pc-linux-gnu.tar.gz",
"87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "bitcoin-0.15.2-osx64.tar.gz",
"566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "bitcoin-0.15.2-x86_64-linux-gnu.tar.gz",
"0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "bitcoin-0.16.3-aarch64-linux-gnu.tar.gz",
"fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "bitcoin-0.16.3-arm-linux-gnueabihf.tar.gz",
"75a537844313b0a84bdb61ffcdc5c4ce19a738f7ddf71007cd2edf664efd7c37": "bitcoin-0.16.3-i686-pc-linux-gnu.tar.gz",
"78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "bitcoin-0.16.3-osx64.tar.gz",
"5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "bitcoin-0.16.3-x86_64-linux-gnu.tar.gz",
"5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "bitcoin-0.17.2-aarch64-linux-gnu.tar.gz",
"d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "bitcoin-0.17.2-arm-linux-gnueabihf.tar.gz",
"d295fc93f39bbf0fd937b730a93184899a2eb6c3a6d53f3d857cbe77ef89b98c": "bitcoin-0.17.2-i686-pc-linux-gnu.tar.gz",
"a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "bitcoin-0.17.2-osx64.tar.gz",
"943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "bitcoin-0.17.2-x86_64-linux-gnu.tar.gz",
"88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "bitcoin-0.18.1-aarch64-linux-gnu.tar.gz",
"cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "bitcoin-0.18.1-arm-linux-gnueabihf.tar.gz",
"989e847b3e95fc9fedc0b109cae1b4fa43348f2f712e187a118461876af9bd16": "bitcoin-0.18.1-i686-pc-linux-gnu.tar.gz",
"b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "bitcoin-0.18.1-osx64.tar.gz",
"425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "bitcoin-0.18.1-riscv64-linux-gnu.tar.gz",
"600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "bitcoin-0.18.1-x86_64-linux-gnu.tar.gz",
"3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "bitcoin-0.19.1-aarch64-linux-gnu.tar.gz",
"657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "bitcoin-0.19.1-arm-linux-gnueabihf.tar.gz",
"10d1e53208aa7603022f4acc084a046299ab4ccf25fe01e81b3fb6f856772589": "bitcoin-0.19.1-i686-pc-linux-gnu.tar.gz",
"1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "bitcoin-0.19.1-osx64.tar.gz",
"aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "bitcoin-0.19.1-riscv64-linux-gnu.tar.gz",
"5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "bitcoin-0.19.1-x86_64-linux-gnu.tar.gz"
}
@contextlib.contextmanager
def pushd(new_dir) -> None:
previous_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(previous_dir)
def download_binary(tag, args) -> int:
if Path(tag).is_dir():
if not args.remove_dir:
print('Using cached {}'.format(tag))
return 0
shutil.rmtree(tag)
Path(tag).mkdir()
bin_path = 'bin/bitcoin-core-{}'.format(tag[1:])
match = re.compile('v(.*)(rc[0-9]+)$').search(tag)
if match:
bin_path = 'bin/bitcoin-core-{}/test.{}'.format(
match.group(1), match.group(2))
tarball = 'bitcoin-{tag}-{platform}.tar.gz'.format(
tag=tag[1:], platform=args.platform)
tarballUrl = 'https://bitcoincore.org/{bin_path}/{tarball}'.format(
bin_path=bin_path, tarball=tarball)
print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl))
header, status = subprocess.Popen(
['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate()
if re.search("404 Not Found", header.decode("utf-8")):
print("Binary tag was not found")
return 1
curlCmds = [
['curl', '--remote-name', tarballUrl]
]
for cmd in curlCmds:
ret = subprocess.run(cmd).returncode
if ret:
return ret
hasher = hashlib.sha256()
with open(tarball, "rb") as afile:
hasher.update(afile.read())
tarballHash = hasher.hexdigest()
if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
print("Checksum did not match")
return 1
print("Checksum matched")
# Extract tarball
ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
'--strip-components=1',
'bitcoin-{tag}'.format(tag=tag[1:])]).returncode
if ret:
return ret
Path(tarball).unlink()
return 0
def build_release(tag, args) -> int:
githubUrl = "https://github.com/bitcoin/bitcoin"
if args.remove_dir:
if Path(tag).is_dir():
shutil.rmtree(tag)
if not Path(tag).is_dir():
# fetch new tags
subprocess.run(
["git", "fetch", githubUrl, "--tags"])
output = subprocess.check_output(['git', 'tag', '-l', tag])
if not output:
print('Tag {} not found'.format(tag))
return 1
ret = subprocess.run([
'git', 'clone', githubUrl, tag
]).returncode
if ret:
return ret
with pushd(tag):
ret = subprocess.run(['git', 'checkout', tag]).returncode
if ret:
return ret
host = args.host
if args.depends:
with pushd('depends'):
ret = subprocess.run(['make', 'NO_QT=1']).returncode
if ret:
return ret
host = os.environ.get(
'HOST', subprocess.check_output(['./config.guess']))
config_flags = '--prefix={pwd}/depends/{host} '.format(
pwd=os.getcwd(),
host=host) + args.config_flags
cmds = [
'./autogen.sh',
'./configure {}'.format(config_flags),
'make',
]
for cmd in cmds:
ret = subprocess.run(cmd.split()).returncode
if ret:
return ret
# Move binaries, so they're in the same place as in the
# release download
Path('bin').mkdir(exist_ok=True)
files = ['bitcoind', 'bitcoin-cli', 'bitcoin-tx']
for f in files:
Path('src/'+f).rename('bin/'+f)
return 0
def check_host(args) -> int:
args.host = os.environ.get('HOST', subprocess.check_output(
'./depends/config.guess').decode())
if args.download_binary:
platforms = {
'aarch64-*-linux*': 'aarch64-linux-gnu',
'x86_64-*-linux*': 'x86_64-linux-gnu',
'x86_64-apple-darwin*': 'osx64',
}
args.platform = ''
for pattern, target in platforms.items():
if fnmatch(args.host, pattern):
args.platform = target
if not args.platform:
print('Not sure which binary to download for {}'.format(args.host))
return 1
return 0
def main(args) -> int:
Path(args.target_dir).mkdir(exist_ok=True, parents=True)
print("Releases directory: {}".format(args.target_dir))
ret = check_host(args)
if ret:
return ret
if args.download_binary:
with pushd(args.target_dir):
for tag in args.tags:
ret = download_binary(tag, args)
if ret:
return ret
return 0
args.config_flags = os.environ.get('CONFIG_FLAGS', '')
args.config_flags += ' --without-gui --disable-tests --disable-bench'
with pushd(args.target_dir):
for tag in args.tags:
ret = build_release(tag, args)
if ret:
return ret
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', '--remove-dir', action='store_true',
help='remove existing directory.')
parser.add_argument('-d', '--depends', action='store_true',
help='use depends.')
parser.add_argument('-b', '--download-binary', action='store_true',
help='download release binary.')
parser.add_argument('-t', '--target-dir', action='store',
help='target directory.', default='releases')
parser.add_argument('tags', nargs='+',
help="release tags. e.g.: v0.18.1 v0.20.0rc2")
args = parser.parse_args()
sys.exit(main(args))
| mit |
bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/dumbdbm.py | 251 | 8820 | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import os as _os
import __builtin__
import UserDict
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
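# Illustrative usage sketch (hypothetical key/value data, not part of the
# original module); opening database "spam" creates the spam.dir and spam.dat
# files described in the module docstring:
#
#   import dumbdbm
#   db = dumbdbm.open('spam')
#   db['who'] = 'cleese'      # appends the value to spam.dat
#   db.close()                # writes "'who', (0, 6)" into spam.dir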
class _Database(UserDict.DictMixin):
# The on-disk directory and data files can remain in mutually
# inconsistent states for an arbitrarily long time (see comments
# at the end of __setitem__). This is only repaired when _commit()
# gets called. One place _commit() gets called is from __del__(),
# and if that occurs at program shutdown time, module globals may
# already have gotten rebound to None. Since it's crucial that
# _commit() finish successfully, we can't ignore shutdown races
# here, and _commit() must not reference any globals.
_os = _os # for _commit()
_open = _open # for _commit()
def __init__(self, filebasename, mode):
self._mode = mode
# The directory file is a text file. Each line looks like
# "%r, (%d, %d)\n" % (key, pos, siz)
# where key is the string key, pos is the offset into the dat
# file of the associated value's first byte, and siz is the number
# of bytes in the associated value.
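        # For example (hypothetical entry), a directory line might read:
        # 'who', (0, 6)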
self._dirfile = filebasename + _os.extsep + 'dir'
# The data file is a binary file pointed into by the directory
# file, and holds the values associated with keys. Each value
# begins at a _BLOCKSIZE-aligned byte offset, and is a raw
# binary 8-bit string value.
self._datfile = filebasename + _os.extsep + 'dat'
self._bakfile = filebasename + _os.extsep + 'bak'
# The index is an in-memory dict, mirroring the directory file.
self._index = None # maps keys to (pos, siz) pairs
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
self._chmod(self._datfile)
f.close()
self._update()
# Read directory file into the in-memory index dict.
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
for line in f:
line = line.rstrip()
key, pos_and_siz_pair = eval(line)
self._index[key] = pos_and_siz_pair
f.close()
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
# file currently exists, it's deleted.
def _commit(self):
# CAUTION: It's vital that _commit() succeed, and _commit() can
# be called from __del__(). Therefore we must never reference a
# global in this routine.
if self._index is None:
return # nothing to do
try:
self._os.unlink(self._bakfile)
except self._os.error:
pass
try:
self._os.rename(self._dirfile, self._bakfile)
except self._os.error:
pass
f = self._open(self._dirfile, 'w')
self._chmod(self._dirfile)
for key, pos_and_siz_pair in self._index.iteritems():
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
sync = _commit
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
# Append val to the data file, starting at a _BLOCKSIZE-aligned
# offset. The data file is first padded with NUL bytes (if needed)
# to get to an aligned offset. Return pair
# (starting offset of val, len(val))
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
# Write val to the data file, starting at offset pos. The caller
# is responsible for ensuring that there's enough room starting at
# pos to hold val, without overwriting some other value. Return
# pair (pos, len(val)).
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
# key is a new key whose associated value starts in the data file
# at offset pos and with length siz. Add an index record to
# the in-memory index dict, and append one to the directory file.
def _addkey(self, key, pos_and_siz_pair):
self._index[key] = pos_and_siz_pair
f = _open(self._dirfile, 'a')
self._chmod(self._dirfile)
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if key not in self._index:
self._addkey(key, self._addval(val))
else:
# See whether the new value is small enough to fit in the
# (padded) space currently occupied by the old value.
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
if newblocks <= oldblocks:
self._index[key] = self._setval(pos, val)
else:
# The new value doesn't fit in the (padded) space used
# by the old value. The blocks used by the old value are
# forever lost.
self._index[key] = self._addval(val)
# Note that _index may be out of synch with the directory
# file now: _setval() and _addval() don't update the directory
# file. This also means that the on-disk directory and data
# files are in a mutually inconsistent state, and they'll
# remain that way until _commit() is called. Note that this
# is a disaster (for the database) if the program crashes
# (so that _commit() never gets called).
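        # Worked example of the block arithmetic above (hypothetical sizes,
        # not from the original): with _BLOCKSIZE = 512, a 700-byte old value
        # occupies 2 blocks, so a replacement of up to 1024 bytes is written
        # in place via _setval(); anything larger goes through _addval() and
        # the old 2 blocks are never reused.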
def __delitem__(self, key):
# The blocks used by the associated value are lost.
del self._index[key]
# XXX It's unclear why we do a _commit() here (the code always
# XXX has, so I'm not changing it). _setitem__ doesn't try to
# XXX keep the directory file in synch. Why should we? Or
# XXX why shouldn't __setitem__?
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return key in self._index
def __contains__(self, key):
return key in self._index
def iterkeys(self):
return self._index.iterkeys()
__iter__ = iterkeys
def __len__(self):
return len(self._index)
def close(self):
self._commit()
self._index = self._datfile = self._dirfile = self._bakfile = None
__del__ = close
def _chmod (self, file):
if hasattr(self._os, 'chmod'):
self._os.chmod(file, self._mode)
def open(file, flag=None, mode=0666):
"""Open the database file, filename, and return corresponding object.
The flag argument, used to control how the database is opened in the
other DBM implementations, is ignored in the dumbdbm module; the
database is always opened for update, and will be created if it does
not exist.
The optional mode argument is the UNIX mode of the file, used only when
the database has to be created. It defaults to octal code 0666 (and
will be modified by the prevailing umask).
"""
# flag argument is currently ignored
# Modify mode depending on the umask
try:
um = _os.umask(0)
_os.umask(um)
except AttributeError:
pass
else:
# Turn off any bits that are set in the umask
mode = mode & (~um)
return _Database(file, mode)
| mit |
DEVELByte/incubator-airflow | airflow/contrib/sensors/gcs_sensor.py | 4 | 2452 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageObjectSensor(BaseSensorOperator):
"""
Checks for the existence of a file in Google Cloud Storage.
"""
template_fields = ('bucket', 'object')
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
bucket,
object,
google_cloud_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
"""
        Create a new GoogleCloudStorageObjectSensor.
:param bucket: The Google cloud storage bucket where the object is.
:type bucket: string
:param object: The name of the object to check in the Google cloud
storage bucket.
:type object: string
        :param google_cloud_conn_id: The connection ID to use when
            connecting to Google cloud storage.
        :type google_cloud_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide delegation enabled.
:type delegate_to: string
"""
super(GoogleCloudStorageObjectSensor, self).__init__(*args, **kwargs)
self.bucket = bucket
self.object = object
self.google_cloud_conn_id = google_cloud_conn_id
self.delegate_to = delegate_to
def poke(self, context):
logging.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_conn_id,
delegate_to=self.delegate_to)
return hook.exists(self.bucket, self.object)
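# Illustrative usage sketch (hypothetical task, bucket and object names, not
# part of this module):
#
#   wait_for_export = GoogleCloudStorageObjectSensor(
#       task_id='wait_for_export',
#       bucket='my-bucket',
#       object='exports/2016-01-01/data.csv',
#       google_cloud_conn_id='google_cloud_storage_default',
#       dag=dag)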
| apache-2.0 |
ga7g08/sympy | sympy/physics/vector/tests/test_output.py | 117 | 2594 | from sympy import S
from sympy.physics.vector import Vector, ReferenceFrame, Dyadic
from sympy.utilities.pytest import raises
Vector.simp = True
A = ReferenceFrame('A')
def test_output_type():
A = ReferenceFrame('A')
v = A.x + A.y
d = v | v
zerov = Vector(0)
zerod = Dyadic(0)
# dot products
assert isinstance(d & d, Dyadic)
assert isinstance(d & zerod, Dyadic)
assert isinstance(zerod & d, Dyadic)
assert isinstance(d & v, Vector)
assert isinstance(v & d, Vector)
assert isinstance(d & zerov, Vector)
assert isinstance(zerov & d, Vector)
raises(TypeError, lambda: d & S(0))
raises(TypeError, lambda: S(0) & d)
raises(TypeError, lambda: d & 0)
raises(TypeError, lambda: 0 & d)
assert not isinstance(v & v, (Vector, Dyadic))
assert not isinstance(v & zerov, (Vector, Dyadic))
assert not isinstance(zerov & v, (Vector, Dyadic))
raises(TypeError, lambda: v & S(0))
raises(TypeError, lambda: S(0) & v)
raises(TypeError, lambda: v & 0)
raises(TypeError, lambda: 0 & v)
# cross products
raises(TypeError, lambda: d ^ d)
raises(TypeError, lambda: d ^ zerod)
raises(TypeError, lambda: zerod ^ d)
assert isinstance(d ^ v, Dyadic)
assert isinstance(v ^ d, Dyadic)
assert isinstance(d ^ zerov, Dyadic)
assert isinstance(zerov ^ d, Dyadic)
assert isinstance(zerov ^ d, Dyadic)
raises(TypeError, lambda: d ^ S(0))
raises(TypeError, lambda: S(0) ^ d)
raises(TypeError, lambda: d ^ 0)
raises(TypeError, lambda: 0 ^ d)
assert isinstance(v ^ v, Vector)
assert isinstance(v ^ zerov, Vector)
assert isinstance(zerov ^ v, Vector)
raises(TypeError, lambda: v ^ S(0))
raises(TypeError, lambda: S(0) ^ v)
raises(TypeError, lambda: v ^ 0)
raises(TypeError, lambda: 0 ^ v)
# outer products
raises(TypeError, lambda: d | d)
raises(TypeError, lambda: d | zerod)
raises(TypeError, lambda: zerod | d)
raises(TypeError, lambda: d | v)
raises(TypeError, lambda: v | d)
raises(TypeError, lambda: d | zerov)
raises(TypeError, lambda: zerov | d)
raises(TypeError, lambda: zerov | d)
raises(TypeError, lambda: d | S(0))
raises(TypeError, lambda: S(0) | d)
raises(TypeError, lambda: d | 0)
raises(TypeError, lambda: 0 | d)
assert isinstance(v | v, Dyadic)
assert isinstance(v | zerov, Dyadic)
assert isinstance(zerov | v, Dyadic)
raises(TypeError, lambda: v | S(0))
raises(TypeError, lambda: S(0) | v)
raises(TypeError, lambda: v | 0)
raises(TypeError, lambda: 0 | v)
| bsd-3-clause |
gangadharkadam/stfrappe | frappe/modules/utils.py | 39 | 4119 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os
import frappe.modules
from frappe.utils import cstr
from frappe.modules import export_doc, get_module_path, scrub
def listfolders(path, only_name=0):
"""
	Returns the list of folders (with paths) in the given path.
	If only_name is set, it returns only the folder names.
"""
out = []
for each in os.listdir(path):
each = cstr(each)
dirname = each.split(os.path.sep)[-1]
fullpath = os.path.join(path, dirname)
if os.path.isdir(fullpath) and not dirname.startswith('.'):
out.append(only_name and dirname or fullpath)
return out
def switch_module(dt, dn, to, frm=None, export=None):
"""
	Change the module of the given doctype. If export is true, also export the
	doc's txt file and copy its code files from the source module.
"""
frappe.db.sql("update `tab"+dt+"` set module=%s where name=%s", (to, dn))
if export:
export_doc(dt, dn)
# copy code files
if dt in ('DocType', 'Page', 'Report'):
from_path = os.path.join(get_module_path(frm), scrub(dt), scrub(dn), scrub(dn))
to_path = os.path.join(get_module_path(to), scrub(dt), scrub(dn), scrub(dn))
		# make the destination dir if it does not already exist
		os.system('mkdir -p %s' % os.path.join(get_module_path(to), scrub(dt), scrub(dn)))
		for ext in ('py','js','html','css'):
			os.system('cp %s %s' % (from_path + '.' + ext, to_path + '.' + ext))
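# Illustrative call (hypothetical doctype and module names, not from the
# original):
#   switch_module("Report", "My Report", to="Selling", frm="Accounts", export=True)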
def commonify_doclist(doclist, with_comments=1):
"""
Makes a doclist more readable by extracting common properties.
This is used for printing Documents in files
"""
from frappe.utils import get_common_dict, get_diff_dict
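	# Illustrative sketch (hypothetical values, not from the original):
	#   in:  [{'doctype': 'Note', 'name': 'n1', 'owner': 'Administrator', ...},
	#         {'doctype': 'Note', 'name': 'n2', 'owner': 'Administrator', ...}]
	#   out: one dict of the values shared by every record, one '__common__'
	#        dict per doctype, then a small per-record diff dict each.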
def make_common(doclist):
c = {}
if with_comments:
c['##comment'] = 'These values are common in all dictionaries'
for k in common_keys:
c[k] = doclist[0][k]
return c
def strip_common_and_idx(d):
for k in common_keys:
if k in d: del d[k]
if 'idx' in d: del d['idx']
return d
def make_common_dicts(doclist):
common_dict = {} # one per doctype
# make common dicts for all records
for d in doclist:
if not d['doctype'] in common_dict:
d1 = d.copy()
if d1.has_key("name"):
del d1['name']
common_dict[d['doctype']] = d1
else:
common_dict[d['doctype']] = get_common_dict(common_dict[d['doctype']], d)
return common_dict
common_keys = ['owner','docstatus','creation','modified','modified_by']
common_dict = make_common_dicts(doclist)
# make docs
final = []
for d in doclist:
f = strip_common_and_idx(get_diff_dict(common_dict[d['doctype']], d))
f['doctype'] = d['doctype'] # keep doctype!
# strip name for child records (only an auto generated number!)
if f['doctype'] != doclist[0]['doctype'] and f.has_key("name"):
del f['name']
if with_comments:
f['##comment'] = d['doctype'] + ('name' in f and (', ' + f['name']) or '')
final.append(f)
# add commons
commons = []
for d in common_dict.values():
d['name']='__common__'
if with_comments:
d['##comment'] = 'These values are common for all ' + d['doctype']
commons.append(strip_common_and_idx(d))
common_values = make_common(doclist)
return [common_values]+commons+final
def uncommonify_doclist(dl):
"""
	Expands a commonified doclist
"""
# first one has common values
common_values = dl[0]
common_dict = frappe._dict()
final = []
idx_dict = {}
for d in dl[1:]:
if 'name' in d and d['name']=='__common__':
# common for a doctype -
del d['name']
common_dict[d['doctype']] = d
else:
dt = d['doctype']
			if dt not in idx_dict: idx_dict[dt] = 1
d1 = frappe._dict(common_values.copy())
# update from common and global
d1.update(common_dict[dt])
d1.update(d)
# idx by sequence
d1['idx'] = idx_dict[dt]
# increment idx
idx_dict[dt] += 1
final.append(d1)
return final
def pprint_doclist(doclist, with_comments = 1):
from json import dumps
return dumps(commonify_doclist(doclist, False), indent=1, sort_keys=True)
def peval_doclist(txt):
from json import loads
try:
return uncommonify_doclist(loads(txt))
except Exception, e:
return uncommonify_doclist(eval(txt))
| mit |
bytor99999/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
| apache-2.0 |